Update include/base headers for C++11/14 (see issue #3140)

See the issue for update guidelines.
Marshall Greenblatt
2021-06-17 15:40:57 -04:00
parent 6d80ec69d7
commit 43f9baa23a
57 changed files with 5466 additions and 11523 deletions
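For context: the hand-rolled base::subtle operations deleted below all have direct C++11 equivalents, which is the migration direction issue #3140 describes. A minimal sketch of the mapping (illustrative names, not code from this commit):

#include <atomic>
#include <cstdint>

using Atomic32 = int32_t;

// NoBarrier_CompareAndSwap(ptr, old_value, new_value): a relaxed CAS that
// returns the value observed at |ptr|.
inline Atomic32 RelaxedCompareAndSwap(std::atomic<Atomic32>* ptr,
                                      Atomic32 old_value,
                                      Atomic32 new_value) {
  // On failure, compare_exchange_strong writes the observed value back into
  // |old_value|; on success, |old_value| already holds the previous value.
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_relaxed);
  return old_value;
}

// Acquire_Load / Release_Store: explicit memory orders replace the
// hand-written barriers in the headers deleted below.
inline Atomic32 AcquireLoad(const std::atomic<Atomic32>* ptr) {
  return ptr->load(std::memory_order_acquire);
}

inline void ReleaseStore(std::atomic<Atomic32>* ptr, Atomic32 value) {
  ptr->store(value, std::memory_order_release);
}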

include/base/internal/cef_atomicops_arm64_gcc.h

@@ -1,335 +0,0 @@
// Copyright (c) 2012 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Do not include this header file directly. Use base/cef_atomicops.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_
namespace base {
namespace subtle {
inline void MemoryBarrier() {
__asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
}
// NoBarrier versions of the operations include "memory" in the clobber list.
// This is not required for direct usage of the NoBarrier versions of the
// operations. However, it is required for correctness when they are used as
// part of the Acquire or Release versions, to ensure that nothing from outside
// the call is reordered between the operation and the memory barrier. This does
// not change the code generated, so has no or minimal impact on the
// NoBarrier operations.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"IJr" (old_value),
[new_value]"r" (new_value)
: "cc", "memory"
); // NOLINT
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"add %w[result], %w[result], %w[increment]\n\t"
"stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
"cbnz %w[temp], 0b \n\t" // Retry on failure.
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [increment]"IJr" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
MemoryBarrier();
result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return prev;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
MemoryBarrier();
prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return prev;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__asm__ __volatile__ ( // NOLINT
"stlr %w[value], %[ptr] \n\t"
: [ptr]"=Q" (*ptr)
: [value]"r" (value)
: "memory"
); // NOLINT
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value;
__asm__ __volatile__ ( // NOLINT
"ldar %w[value], %[ptr] \n\t"
: [value]"=r" (value)
: [ptr]"Q" (*ptr)
: "memory"
); // NOLINT
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"IJr" (old_value),
[new_value]"r" (new_value)
: "cc", "memory"
); // NOLINT
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], %[ptr] \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], %[ptr] \n\t"
"add %[result], %[result], %[increment] \n\t"
"stxr %w[temp], %[result], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [increment]"IJr" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
MemoryBarrier();
result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return prev;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
MemoryBarrier();
prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return prev;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__ ( // NOLINT
"stlr %x[value], %[ptr] \n\t"
: [ptr]"=Q" (*ptr)
: [value]"r" (value)
: "memory"
); // NOLINT
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__ ( // NOLINT
"ldar %x[value], %[ptr] \n\t"
: [value]"=r" (value)
: [ptr]"Q" (*ptr)
: "memory"
); // NOLINT
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
} // namespace base::subtle
} // namespace base
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_
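The ldxr/stxr retry loops above are what a C++11 compiler generates for std::atomic read-modify-write operations on ARM64, so the whole file reduces to standard calls. A hedged sketch (illustrative names, not code from this commit):

#include <atomic>
#include <cstdint>

// Relaxed increment returning the new value, matching the contract of
// NoBarrier_AtomicIncrement above; compiles to an ldxr/add/stxr/cbnz loop
// (or a single ldadd on cores with LSE atomics).
inline int32_t RelaxedIncrement(std::atomic<int32_t>* ptr, int32_t increment) {
  return ptr->fetch_add(increment, std::memory_order_relaxed) + increment;
}

// Fully ordered increment, matching Barrier_AtomicIncrement above without
// the explicit "dmb ish" pair.
inline int32_t BarrierIncrement(std::atomic<int32_t>* ptr, int32_t increment) {
  return ptr->fetch_add(increment, std::memory_order_seq_cst) + increment;
}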

include/base/internal/cef_atomicops_arm64_msvc.h

@@ -1,197 +0,0 @@
// Copyright (c) 2008 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/cef_atomicops.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_MSVC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_MSVC_H_
#include <windows.h>
#include <intrin.h>
#include "include/base/cef_macros.h"
namespace base {
namespace subtle {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = _InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = _InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return _InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) +
increment;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
PVOID result = InterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result =
InterlockedExchangePointer(reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(ptr),
static_cast<LONGLONG>(increment)) +
increment;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(_WIN64)
} // namespace base::subtle
} // namespace base
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_MSVC_H_
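The Interlocked* intrinsics above are full barriers, which is why Acquire_Store is written as an exchange whose result is discarded. A minimal sketch of the same idiom in portable C++11 (illustrative, not code from this commit):

#include <atomic>
#include <cstdint>

// Store with a full barrier, the portable counterpart of the
// NoBarrier_AtomicExchange-based Acquire_Store above.
inline void BarrierStore(std::atomic<int32_t>* ptr, int32_t value) {
  ptr->exchange(value, std::memory_order_seq_cst);
}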

include/base/internal/cef_atomicops_arm_gcc.h

@@ -1,325 +0,0 @@
// Copyright (c) 2013 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Do not include this header file directly. Use base/cef_atomicops.h
// instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM_GCC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM_GCC_H_
#if defined(OS_QNX)
#include <sys/cpuinline.h>
#endif
namespace base {
namespace subtle {
// Memory barriers on ARM are funky, but the kernel is here to help:
//
// * ARMv5 didn't support SMP; there is no memory barrier instruction at
// all on this architecture, or when targeting its machine code.
//
// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
// writing a random value to a very specific coprocessor register.
//
// * On ARMv7, the "dmb" instruction is used to perform a full memory
// barrier (though writing to the co-processor will still work).
// However, on single core devices (e.g. Nexus One, or Nexus S),
// this instruction will take up to 200 ns, which is huge, even though
// it's completely un-needed on these devices.
//
// * There is no easy way to determine at runtime if the device is
// single or multi-core. However, the kernel provides a useful helper
// function at a fixed memory address (0xffff0fa0), which will always
// perform a memory barrier in the most efficient way. I.e. on single
// core devices, this is an empty function that exits immediately.
// On multi-core devices, it implements a full memory barrier.
//
// * This source could be compiled to ARMv5 machine code that runs on a
// multi-core ARMv6 or ARMv7 device. In this case, memory barriers
// are needed for correct execution. Always call the kernel helper, even
// when targeting ARMv5TE.
//
inline void MemoryBarrier() {
#if defined(OS_LINUX) || defined(OS_ANDROID)
// Note: This is a function call, which is also an implicit compiler barrier.
typedef void (*KernelMemoryBarrierFunc)();
((KernelMemoryBarrierFunc)0xffff0fa0)();
#elif defined(OS_QNX)
__cpu_membarrier();
#else
#error MemoryBarrier() is not implemented on this platform.
#endif
}
// An ARM toolchain would only define one of these depending on which
// variant of the target architecture is being used. This tests against
// any known ARMv6 or ARMv7 variant, where it is possible to directly
// use ldrex/strex instructions to implement fast atomic operations.
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
int reloop;
do {
// The following is equivalent to:
//
// prev_value = LDREX(ptr)
// reloop = 0
// if (prev_value != old_value)
// reloop = STREX(ptr, new_value)
__asm__ __volatile__(
" ldrex %0, [%3]\n"
" mov %1, #0\n"
" cmp %0, %4\n"
#ifdef __thumb2__
" it eq\n"
#endif
" strexeq %1, %5, [%3]\n"
: "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
: "r"(ptr), "r"(old_value), "r"(new_value)
: "cc", "memory");
} while (reloop != 0);
return prev_value;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return result;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 value;
int reloop;
do {
// Equivalent to:
//
// value = LDREX(ptr)
// value += increment
// reloop = STREX(ptr, value)
//
__asm__ __volatile__(
" ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
: "=&r"(value), "=&r"(reloop), "+m"(*ptr)
: "r"(ptr), "r"(increment)
: "cc", "memory");
} while (reloop);
return value;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
// TODO(digit): Investigate if it's possible to implement this with
// a single MemoryBarrier() operation between the LDREX and STREX.
// See http://crbug.com/246514
MemoryBarrier();
Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
int reloop;
do {
// old_value = LDREX(ptr)
// reloop = STREX(ptr, new_value)
__asm__ __volatile__(
" ldrex %0, [%3]\n"
" strex %1, %4, [%3]\n"
: "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
: "r"(ptr), "r"(new_value)
: "cc", "memory");
} while (reloop != 0);
return old_value;
}
// This tests against any known ARMv5 variant.
#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
// The kernel also provides a helper function to perform an atomic
// compare-and-swap operation at the hard-wired address 0xffff0fc0.
// On ARMv5, this is implemented by a special code path that the kernel
// detects and treats specially when thread pre-emption happens.
// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
//
// Note that this always performs a full memory barrier, so there is no
// need to add calls to MemoryBarrier() before or after it. It also
// returns 0 on success, and nonzero on failure.
//
// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
// use newer kernel revisions, so this should not be a concern.
namespace {
inline int LinuxKernelCmpxchg(Atomic32 old_value,
Atomic32 new_value,
volatile Atomic32* ptr) {
typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
}
} // namespace
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
for (;;) {
prev_value = *ptr;
if (prev_value != old_value)
return prev_value;
if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
return old_value;
}
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (LinuxKernelCmpxchg(old_value, new_value, ptr));
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
for (;;) {
// Atomic exchange the old value with an incremented one.
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
// The exchange took place as expected.
return new_value;
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
for (;;) {
prev_value = *ptr;
if (prev_value != old_value) {
// Always ensure acquire semantics.
MemoryBarrier();
return prev_value;
}
if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
return old_value;
}
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
// This could be implemented as:
// MemoryBarrier();
// return NoBarrier_CompareAndSwap();
//
// But that would use 3 barriers per successful CAS. To save performance,
// use Acquire_CompareAndSwap(). Its implementation guarantees that:
// - A successful swap uses only 2 barriers (in the kernel helper).
// - An early return due to (prev_value != old_value) performs
// a memory barrier with no store, which is equivalent to the
// generic implementation above.
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
#else
#error "Your CPU's ARM architecture is not supported yet"
#endif
// NOTE: Atomicity of the following load and store operations is only
// guaranteed in case of 32-bit alignment of |ptr| values.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
} // namespace base::subtle
} // namespace base
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM_GCC_H_
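The preprocessor ladder above (kernel helpers for ARMv5, ldrex/strex for ARMv6/v7) is precisely the per-variant selection a C++11 compiler now performs on its own. A sketch of the portable replacement for the CAS-loop idiom (illustrative, not code from this commit):

#include <atomic>
#include <cstdint>

// Exchange via a CAS retry loop, mirroring the LinuxKernelCmpxchg-based
// NoBarrier_AtomicExchange above; the compiler picks the right instruction
// sequence for the target ARM variant.
inline int32_t RelaxedExchange(std::atomic<int32_t>* ptr, int32_t new_value) {
  int32_t old_value = ptr->load(std::memory_order_relaxed);
  while (!ptr->compare_exchange_weak(old_value, new_value,
                                     std::memory_order_relaxed)) {
    // On failure, |old_value| was refreshed with the current value; retry.
  }
  return old_value;
  // Equivalently: return ptr->exchange(new_value, std::memory_order_relaxed);
}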

include/base/internal/cef_atomicops_atomicword_compat.h

@@ -1,124 +0,0 @@
// Copyright (c) 2011 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/cef_atomicops.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ATOMICWORD_COMPAT_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ATOMICWORD_COMPAT_H_
// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
// which in turn means int. On some LP32 platforms, intptr_t is an int, but
// on others, it's a long. When AtomicWord and Atomic32 are based on different
// fundamental types, their pointers are incompatible.
//
// This file defines function overloads to allow both AtomicWord and Atomic32
// data to be used with this interface.
//
// On LP64 platforms, AtomicWord and Atomic64 are both always long,
// so this problem doesn't occur.
#if !defined(ARCH_CPU_64_BITS)
namespace base {
namespace subtle {
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return NoBarrier_CompareAndSwap(reinterpret_cast<volatile Atomic32*>(ptr),
old_value, new_value);
}
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return NoBarrier_AtomicExchange(reinterpret_cast<volatile Atomic32*>(ptr),
new_value);
}
inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return NoBarrier_AtomicIncrement(reinterpret_cast<volatile Atomic32*>(ptr),
increment);
}
inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return Barrier_AtomicIncrement(reinterpret_cast<volatile Atomic32*>(ptr),
increment);
}
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Acquire_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return base::subtle::Release_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
NoBarrier_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Acquire_Store(reinterpret_cast<volatile Atomic32*>(ptr),
value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return base::subtle::Release_Store(reinterpret_cast<volatile Atomic32*>(ptr),
value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
return NoBarrier_Load(reinterpret_cast<volatile const Atomic32*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return base::subtle::Acquire_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return base::subtle::Release_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
}
} // namespace base::subtle
} // namespace base
#endif // !defined(ARCH_CPU_64_BITS)
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ATOMICWORD_COMPAT_H_
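A tiny sketch of the type mismatch these shims paper over (local aliases for illustration, not code from this commit):

#include <cstdint>

using AtomicWord = intptr_t;  // As in the deleted headers.
using Atomic32 = int32_t;     // Same width on LP32, but possibly a
                              // different fundamental type.

void Lp32Example(AtomicWord* word_ptr) {
  // Where intptr_t is 'long' and int32_t is 'int', AtomicWord* does not
  // convert to Atomic32* even though the objects have identical size, so
  // the overloads above must reinterpret_cast:
  Atomic32* as_atomic32 = reinterpret_cast<Atomic32*>(word_ptr);
  (void)as_atomic32;
}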

include/base/internal/cef_atomicops_mac.h

@@ -1,223 +0,0 @@
// Copyright (c) 2012 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/cef_atomicops.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_MAC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_MAC_H_
#include <libkern/OSAtomic.h>
namespace base {
namespace subtle {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
inline void MemoryBarrier() {
OSMemoryBarrier();
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#ifdef __LP64__
// 64-bit implementation on 64-bit platform
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
reinterpret_cast<volatile int64_t*>(ptr)));
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64Barrier(increment,
reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Barrier(
old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The libkern interface does not distinguish between
// Acquire and Release memory barriers; they are equivalent.
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#endif // defined(__LP64__)
} // namespace base::subtle
} // namespace base
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_MAC_H_
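Apple deprecated the <libkern/OSAtomic.h> functions used above in macOS 10.12 in favor of <atomic>, another reason this header is removable. A sketch (illustrative, not code from this commit):

#include <atomic>
#include <cstdint>

// The CAS loops above exist only because OSAtomic lacks a plain exchange;
// std::atomic provides one directly.
inline int32_t RelaxedExchange(std::atomic<int32_t>* ptr, int32_t new_value) {
  return ptr->exchange(new_value, std::memory_order_relaxed);
}

// Fully ordered add returning the new value, the counterpart of
// OSAtomicAdd32Barrier() above.
inline int32_t BarrierIncrement(std::atomic<int32_t>* ptr, int32_t increment) {
  return ptr->fetch_add(increment, std::memory_order_seq_cst) + increment;
}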

include/base/internal/cef_atomicops_x86_gcc.h

@@ -1,268 +0,0 @@
// Copyright (c) 2011 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/cef_atomicops.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
// after acquire compare-and-swap.
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace base {
namespace subtle {
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a"(prev)
: "q"(new_value), "m"(*ptr), "0"(old_value)
: "memory");
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
__asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
: "=r"(new_value)
: "m"(*ptr), "0"(new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
__asm__ __volatile__("lock; xaddl %0,%1"
: "+r"(temp), "+m"(*ptr)
:
: "memory");
// temp now holds the old value of *ptr
return temp + increment;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
__asm__ __volatile__("lock; xaddl %0,%1"
: "+r"(temp), "+m"(*ptr)
:
: "memory");
// temp now holds the old value of *ptr
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return temp + increment;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
__asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier.
// See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
// See comments in Atomic64 version of Release_Store(), below.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(__x86_64__)
// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("lock; cmpxchgq %1,%2"
: "=a"(prev)
: "q"(new_value), "m"(*ptr), "0"(old_value)
: "memory");
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
__asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
: "=r"(new_value)
: "m"(*ptr), "0"(new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
__asm__ __volatile__("lock; xaddq %0,%1"
: "+r"(temp), "+m"(*ptr)
:
: "memory");
// temp now contains the previous value of *ptr
return temp + increment;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
__asm__ __volatile__("lock; xaddq %0,%1"
: "+r"(temp), "+m"(*ptr)
:
: "memory");
// temp now contains the previous value of *ptr
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return temp + increment;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier
// for current AMD/Intel chips as of Jan 2008.
// See also Acquire_Load(), below.
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
//
// x86 stores/loads fail to act as barriers for a few instructions (clflush
// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
// not generated by the compiler, and are rare. Users of these instructions
// need to know about cache behaviour in any case since all of these involve
// either flushing cache lines or non-temporal cache hints.
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
// for current AMD/Intel chips as of Jan 2008.
// See also Release_Store(), above.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(__x86_64__)
} // namespace base::subtle
} // namespace base
#undef ATOMICOPS_COMPILER_BARRIER
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
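On x86, std::atomic operations compile to the same "lock; cmpxchgl" and "lock; xaddl" sequences this file spells out by hand (the lfence workaround targeted an erratum in specific early AMD parts). A sketch (illustrative, not code from this commit):

#include <atomic>
#include <cstdint>

// Acquire CAS returning the previous value, the counterpart of
// Acquire_CompareAndSwap above.
inline int32_t AcquireCompareAndSwap(std::atomic<int32_t>* ptr,
                                     int32_t old_value,
                                     int32_t new_value) {
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_acquire,
                               std::memory_order_acquire);
  return old_value;  // Holds the observed value on success and failure alike.
}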

include/base/internal/cef_atomicops_x86_msvc.h

@@ -1,221 +0,0 @@
// Copyright (c) 2008 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/cef_atomicops.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_MSVC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_MSVC_H_
#include <windows.h>
#include <intrin.h>
#include "include/base/cef_macros.h"
#if defined(ARCH_CPU_64_BITS)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// X64, undef it, and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif
namespace base {
namespace subtle {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = _InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = _InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return _InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) +
increment;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS)
// See #undef and note at the top of this file.
__faststorefence();
#else
// We use MemoryBarrier from WinNT.h
::MemoryBarrier();
#endif
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
PVOID result = InterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result =
InterlockedExchangePointer(reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(ptr),
static_cast<LONGLONG>(increment)) +
increment;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(_WIN64)
} // namespace base::subtle
} // namespace base
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_MSVC_H_
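Release_Store above relies on x86 stores having release semantics, needing only a compiler barrier; a std::atomic release store compiles to the same plain mov. A sketch (illustrative, not code from this commit):

#include <atomic>
#include <cstdint>

// Plain mov on x86-64; the compiler barrier is implied by the atomic store.
inline void ReleaseStore(std::atomic<int64_t>* ptr, int64_t value) {
  ptr->store(value, std::memory_order_release);
}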

File diff suppressed because it is too large

include/base/internal/cef_bind_internal_win.h

@@ -1,398 +0,0 @@
// Copyright (c) 2011 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/cef_bind.h instead.
// Specializations of RunnableAdapter<> for Windows specific calling
// conventions. Please see base/bind_internal.h for more info.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_BIND_INTERNAL_WIN_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_BIND_INTERNAL_WIN_H_
// In the x64 architecture in Windows, __fastcall, __stdcall, etc., are all
// the same as __cdecl, which would turn the following specializations into
// multiple definitions.
#if defined(ARCH_CPU_X86_FAMILY)
#if defined(ARCH_CPU_32_BITS)
namespace base {
namespace cef_internal {
template <typename Functor>
class RunnableAdapter;
// __stdcall Function: Arity 0.
template <typename R>
class RunnableAdapter<R(__stdcall*)()> {
public:
typedef R(RunType)();
explicit RunnableAdapter(R(__stdcall* function)()) : function_(function) {}
R Run() { return function_(); }
private:
R(__stdcall* function_)();
};
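// Sketch (not part of this file): with C++11 variadic templates, the
// per-arity specializations that follow collapse into a single template
// per calling convention, along these lines:
//
//   template <typename R, typename... Args>
//   class RunnableAdapter<R(__stdcall*)(Args...)> {
//    public:
//     typedef R(RunType)(Args...);
//     explicit RunnableAdapter(R(__stdcall* function)(Args...))
//         : function_(function) {}
//     R Run(typename CallbackParamTraits<Args>::ForwardType... args) {
//       return function_(args...);
//     }
//    private:
//     R(__stdcall* function_)(Args...);
//   };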
// __fastcall Function: Arity 0.
template <typename R>
class RunnableAdapter<R(__fastcall*)()> {
public:
typedef R(RunType)();
explicit RunnableAdapter(R(__fastcall* function)()) : function_(function) {}
R Run() { return function_(); }
private:
R(__fastcall* function_)();
};
// __stdcall Function: Arity 1.
template <typename R, typename A1>
class RunnableAdapter<R(__stdcall*)(A1)> {
public:
typedef R(RunType)(A1);
explicit RunnableAdapter(R(__stdcall* function)(A1)) : function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1) {
return function_(a1);
}
private:
R(__stdcall* function_)(A1);
};
// __fastcall Function: Arity 1.
template <typename R, typename A1>
class RunnableAdapter<R(__fastcall*)(A1)> {
public:
typedef R(RunType)(A1);
explicit RunnableAdapter(R(__fastcall* function)(A1)) : function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1) {
return function_(a1);
}
private:
R(__fastcall* function_)(A1);
};
// __stdcall Function: Arity 2.
template <typename R, typename A1, typename A2>
class RunnableAdapter<R(__stdcall*)(A1, A2)> {
public:
typedef R(RunType)(A1, A2);
explicit RunnableAdapter(R(__stdcall* function)(A1, A2))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2) {
return function_(a1, a2);
}
private:
R(__stdcall* function_)(A1, A2);
};
// __fastcall Function: Arity 2.
template <typename R, typename A1, typename A2>
class RunnableAdapter<R(__fastcall*)(A1, A2)> {
public:
typedef R(RunType)(A1, A2);
explicit RunnableAdapter(R(__fastcall* function)(A1, A2))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2) {
return function_(a1, a2);
}
private:
R(__fastcall* function_)(A1, A2);
};
// __stdcall Function: Arity 3.
template <typename R, typename A1, typename A2, typename A3>
class RunnableAdapter<R(__stdcall*)(A1, A2, A3)> {
public:
typedef R(RunType)(A1, A2, A3);
explicit RunnableAdapter(R(__stdcall* function)(A1, A2, A3))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3) {
return function_(a1, a2, a3);
}
private:
R(__stdcall* function_)(A1, A2, A3);
};
// __fastcall Function: Arity 3.
template <typename R, typename A1, typename A2, typename A3>
class RunnableAdapter<R(__fastcall*)(A1, A2, A3)> {
public:
typedef R(RunType)(A1, A2, A3);
explicit RunnableAdapter(R(__fastcall* function)(A1, A2, A3))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3) {
return function_(a1, a2, a3);
}
private:
R(__fastcall* function_)(A1, A2, A3);
};
// __stdcall Function: Arity 4.
template <typename R, typename A1, typename A2, typename A3, typename A4>
class RunnableAdapter<R(__stdcall*)(A1, A2, A3, A4)> {
public:
typedef R(RunType)(A1, A2, A3, A4);
explicit RunnableAdapter(R(__stdcall* function)(A1, A2, A3, A4))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3,
typename CallbackParamTraits<A4>::ForwardType a4) {
return function_(a1, a2, a3, a4);
}
private:
R(__stdcall* function_)(A1, A2, A3, A4);
};
// __fastcall Function: Arity 4.
template <typename R, typename A1, typename A2, typename A3, typename A4>
class RunnableAdapter<R(__fastcall*)(A1, A2, A3, A4)> {
public:
typedef R(RunType)(A1, A2, A3, A4);
explicit RunnableAdapter(R(__fastcall* function)(A1, A2, A3, A4))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3,
typename CallbackParamTraits<A4>::ForwardType a4) {
return function_(a1, a2, a3, a4);
}
private:
R(__fastcall* function_)(A1, A2, A3, A4);
};
// __stdcall Function: Arity 5.
template <typename R,
typename A1,
typename A2,
typename A3,
typename A4,
typename A5>
class RunnableAdapter<R(__stdcall*)(A1, A2, A3, A4, A5)> {
public:
typedef R(RunType)(A1, A2, A3, A4, A5);
explicit RunnableAdapter(R(__stdcall* function)(A1, A2, A3, A4, A5))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3,
typename CallbackParamTraits<A4>::ForwardType a4,
typename CallbackParamTraits<A5>::ForwardType a5) {
return function_(a1, a2, a3, a4, a5);
}
private:
R(__stdcall* function_)(A1, A2, A3, A4, A5);
};
// __fastcall Function: Arity 5.
template <typename R,
typename A1,
typename A2,
typename A3,
typename A4,
typename A5>
class RunnableAdapter<R(__fastcall*)(A1, A2, A3, A4, A5)> {
public:
typedef R(RunType)(A1, A2, A3, A4, A5);
explicit RunnableAdapter(R(__fastcall* function)(A1, A2, A3, A4, A5))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3,
typename CallbackParamTraits<A4>::ForwardType a4,
typename CallbackParamTraits<A5>::ForwardType a5) {
return function_(a1, a2, a3, a4, a5);
}
private:
R(__fastcall* function_)(A1, A2, A3, A4, A5);
};
// __stdcall Function: Arity 6.
template <typename R,
typename A1,
typename A2,
typename A3,
typename A4,
typename A5,
typename A6>
class RunnableAdapter<R(__stdcall*)(A1, A2, A3, A4, A5, A6)> {
public:
typedef R(RunType)(A1, A2, A3, A4, A5, A6);
explicit RunnableAdapter(R(__stdcall* function)(A1, A2, A3, A4, A5, A6))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3,
typename CallbackParamTraits<A4>::ForwardType a4,
typename CallbackParamTraits<A5>::ForwardType a5,
typename CallbackParamTraits<A6>::ForwardType a6) {
return function_(a1, a2, a3, a4, a5, a6);
}
private:
R(__stdcall* function_)(A1, A2, A3, A4, A5, A6);
};
// __fastcall Function: Arity 6.
template <typename R,
typename A1,
typename A2,
typename A3,
typename A4,
typename A5,
typename A6>
class RunnableAdapter<R(__fastcall*)(A1, A2, A3, A4, A5, A6)> {
public:
typedef R(RunType)(A1, A2, A3, A4, A5, A6);
explicit RunnableAdapter(R(__fastcall* function)(A1, A2, A3, A4, A5, A6))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3,
typename CallbackParamTraits<A4>::ForwardType a4,
typename CallbackParamTraits<A5>::ForwardType a5,
typename CallbackParamTraits<A6>::ForwardType a6) {
return function_(a1, a2, a3, a4, a5, a6);
}
private:
R(__fastcall* function_)(A1, A2, A3, A4, A5, A6);
};
// __stdcall Function: Arity 7.
template <typename R,
typename A1,
typename A2,
typename A3,
typename A4,
typename A5,
typename A6,
typename A7>
class RunnableAdapter<R(__stdcall*)(A1, A2, A3, A4, A5, A6, A7)> {
public:
typedef R(RunType)(A1, A2, A3, A4, A5, A6, A7);
explicit RunnableAdapter(R(__stdcall* function)(A1, A2, A3, A4, A5, A6, A7))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3,
typename CallbackParamTraits<A4>::ForwardType a4,
typename CallbackParamTraits<A5>::ForwardType a5,
typename CallbackParamTraits<A6>::ForwardType a6,
typename CallbackParamTraits<A7>::ForwardType a7) {
return function_(a1, a2, a3, a4, a5, a6, a7);
}
private:
R(__stdcall* function_)(A1, A2, A3, A4, A5, A6, A7);
};
// __fastcall Function: Arity 7.
template <typename R,
typename A1,
typename A2,
typename A3,
typename A4,
typename A5,
typename A6,
typename A7>
class RunnableAdapter<R(__fastcall*)(A1, A2, A3, A4, A5, A6, A7)> {
public:
typedef R(RunType)(A1, A2, A3, A4, A5, A6, A7);
explicit RunnableAdapter(R(__fastcall* function)(A1, A2, A3, A4, A5, A6, A7))
: function_(function) {}
R Run(typename CallbackParamTraits<A1>::ForwardType a1,
typename CallbackParamTraits<A2>::ForwardType a2,
typename CallbackParamTraits<A3>::ForwardType a3,
typename CallbackParamTraits<A4>::ForwardType a4,
typename CallbackParamTraits<A5>::ForwardType a5,
typename CallbackParamTraits<A6>::ForwardType a6,
typename CallbackParamTraits<A7>::ForwardType a7) {
return function_(a1, a2, a3, a4, a5, a6, a7);
}
private:
R(__fastcall* function_)(A1, A2, A3, A4, A5, A6, A7);
};
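// A minimal C++11 sketch (not the shipped code) of how a single variadic
// partial specialization could replace all of the per-arity __stdcall
// adapters above; it assumes <utility> is available for std::forward. The
// __fastcall family would collapse the same way.
template <typename R, typename... Args>
class RunnableAdapter<R(__stdcall*)(Args...)> {
 public:
  typedef R(RunType)(Args...);
  explicit RunnableAdapter(R(__stdcall* function)(Args...))
      : function_(function) {}
  template <typename... RunArgs>
  R Run(RunArgs&&... args) {
    // Perfect forwarding replaces the per-type CallbackParamTraits plumbing.
    return function_(std::forward<RunArgs>(args)...);
  }

 private:
  R(__stdcall* function_)(Args...);
};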
} // namespace cef_internal
} // namespace base
#endif // defined(ARCH_CPU_32_BITS)
#endif // defined(ARCH_CPU_X86_FAMILY)
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_BIND_INTERNAL_WIN_H_


@@ -36,72 +36,156 @@
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_CALLBACK_INTERNAL_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_CALLBACK_INTERNAL_H_
#include <stddef.h>
#include "include/base/cef_atomic_ref_count.h"
#include "include/base/cef_macros.h"
#include "include/base/cef_callback_forward.h"
#include "include/base/cef_ref_counted.h"
#include "include/base/cef_scoped_ptr.h"
#include "include/base/cef_template_util.h"
template <typename T>
class ScopedVector;
namespace base {
namespace cef_internal {
class CallbackBase;
// At the base level, the only task is to add reference counting data. Don't use
// RefCountedThreadSafe since it requires the destructor to be a virtual method.
// Creating a vtable for every BindState template instantiation results in a lot
// of bloat, and the vtable's only task would be to call the destructor, which
// can be done just as well with a function pointer.
class BindStateBase {
protected:
explicit BindStateBase(void (*destructor)(BindStateBase*))
: ref_count_(0), destructor_(destructor) {}
~BindStateBase() {}
struct FakeBindState;
namespace internal {
class BindStateBase;
class FinallyExecutorCommon;
class ThenAndCatchExecutorCommon;
template <typename ReturnType>
class PostTaskExecutor;
template <typename Functor, typename... BoundArgs>
struct BindState;
class CallbackBase;
class CallbackBaseCopyable;
struct BindStateBaseRefCountTraits {
static void Destruct(const BindStateBase*);
};
template <typename T>
using PassingType = std::conditional_t<std::is_scalar<T>::value, T, T&&>;
// BindStateBase is used to provide an opaque handle that the Callback
// class can use to represent a function object with bound arguments. It
// behaves as an existential type that is used by a corresponding
// DoInvoke function to perform the function execution. This allows
// us to shield the Callback class from the types of the bound argument via
// "type erasure."
// At the base level, the only task is to add reference counting data. Avoid
// using or inheriting any virtual functions: creating a vtable for every
// BindState template instantiation results in a lot of bloat, and the vtable's
// only task would be to call the destructor, which a plain function pointer
// handles just as well.
class BindStateBase
: public RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits> {
public:
REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
enum CancellationQueryMode {
IS_CANCELLED,
MAYBE_VALID,
};
using InvokeFuncStorage = void (*)();
BindStateBase(const BindStateBase&) = delete;
BindStateBase& operator=(const BindStateBase&) = delete;
private:
friend class scoped_refptr<BindStateBase>;
BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*));
BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*),
bool (*query_cancellation_traits)(const BindStateBase*,
CancellationQueryMode mode));
~BindStateBase() = default;
friend struct BindStateBaseRefCountTraits;
friend class RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits>;
friend class CallbackBase;
friend class CallbackBaseCopyable;
void AddRef();
void Release();
// Allowlist subclasses that access the destructor of BindStateBase.
template <typename Functor, typename... BoundArgs>
friend struct BindState;
friend struct ::base::FakeBindState;
AtomicRefCount ref_count_;
bool IsCancelled() const {
return query_cancellation_traits_(this, IS_CANCELLED);
}
bool MaybeValid() const {
return query_cancellation_traits_(this, MAYBE_VALID);
}
// In C++, it is safe to cast function pointers to function pointers of
// another type. It is not okay to use void*. We create an InvokeFuncStorage
// that can store our function pointer, and then cast it back to the original
// type on usage.
InvokeFuncStorage polymorphic_invoke_;
// Pointer to a function that will properly destroy |this|.
void (*destructor_)(BindStateBase*);
DISALLOW_COPY_AND_ASSIGN(BindStateBase);
void (*destructor_)(const BindStateBase*);
bool (*query_cancellation_traits_)(const BindStateBase*,
CancellationQueryMode mode);
};
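// A standalone sketch of the "destructor as a function pointer" technique the
// comment above describes; StateBase, State, and Destroy are illustrative
// names, not the CEF types. The base stores one function pointer instead of
// growing a vtable, and each derived instantiation registers a Destroy() that
// knows the concrete type.
class StateBase {
 public:
  void Destruct() const { destructor_(this); }

 protected:
  explicit StateBase(void (*destructor)(const StateBase*))
      : destructor_(destructor) {}
  ~StateBase() = default;

 private:
  void (*destructor_)(const StateBase*);
};

template <typename Functor>
class State : public StateBase {
 public:
  explicit State(Functor functor)
      : StateBase(&State::Destroy), functor_(functor) {}

 private:
  // No virtual dispatch: the static_cast recovers the concrete type.
  static void Destroy(const StateBase* self) {
    delete static_cast<const State*>(self);
  }
  ~State() = default;
  Functor functor_;
};

// Usage:
//   StateBase* s = new State<int>(42);
//   s->Destruct();  // Runs ~State<int> without any vtable.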
// Holds the Callback methods that don't require specialization, in order to
// reduce template bloat.
// CallbackBase<MoveOnly> is a direct base class of MoveOnly callbacks, and
// CallbackBase<Copyable> uses CallbackBase<MoveOnly> for its implementation.
class CallbackBase {
public:
inline CallbackBase(CallbackBase&& c) noexcept;
CallbackBase& operator=(CallbackBase&& c) noexcept;
explicit CallbackBase(const CallbackBaseCopyable& c);
CallbackBase& operator=(const CallbackBaseCopyable& c);
explicit CallbackBase(CallbackBaseCopyable&& c) noexcept;
CallbackBase& operator=(CallbackBaseCopyable&& c) noexcept;
// Returns true if Callback is null (doesn't refer to anything).
bool is_null() const { return bind_state_.get() == NULL; }
bool is_null() const { return !bind_state_; }
explicit operator bool() const { return !is_null(); }
// Returns true if the callback invocation will be a no-op due to a
// cancellation. It's invalid to call this on an uninitialized callback.
//
// Must be called on the Callback's destination sequence.
bool IsCancelled() const;
// If this returns false, the callback invocation will be a no-op due to a
// cancellation. Note that this may(!) still return true even on a cancelled
// callback.
//
// This function is thread-safe.
bool MaybeValid() const;
// Resets the Callback to an uninitialized state.
void Reset();
protected:
// In C++, it is safe to cast function pointers to function pointers of
// another type. It is not okay to use void*. We create an InvokeFuncStorage
// that can store our function pointer, and then cast it back to the original
// type on usage.
typedef void (*InvokeFuncStorage)(void);
friend class FinallyExecutorCommon;
friend class ThenAndCatchExecutorCommon;
template <typename ReturnType>
friend class PostTaskExecutor;
using InvokeFuncStorage = BindStateBase::InvokeFuncStorage;
// Returns true if this callback equals |other|. |other| may be null.
bool Equals(const CallbackBase& other) const;
bool EqualsInternal(const CallbackBase& other) const;
constexpr inline CallbackBase();
// Allow initializing of |bind_state_| via the constructor to avoid default
// initialization of the scoped_refptr. We do not also initialize
// |polymorphic_invoke_| here because doing a normal assignment in the
// derived Callback templates makes for much nicer compiler errors.
explicit CallbackBase(BindStateBase* bind_state);
// initialization of the scoped_refptr.
explicit inline CallbackBase(BindStateBase* bind_state);
InvokeFuncStorage polymorphic_invoke() const {
return bind_state_->polymorphic_invoke_;
}
// Force the destructor to be instantiated inside this translation unit so
// that our subclasses will not get inlined versions. Avoids more template
@@ -109,116 +193,83 @@ class CallbackBase {
~CallbackBase();
scoped_refptr<BindStateBase> bind_state_;
InvokeFuncStorage polymorphic_invoke_;
};
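// A reduced sketch of the InvokeFuncStorage trick described in the comments
// above; the names here are illustrative. Casting a function pointer to
// another function-pointer type and back is well-defined, unlike
// round-tripping it through void*.
using GenericInvoke = void (*)();

inline int Increment(int x) {
  return x + 1;
}

inline int CallStoredIncrement(GenericInvoke stored, int arg) {
  using Invoker = int (*)(int);
  // Cast back to the original function-pointer type before calling.
  return reinterpret_cast<Invoker>(stored)(arg);
}

// Usage:
//   GenericInvoke stored = reinterpret_cast<GenericInvoke>(&Increment);
//   int result = CallStoredIncrement(stored, 41);  // result == 42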
// A helper template to determine if given type is non-const move-only-type,
// i.e. if a value of the given type should be passed via .Pass() in a
// destructive way.
template <typename T>
struct IsMoveOnlyType {
template <typename U>
static YesType Test(const typename U::MoveOnlyTypeForCPP03*);
constexpr CallbackBase::CallbackBase() = default;
CallbackBase::CallbackBase(CallbackBase&&) noexcept = default;
CallbackBase::CallbackBase(BindStateBase* bind_state)
: bind_state_(AdoptRef(bind_state)) {}
template <typename U>
static NoType Test(...);
// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
class CallbackBaseCopyable : public CallbackBase {
public:
CallbackBaseCopyable(const CallbackBaseCopyable& c);
CallbackBaseCopyable(CallbackBaseCopyable&& c) noexcept = default;
CallbackBaseCopyable& operator=(const CallbackBaseCopyable& c);
CallbackBaseCopyable& operator=(CallbackBaseCopyable&& c) noexcept;
static const bool value =
sizeof(Test<T>(0)) == sizeof(YesType) && !is_const<T>::value;
protected:
constexpr CallbackBaseCopyable() = default;
explicit CallbackBaseCopyable(BindStateBase* bind_state)
: CallbackBase(bind_state) {}
~CallbackBaseCopyable() = default;
};
// This is a typetraits object that's used to take an argument type, and
// extract a suitable type for storing and forwarding arguments.
//
// In particular, it strips off references, and converts arrays to
// pointers for storage; and it avoids accidentally trying to create a
// "reference of a reference" if the argument is a reference type.
//
// This array type becomes an issue for storage because we are passing bound
// parameters by const reference. In this case, we end up passing an actual
// array type in the initializer list which C++ does not allow. This will
// break passing of C-string literals.
template <typename T, bool is_move_only = IsMoveOnlyType<T>::value>
struct CallbackParamTraits {
typedef const T& ForwardType;
typedef T StorageType;
// Helpers for the `Then()` implementation.
template <typename OriginalCallback, typename ThenCallback>
struct ThenHelper;
// Specialization when original callback returns `void`.
template <template <typename> class OriginalCallback,
template <typename>
class ThenCallback,
typename... OriginalArgs,
typename ThenR,
typename... ThenArgs>
struct ThenHelper<OriginalCallback<void(OriginalArgs...)>,
ThenCallback<ThenR(ThenArgs...)>> {
static_assert(sizeof...(ThenArgs) == 0,
"|then| callback cannot accept parameters if |this| has a "
"void return type.");
static auto CreateTrampoline() {
return [](OriginalCallback<void(OriginalArgs...)> c1,
ThenCallback<ThenR(ThenArgs...)> c2, OriginalArgs... c1_args) {
std::move(c1).Run(std::forward<OriginalArgs>(c1_args)...);
return std::move(c2).Run();
};
}
};
// The StorageType here should be almost impossible to trigger unless someone
// manually specifies the type of the bind parameters. However, in case they
// do, this guards against accidentally storing a reference parameter.
//
// The ForwardType should only be used for unbound arguments.
template <typename T>
struct CallbackParamTraits<T&, false> {
typedef T& ForwardType;
typedef T StorageType;
// Specialization when original callback returns a non-void type.
template <template <typename> class OriginalCallback,
template <typename>
class ThenCallback,
typename OriginalR,
typename... OriginalArgs,
typename ThenR,
typename... ThenArgs>
struct ThenHelper<OriginalCallback<OriginalR(OriginalArgs...)>,
ThenCallback<ThenR(ThenArgs...)>> {
static_assert(sizeof...(ThenArgs) == 1,
"|then| callback must accept exactly one parameter if |this| "
"has a non-void return type.");
// TODO(dcheng): This should probably check is_convertible as well (same with
// `AssertBindArgsValidity`).
static_assert(std::is_constructible<ThenArgs..., OriginalR&&>::value,
"|then| callback's parameter must be constructible from "
"return type of |this|.");
static auto CreateTrampoline() {
return [](OriginalCallback<OriginalR(OriginalArgs...)> c1,
ThenCallback<ThenR(ThenArgs...)> c2, OriginalArgs... c1_args) {
return std::move(c2).Run(
std::move(c1).Run(std::forward<OriginalArgs>(c1_args)...));
};
}
};
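// A reduced, self-contained C++14 sketch of the trampoline idea in ThenHelper
// above, with plain lambdas standing in for the Callback machinery: the
// joined callable runs the first function and pipes its non-void result into
// the second. JoinThen is an illustrative name, not CEF API, and <utility> is
// assumed for std::move/std::forward.
template <typename First, typename Second>
auto JoinThen(First first, Second second) {
  return [first = std::move(first),
          second = std::move(second)](auto&&... args) mutable {
    return second(first(std::forward<decltype(args)>(args)...));
  };
}

// Usage:
//   auto joined = JoinThen([](int x) { return x * 2; },
//                          [](int y) { return y + 1; });
//   int result = joined(10);  // result == 21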
// Note that for array types, we implicitly add a const in the conversion. This
// means that it is not possible to bind array arguments to functions that take
// a non-const pointer. Trying to specialize the template based on a "const
// T[n]" does not seem to match correctly, so we are stuck with this
// restriction.
template <typename T, size_t n>
struct CallbackParamTraits<T[n], false> {
typedef const T* ForwardType;
typedef const T* StorageType;
};
// See comment for CallbackParamTraits<T[n]>.
template <typename T>
struct CallbackParamTraits<T[], false> {
typedef const T* ForwardType;
typedef const T* StorageType;
};
// Parameter traits for movable-but-not-copyable scopers.
//
// Callback<>/Bind() understands movable-but-not-copyable semantics where
// the type cannot be copied but can still have its state destructively
// transferred (aka. moved) to another instance of the same type by calling a
// helper function. When used with Bind(), this signifies transferal of the
// object's state to the target function.
//
// For these types, the ForwardType must not be a const reference, or a
// reference. A const reference is inappropriate, and would break const
// correctness, because we are implementing a destructive move. A non-const
// reference cannot be used with temporaries which means the result of a
// function or a cast would not be usable with Callback<> or Bind().
template <typename T>
struct CallbackParamTraits<T, true> {
typedef T ForwardType;
typedef T StorageType;
};
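// Spot checks (a sketch assuming <type_traits> and the traits above are in
// scope) of the mappings CallbackParamTraits computes: arrays decay to const
// pointers, references are stored by value, and ordinary copyable types are
// forwarded by const reference.
static_assert(std::is_same<CallbackParamTraits<int[4]>::StorageType,
                           const int*>::value,
              "arrays are stored as const pointers");
static_assert(std::is_same<CallbackParamTraits<int&>::StorageType, int>::value,
              "references are stored by value");
static_assert(std::is_same<CallbackParamTraits<int>::ForwardType,
                           const int&>::value,
              "copyable types are forwarded by const reference");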
// CallbackForward() is a very limited simulation of C++11's std::forward()
// used by the Callback/Bind system for a set of movable-but-not-copyable
// types. It is needed because forwarding a movable-but-not-copyable
// argument to another function requires us to invoke the proper move
// operator to create an rvalue version of the type. The supported types are
// whitelisted below as overloads of the CallbackForward() function. The
// default template compiles out to be a no-op.
//
// In C++11, std::forward would replace all uses of this function. However, it
// is impossible to implement a general std::forward without C++11 due to a
// lack of rvalue references.
//
// In addition to Callback/Bind, this is used by PostTaskAndReplyWithResult to
// simulate std::forward() and forward the result of one Callback as a
// parameter to another callback. This is to support Callbacks that return
// the movable-but-not-copyable types whitelisted above.
template <typename T>
typename enable_if<!IsMoveOnlyType<T>::value, T>::type& CallbackForward(T& t) {
return t;
}
template <typename T>
typename enable_if<IsMoveOnlyType<T>::value, T>::type CallbackForward(T& t) {
return t.Pass();
}
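// A self-contained sketch of how the overload pair above dispatches;
// MiniScoped and its Pass() are illustrative stand-ins for the whitelisted
// scoper types, with copy semantics left enabled for brevity.
class MiniScoped {
 public:
  // The tag that IsMoveOnlyType<> detects.
  typedef void MoveOnlyTypeForCPP03;
  explicit MiniScoped(int value) : value_(value) {}
  MiniScoped Pass() {
    MiniScoped result(value_);
    value_ = 0;  // Destructive transfer, per the comment above.
    return result;
  }
  int value() const { return value_; }

 private:
  int value_;
};

// Usage:
//   int i = 1;
//   CallbackForward(i);  // Copyable type: a no-op, returns i by reference.
//   MiniScoped s(2);
//   CallbackForward(s);  // Move-only type: calls s.Pass(), returns by value.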
} // namespace cef_internal
} // namespace internal
} // namespace base
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_CALLBACK_INTERNAL_H_


@@ -32,10 +32,9 @@
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
#include "include/base/cef_build.h"
#include "include/base/cef_ref_counted.h"
#include <type_traits>
#include "include/base/cef_template_util.h"
#include "include/base/cef_tuple.h"
// It is dangerous to post a task with a T* argument where T is a subtype of
// RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
@@ -46,135 +45,30 @@
namespace base {
namespace cef_internal {
// This is a base internal implementation file used by task.h and callback.h.
// Not for public consumption, so we wrap it in namespace internal.
namespace internal {
template <typename T, typename = void>
struct IsRefCountedType : std::false_type {};
template <typename T>
struct NeedsScopedRefptrButGetsRawPtr {
#if defined(OS_WIN)
enum { value = base::false_type::value };
#else
enum {
// Human readable translation: you needed to be a scoped_refptr if you are a
// raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase)
// type.
value = (is_pointer<T>::value &&
(is_convertible<T, subtle::RefCountedBase*>::value ||
is_convertible<T, subtle::RefCountedThreadSafeBase*>::value))
};
#endif
struct IsRefCountedType<T,
void_t<decltype(std::declval<T*>()->AddRef()),
decltype(std::declval<T*>()->Release())>>
: std::true_type {};
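// How the void_t detection above behaves, sketched with two minimal,
// illustrative types: the specialization is selected only when both
// expressions inside void_t compile.
struct WithRefCounting {
  void AddRef() {}
  void Release() {}
};
struct PlainType {};

static_assert(IsRefCountedType<WithRefCounting>::value,
              "AddRef()/Release() members are detected");
static_assert(!IsRefCountedType<PlainType>::value,
              "without them the false_type primary template wins");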
// Human readable translation: you needed to be a scoped_refptr if you are a raw
// pointer type and are convertible to a RefCounted(Base|ThreadSafeBase) type.
template <typename T>
struct NeedsScopedRefptrButGetsRawPtr
: conjunction<std::is_pointer<T>,
IsRefCountedType<std::remove_pointer_t<T>>> {
static_assert(!std::is_reference<T>::value,
"NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
};
template <typename Params>
struct ParamsUseScopedRefptrCorrectly {
enum { value = 0 };
};
template <>
struct ParamsUseScopedRefptrCorrectly<Tuple0> {
enum { value = 1 };
};
template <typename A>
struct ParamsUseScopedRefptrCorrectly<Tuple1<A>> {
enum { value = !NeedsScopedRefptrButGetsRawPtr<A>::value };
};
template <typename A, typename B>
struct ParamsUseScopedRefptrCorrectly<Tuple2<A, B>> {
enum {
value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
NeedsScopedRefptrButGetsRawPtr<B>::value)
};
};
template <typename A, typename B, typename C>
struct ParamsUseScopedRefptrCorrectly<Tuple3<A, B, C>> {
enum {
value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
NeedsScopedRefptrButGetsRawPtr<B>::value ||
NeedsScopedRefptrButGetsRawPtr<C>::value)
};
};
template <typename A, typename B, typename C, typename D>
struct ParamsUseScopedRefptrCorrectly<Tuple4<A, B, C, D>> {
enum {
value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
NeedsScopedRefptrButGetsRawPtr<B>::value ||
NeedsScopedRefptrButGetsRawPtr<C>::value ||
NeedsScopedRefptrButGetsRawPtr<D>::value)
};
};
template <typename A, typename B, typename C, typename D, typename E>
struct ParamsUseScopedRefptrCorrectly<Tuple5<A, B, C, D, E>> {
enum {
value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
NeedsScopedRefptrButGetsRawPtr<B>::value ||
NeedsScopedRefptrButGetsRawPtr<C>::value ||
NeedsScopedRefptrButGetsRawPtr<D>::value ||
NeedsScopedRefptrButGetsRawPtr<E>::value)
};
};
template <typename A,
typename B,
typename C,
typename D,
typename E,
typename F>
struct ParamsUseScopedRefptrCorrectly<Tuple6<A, B, C, D, E, F>> {
enum {
value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
NeedsScopedRefptrButGetsRawPtr<B>::value ||
NeedsScopedRefptrButGetsRawPtr<C>::value ||
NeedsScopedRefptrButGetsRawPtr<D>::value ||
NeedsScopedRefptrButGetsRawPtr<E>::value ||
NeedsScopedRefptrButGetsRawPtr<F>::value)
};
};
template <typename A,
typename B,
typename C,
typename D,
typename E,
typename F,
typename G>
struct ParamsUseScopedRefptrCorrectly<Tuple7<A, B, C, D, E, F, G>> {
enum {
value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
NeedsScopedRefptrButGetsRawPtr<B>::value ||
NeedsScopedRefptrButGetsRawPtr<C>::value ||
NeedsScopedRefptrButGetsRawPtr<D>::value ||
NeedsScopedRefptrButGetsRawPtr<E>::value ||
NeedsScopedRefptrButGetsRawPtr<F>::value ||
NeedsScopedRefptrButGetsRawPtr<G>::value)
};
};
template <typename A,
typename B,
typename C,
typename D,
typename E,
typename F,
typename G,
typename H>
struct ParamsUseScopedRefptrCorrectly<Tuple8<A, B, C, D, E, F, G, H>> {
enum {
value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
NeedsScopedRefptrButGetsRawPtr<B>::value ||
NeedsScopedRefptrButGetsRawPtr<C>::value ||
NeedsScopedRefptrButGetsRawPtr<D>::value ||
NeedsScopedRefptrButGetsRawPtr<E>::value ||
NeedsScopedRefptrButGetsRawPtr<F>::value ||
NeedsScopedRefptrButGetsRawPtr<G>::value ||
NeedsScopedRefptrButGetsRawPtr<H>::value)
};
};
} // namespace cef_internal
} // namespace internal
} // namespace base


@@ -0,0 +1,66 @@
// Copyright (c) 2013 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/mac/scoped_block.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_SCOPED_BLOCK_MAC_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_SCOPED_BLOCK_MAC_H_
#include <Block.h>
#include "include/base/cef_scoped_typeref_mac.h"
#if defined(__has_feature) && __has_feature(objc_arc)
#error "Cannot include include/base/internal/cef_scoped_block_mac.h in file built with ARC."
#endif
namespace base {
namespace mac {
namespace internal {
template <typename B>
struct ScopedBlockTraits {
static B InvalidValue() { return nullptr; }
static B Retain(B block) { return Block_copy(block); }
static void Release(B block) { Block_release(block); }
};
} // namespace internal
// ScopedBlock<> is patterned after ScopedCFTypeRef<>, but uses Block_copy() and
// Block_release() instead of CFRetain() and CFRelease().
template <typename B>
using ScopedBlock = ScopedTypeRef<B, internal::ScopedBlockTraits<B>>;
} // namespace mac
} // namespace base
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_SCOPED_BLOCK_MAC_H_
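// The traits shape that ScopedBlockTraits above follows, reduced to a
// standalone sketch with malloc()/free() standing in for Block_copy() and
// Block_release(); MallocTraits and TinyScopedRef are illustrative, not CEF
// API.
#include <cstdlib>

struct MallocTraits {
  static void* InvalidValue() { return nullptr; }
  static void Release(void* object) { std::free(object); }
};

template <typename T, typename Traits>
class TinyScopedRef {
 public:
  explicit TinyScopedRef(T object = Traits::InvalidValue()) : object_(object) {}
  ~TinyScopedRef() { Traits::Release(object_); }
  TinyScopedRef(const TinyScopedRef&) = delete;
  TinyScopedRef& operator=(const TinyScopedRef&) = delete;
  T get() const { return object_; }

 private:
  T object_;
};

// Usage:
//   TinyScopedRef<void*, MallocTraits> buffer(std::malloc(16));
//   // buffer.get() is valid here; the allocation is freed at scope exit.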


@@ -0,0 +1,53 @@
// Copyright (c) 2012 Google Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Do not include this header file directly. Use base/memory/scoped_policy.h
// instead.
#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_SCOPED_POLICY_H_
#define CEF_INCLUDE_BASE_INTERNAL_CEF_SCOPED_POLICY_H_
namespace base {
namespace scoped_policy {
// Defines the ownership policy for a scoped object.
enum OwnershipPolicy {
// The scoped object takes ownership of an object by taking over an existing
// ownership claim.
ASSUME,
// The scoped object will retain the object and any initial ownership is
// not changed.
RETAIN
};
} // namespace scoped_policy
} // namespace base
#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_SCOPED_POLICY_H_
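// How a scoper typically consumes OwnershipPolicy, as an illustrative sketch
// (RetainingScoper and the Traits hooks are not CEF API): under ASSUME the
// constructor takes over the caller's existing ownership claim, while under
// RETAIN it adds a claim of its own.
template <typename T, typename Traits>
class RetainingScoper {
 public:
  RetainingScoper(T object, base::scoped_policy::OwnershipPolicy policy)
      : object_(object) {
    // Under ASSUME we keep the caller's claim as-is; under RETAIN we add one.
    if (object_ && policy == base::scoped_policy::RETAIN)
      object_ = Traits::Retain(object_);
  }
  ~RetainingScoper() {
    if (object_)
      Traits::Release(object_);
  }
  RetainingScoper(const RetainingScoper&) = delete;
  RetainingScoper& operator=(const RetainingScoper&) = delete;
  T get() const { return object_; }

 private:
  T object_;
};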