diff --git a/cef_paths2.gypi b/cef_paths2.gypi
index 29aae9673..9c499a6af 100644
--- a/cef_paths2.gypi
+++ b/cef_paths2.gypi
@@ -81,6 +81,7 @@
     ],
     'includes_linux': [
       'include/base/internal/cef_atomicops_atomicword_compat.h',
+      'include/base/internal/cef_atomicops_arm_gcc.h',
       'include/base/internal/cef_atomicops_x86_gcc.h',
       'include/internal/cef_linux.h',
       'include/internal/cef_types_linux.h',
diff --git a/include/base/cef_atomicops.h b/include/base/cef_atomicops.h
index dc1639b59..ae74802f2 100644
--- a/include/base/cef_atomicops.h
+++ b/include/base/cef_atomicops.h
@@ -183,6 +183,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
 #include "include/base/internal/cef_atomicops_mac.h"
 #elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
 #include "include/base/internal/cef_atomicops_x86_gcc.h"
+#elif defined(COMPILER_GCC) && defined(__ARM_ARCH)
+#include "include/base/internal/cef_atomicops_arm_gcc.h"
 #else
 #error "Atomic operations are not supported on your platform"
 #endif
diff --git a/include/base/internal/cef_atomicops_arm_gcc.h b/include/base/internal/cef_atomicops_arm_gcc.h
new file mode 100644
index 000000000..c228b6af0
--- /dev/null
+++ b/include/base/internal/cef_atomicops_arm_gcc.h
@@ -0,0 +1,320 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the name Chromium Embedded
+// Framework nor the names of its contributors may be used to endorse
+// or promote products derived from this software without specific prior
+// written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Do not include this header file directly. Use base/cef_atomicops.h
+// instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM_GCC_H_
+#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM_GCC_H_
+
+#if defined(OS_QNX)
+#include <sys/cpuinline.h>
+#endif
+
+namespace base {
+namespace subtle {
+
+// Memory barriers on ARM are funky, but the kernel is here to help:
+//
+// * ARMv5 didn't support SMP; there is no memory barrier instruction at
+//   all on this architecture, or when targeting its machine code.
+//
+// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
+//   writing a random value to a very specific coprocessor register.
+//
+// * On ARMv7, the "dmb" instruction is used to perform a full memory
+//   barrier (though writing to the co-processor will still work).
+//   However, on single core devices (e.g. Nexus One, or Nexus S),
+//   this instruction will take up to 200 ns, which is huge, even though
+//   it's completely un-needed on these devices.
+//
+// * There is no easy way to determine at runtime if the device is
+//   single or multi-core. However, the kernel provides a useful helper
+//   function at a fixed memory address (0xffff0fa0), which will always
+//   perform a memory barrier in the most efficient way. I.e. on single
+//   core devices, this is an empty function that exits immediately.
+//   On multi-core devices, it implements a full memory barrier.
+//
+// * This source could be compiled to ARMv5 machine code that runs on a
+//   multi-core ARMv6 or ARMv7 device. In this case, memory barriers
+//   are needed for correct execution. Always call the kernel helper, even
+//   when targeting ARMv5TE.
+//
+
+inline void MemoryBarrier() {
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // Note: This is a function call, which is also an implicit compiler barrier.
+  typedef void (*KernelMemoryBarrierFunc)();
+  ((KernelMemoryBarrierFunc)0xffff0fa0)();
+#elif defined(OS_QNX)
+  __cpu_membarrier();
+#else
+#error MemoryBarrier() is not implemented on this platform.
+#endif
+}
+
+// An ARM toolchain would only define one of these depending on which
+// variant of the target architecture is being used. This tests against
+// any known ARMv6 or ARMv7 variant, where it is possible to directly
+// use ldrex/strex instructions to implement fast atomic operations.
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+    defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||  \
+    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+    defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  int reloop;
+  do {
+    // The following is equivalent to:
+    //
+    //   prev_value = LDREX(ptr)
+    //   reloop = 0
+    //   if (prev_value != old_value)
+    //     reloop = STREX(ptr, new_value)
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    mov %1, #0\n"
+                         "    cmp %0, %4\n"
+#ifdef __thumb2__
+                         "    it eq\n"
+#endif
+                         "    strexeq %1, %5, [%3]\n"
+                         : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(old_value), "r"(new_value)
+                         : "cc", "memory");
+  } while (reloop != 0);
+  return prev_value;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 value;
+  int reloop;
+  do {
+    // Equivalent to:
+    //
+    //   value = LDREX(ptr)
+    //   value += increment
+    //   reloop = STREX(ptr, value)
+    //
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    add %0, %0, %4\n"
+                         "    strex %1, %0, [%3]\n"
+                         : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(increment)
+                         : "cc", "memory");
+  } while (reloop);
+  return value;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  // TODO(digit): Investigate if it's possible to implement this with
+  // a single MemoryBarrier() operation between the LDREX and STREX.
+  // See http://crbug.com/246514
+  MemoryBarrier();
+  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  int reloop;
+  do {
+    // old_value = LDREX(ptr)
+    // reloop = STREX(ptr, new_value)
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    strex %1, %4, [%3]\n"
+                         : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(new_value)
+                         : "cc", "memory");
+  } while (reloop != 0);
+  return old_value;
+}
+
+// This tests against any known ARMv5 variant.
+#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+    defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+
+// The kernel also provides a helper function to perform an atomic
+// compare-and-swap operation at the hard-wired address 0xffff0fc0.
+// On ARMv5, this is implemented by a special code path that the kernel
+// detects and treats specially when thread pre-emption happens.
+// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
+//
+// Note that this always performs a full memory barrier; there is no
+// need to add calls to MemoryBarrier() before or after it. It also
+// returns 0 on success, and 1 on failure.
+//
+// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
+// use newer kernel revisions, so this should not be a concern.
+namespace {
+
+inline int LinuxKernelCmpxchg(Atomic32 old_value,
+                              Atomic32 new_value,
+                              volatile Atomic32* ptr) {
+  typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
+  return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
+}
+
+}  // namespace
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  for (;;) {
+    prev_value = *ptr;
+    if (prev_value != old_value)
+      return prev_value;
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+      return old_value;
+  }
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomic exchange the old value with an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev_value;
+  for (;;) {
+    prev_value = *ptr;
+    if (prev_value != old_value) {
+      // Always ensure acquire semantics.
+      MemoryBarrier();
+      return prev_value;
+    }
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+      return old_value;
+  }
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  // This could be implemented as:
+  //    MemoryBarrier();
+  //    return NoBarrier_CompareAndSwap();
+  //
+  // But that would use 3 barriers per successful CAS. To improve
+  // performance, use Acquire_CompareAndSwap(). Its implementation
+  // guarantees that:
+  // - A successful swap uses only 2 barriers (in the kernel helper).
+  // - An early return due to (prev_value != old_value) performs
+  //   a memory barrier with no store, which is equivalent to the
+  //   generic implementation above.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#else
+# error "Your CPU's ARM architecture is not supported yet"
+#endif
+
+// NOTE: Atomicity of the following load and store operations is only
+// guaranteed in case of 32-bit alignment of |ptr| values.
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM_GCC_H_
diff --git a/tools/automate/automate-git.py b/tools/automate/automate-git.py
index 0efb9d49e..d5e75ae90 100644
--- a/tools/automate/automate-git.py
+++ b/tools/automate/automate-git.py
@@ -403,6 +403,9 @@ parser.add_option('--build-log-file',
 parser.add_option('--x64-build',
                   action='store_true', dest='x64build', default=False,
                   help='Create a 64-bit build.')
+parser.add_option('--arm-build',
+                  action='store_true', dest='armbuild', default=False,
+                  help='Create an ARM build.')
 
 # Distribution-related options.
 parser.add_option('--force-distrib',
@@ -466,6 +469,11 @@ if (options.noreleasebuild and \
   parser.print_help(sys.stderr)
   sys.exit()
 
+if options.x64build and options.armbuild:
+  print 'Cannot specify both --x64-build and --arm-build.'
+  parser.print_help(sys.stderr)
+  sys.exit()
+
 # Operating system.
 if options.dryrun and options.dryrunplatform is not None:
   platform = options.dryrunplatform
@@ -528,9 +536,22 @@ if branch_is_newer_than_2785 and not 'CEF_USE_GN' in os.environ.keys():
 use_gn = bool(int(os.environ.get('CEF_USE_GN', '0')))
 if use_gn:
   if branch_is_2743_or_older:
-    print 'GN is not supported with branch 2743 and older.'
+    print 'GN is not supported with branch 2743 and older (set CEF_USE_GN=0).'
     sys.exit()
+
+  if options.armbuild:
+    if platform != 'linux':
+      print 'The ARM build option is only supported on Linux.'
+      sys.exit()
+
+    if not branch_is_newer_than_2785:
+      print 'The ARM build option is not supported with branch 2785 and older.'
+      sys.exit()
 else:
+  if options.armbuild:
+    print 'The ARM build option is not supported by GYP.'
+    sys.exit()
+
   if options.x64build and platform != 'windows' and platform != 'macosx':
     print 'The x64 build option is only used on Windows and Mac OS X.'
     sys.exit()
@@ -936,6 +957,8 @@ if not options.nobuild and (chromium_checkout_changed or \
       # GetAllPlatformConfigs in tools/gn_args.py.
       if options.x64build:
         build_dir_suffix = '_GN_x64'
+      elif options.armbuild:
+        build_dir_suffix = '_GN_arm'
       else:
         build_dir_suffix = '_GN_x86'
     else:
@@ -1008,6 +1031,8 @@ if not options.nodistrib and (chromium_checkout_changed or \
       path = path + ' --ninja-build'
       if options.x64build:
         path = path + ' --x64-build'
+      elif options.armbuild:
+        path = path + ' --arm-build'
 
       if type == 'minimal':
         path = path + ' --minimal'
diff --git a/tools/cefbuilds/cef_html_builder.py b/tools/cefbuilds/cef_html_builder.py
index f763d42e1..be947a167 100644
--- a/tools/cefbuilds/cef_html_builder.py
+++ b/tools/cefbuilds/cef_html_builder.py
@@ -119,6 +119,7 @@ class cef_html_builder:
     return {
       'linux32': 'Linux 32-bit',
       'linux64': 'Linux 64-bit',
+      'linuxarm': 'Linux ARM',
       'macosx64': 'Mac OS X 64-bit',
       'windows32': 'Windows 32-bit',
       'windows64': 'Windows 64-bit'
@@ -169,7 +170,7 @@ class cef_html_builder:
   @staticmethod
   def _get_tooltip_text(platform, cef_version, file):
-    if platform == 'linux32' or platform == 'linux64':
+    if platform.startswith('linux'):
       sample_app = 'cefsimple'
     else:
       sample_app = 'cefclient'
diff --git a/tools/cefbuilds/cef_json_builder.py b/tools/cefbuilds/cef_json_builder.py
index ef9791b6c..5c5d33776 100644
--- a/tools/cefbuilds/cef_json_builder.py
+++ b/tools/cefbuilds/cef_json_builder.py
@@ -75,7 +75,7 @@ class cef_json_builder:
   @staticmethod
   def get_platforms():
     """ Returns the list of supported platforms. """
-    return ('linux32', 'linux64', 'macosx64', 'windows32', 'windows64')
+    return ('linux32', 'linux64', 'linuxarm', 'macosx64', 'windows32', 'windows64')
 
   @staticmethod
   def get_distrib_types():
diff --git a/tools/gn_args.py b/tools/gn_args.py
index 762312994..e575de256 100644
--- a/tools/gn_args.py
+++ b/tools/gn_args.py
@@ -190,11 +190,16 @@ def ValidateArgs(args):
   #   - Linux supports only "x64" unless using a sysroot environment.
   if platform == 'macosx':
     assert target_cpu == 'x64', 'target_cpu must be "x64"'
-  else:
+  elif platform == 'windows':
     assert target_cpu in ('x86', 'x64'), 'target_cpu must be "x86" or "x64"'
+  elif platform == 'linux':
+    assert target_cpu in ('x86', 'x64', 'arm'), 'target_cpu must be "x86", "x64" or "arm"'
 
-  if platform == 'linux' and target_cpu == 'x86':
-    assert use_sysroot, 'target_cpu="x86" requires use_sysroot=true'
+  if platform == 'linux':
+    if target_cpu == 'x86':
+      assert use_sysroot, 'target_cpu="x86" requires use_sysroot=true'
+    elif target_cpu == 'arm':
+      assert use_sysroot, 'target_cpu="arm" requires use_sysroot=true'
 
   # ASAN requires Release builds.
   if is_asan:
@@ -274,7 +279,7 @@ def ValidateArgs(args):
     if (os.path.exists(vcvars_path)):
       msg('INCLUDE/LIB/PATH values will be derived from %s' % vcvars_path)
 
-def GetConfigArgs(args, is_debug, is_x64):
+def GetConfigArgs(args, is_debug, cpu):
   """
   Return merged GN args for the configuration and validate.
   """
@@ -291,8 +296,15 @@ def GetConfigArgs(args, is_debug, is_x64):
   result = MergeDicts(args, add_args, {
     'is_debug': is_debug,
-    'target_cpu': 'x64' if is_x64 else 'x86',
+    'target_cpu': cpu,
   })
+
+  if platform == 'linux' and cpu != 'arm':
+    # Remove any arm-related values from non-arm configs.
+    for key in result.keys():
+      if key.startswith('arm_'):
+        del result[key]
+
   ValidateArgs(result)
   return result
 
@@ -306,6 +318,8 @@ def GetAllPlatformConfigs(build_args):
   args = GetMergedArgs(build_args)
   create_debug = True
+  if platform == 'linux':
+    use_sysroot = GetArgValue(args, 'use_sysroot')
 
   # Don't create debug directories for asan builds.
   if GetArgValue(args, 'is_asan'):
@@ -314,15 +328,20 @@ def GetAllPlatformConfigs(build_args):
 
   # Always create x64 configs.
   if create_debug:
-    result['Debug_GN_x64'] = GetConfigArgs(args, True, True)
-  result['Release_GN_x64'] = GetConfigArgs(args, False, True)
+    result['Debug_GN_x64'] = GetConfigArgs(args, True, 'x64')
+  result['Release_GN_x64'] = GetConfigArgs(args, False, 'x64')
 
   # Create x86 configs on Windows and on Linux when using the sysroot.
-  if platform == 'windows' or \
-     (platform == 'linux' and GetArgValue(args, 'use_sysroot') == True):
+  if platform == 'windows' or (platform == 'linux' and use_sysroot):
     if create_debug:
-      result['Debug_GN_x86'] = GetConfigArgs(args, True, False)
-    result['Release_GN_x86'] = GetConfigArgs(args, False, False)
+      result['Debug_GN_x86'] = GetConfigArgs(args, True, 'x86')
+    result['Release_GN_x86'] = GetConfigArgs(args, False, 'x86')
+
+  # Create arm configs on Linux when using the sysroot.
+  if platform == 'linux' and use_sysroot:
+    if create_debug:
+      result['Debug_GN_arm'] = GetConfigArgs(args, True, 'arm')
+    result['Release_GN_arm'] = GetConfigArgs(args, False, 'arm')
 
   return result
diff --git a/tools/make_distrib.py b/tools/make_distrib.py
index 2493c2024..5eefb18a5 100644
--- a/tools/make_distrib.py
+++ b/tools/make_distrib.py
@@ -262,6 +262,9 @@ parser.add_option('--ninja-build',
 parser.add_option('--x64-build',
                   action='store_true', dest='x64build', default=False,
                   help='create a 64-bit binary distribution')
+parser.add_option('--arm-build',
+                  action='store_true', dest='armbuild', default=False,
+                  help='create an ARM binary distribution')
 parser.add_option('--minimal',
                   action='store_true', dest='minimal', default=False,
                   help='include only release build binary files')
@@ -292,6 +295,15 @@ if options.minimal and options.client:
   parser.print_help(sys.stderr)
   sys.exit()
 
+if options.x64build and options.armbuild:
+  print 'Cannot specify both --x64-build and --arm-build'
+  parser.print_help(sys.stderr)
+  sys.exit()
+
+if options.armbuild and platform != 'linux':
+  print '--arm-build is only supported on Linux.'
+  sys.exit()
+
 if not options.ninjabuild:
   print 'Ninja build is required on all platforms'
   sys.exit()
@@ -333,9 +345,12 @@ chromium_ver = args['MAJOR']+'.'+args['MINOR']+'.'+args['BUILD']+'.'+args['PATCH']
 # list of output directories to be archived
 archive_dirs = []
 
-platform_arch = '32'
 if options.x64build:
   platform_arch = '64'
+elif options.armbuild:
+  platform_arch = 'arm'
+else:
+  platform_arch = '32'
 
 # output directory
 output_dir_base = 'cef_binary_' + cef_ver
@@ -374,6 +389,8 @@ cef_paths2 = cef_paths2['variables']
 # scheme for GN via GetAllPlatformConfigs in gn_args.py.
 if options.x64build:
   build_dir_suffix = '_GN_x64'
+elif options.armbuild:
+  build_dir_suffix = '_GN_arm'
 else:
   build_dir_suffix = '_GN_x86'
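
For reference, a minimal usage sketch of the new option (not part of the patch; the download directory and branch number below are placeholders, and GN_DEFINES is assumed here as the usual mechanism for passing use_sysroot=true through to tools/gn_args.py). Per the checks added in automate-git.py and gn_args.py above, an ARM build requires Linux, GN, a branch newer than 2785, and use_sysroot=true:

    export CEF_USE_GN=1
    export GN_DEFINES=use_sysroot=true
    python tools/automate/automate-git.py --download-dir=/path/to/download \
        --branch=3071 --arm-build

automate-git.py then appends --arm-build to the make_distrib.py invocation (see the path construction above), which selects the '_GN_arm' build directories, the 'arm' platform_arch, and ultimately the 'linuxarm' platform name used by the cefbuilds tooling.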