diff --git a/cef_paths2.gypi b/cef_paths2.gypi index c919c9d90..06180afa2 100644 --- a/cef_paths2.gypi +++ b/cef_paths2.gypi @@ -88,6 +88,7 @@ 'includes_linux': [ 'include/base/internal/cef_atomicops_atomicword_compat.h', 'include/base/internal/cef_atomicops_arm_gcc.h', + 'include/base/internal/cef_atomicops_arm64_gcc.h', 'include/base/internal/cef_atomicops_x86_gcc.h', 'include/internal/cef_linux.h', 'include/internal/cef_types_linux.h', diff --git a/include/base/cef_atomicops.h b/include/base/cef_atomicops.h index 96aebabf8..c8796c10b 100644 --- a/include/base/cef_atomicops.h +++ b/include/base/cef_atomicops.h @@ -182,6 +182,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr); #include "include/base/internal/cef_atomicops_mac.h" #elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY) #include "include/base/internal/cef_atomicops_x86_gcc.h" +#elif defined(COMPILER_GCC) && defined(__ARM_ARCH_ISA_A64) +#include "include/base/internal/cef_atomicops_arm64_gcc.h" #elif defined(COMPILER_GCC) && defined(__ARM_ARCH) #include "include/base/internal/cef_atomicops_arm_gcc.h" #else diff --git a/include/base/internal/cef_atomicops_arm64_gcc.h b/include/base/internal/cef_atomicops_arm64_gcc.h new file mode 100644 index 000000000..787b3cabe --- /dev/null +++ b/include/base/internal/cef_atomicops_arm64_gcc.h @@ -0,0 +1,335 @@ +// Copyright (c) 2012 Google Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the name Chromium Embedded +// Framework nor the names of its contributors may be used to endorse +// or promote products derived from this software without specific prior +// written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Do not include this header file directly. Use base/cef_atomicops.h +// instead. + +#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_ +#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_ + +namespace base { +namespace subtle { + +inline void MemoryBarrier() { + __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT +} + +// NoBarrier versions of the operation include "memory" in the clobber list. +// This is not required for direct usage of the NoBarrier versions of the +// operations. However this is required for correctness when they are used as +// part of the Acquire or Release versions, to ensure that nothing from outside +// the call is reordered between the operation and the memory barrier. This does +// not change the code generated, so has no or minimal impact on the +// NoBarrier operations. 
+ +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %w[prev], %[ptr] \n\t" // Load the previous value. + "cmp %w[prev], %w[old_value] \n\t" + "bne 1f \n\t" + "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. + "cbnz %w[temp], 0b \n\t" // Retry if it did not work. + "1: \n\t" + : [prev]"=&r" (prev), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [old_value]"IJr" (old_value), + [new_value]"r" (new_value) + : "cc", "memory" + ); // NOLINT + + return prev; +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + Atomic32 result; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %w[result], %[ptr] \n\t" // Load the previous value. + "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. + "cbnz %w[temp], 0b \n\t" // Retry if it did not work. + : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [new_value]"r" (new_value) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 result; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %w[result], %[ptr] \n\t" // Load the previous value. + "add %w[result], %w[result], %w[increment]\n\t" + "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result. + "cbnz %w[temp], 0b \n\t" // Retry on failure. 
+ : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [increment]"IJr" (increment) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + Atomic32 result; + + MemoryBarrier(); + result = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + + return result; +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + + prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + + return prev; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 prev; + + MemoryBarrier(); + prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + + return prev; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + __asm__ __volatile__ ( // NOLINT + "stlr %w[value], %[ptr] \n\t" + : [ptr]"=Q" (*ptr) + : [value]"r" (value) + : "memory" + ); // NOLINT +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return *ptr; +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + Atomic32 value; + + __asm__ __volatile__ ( // NOLINT + "ldar %w[value], %[ptr] \n\t" + : [value]"=r" (value) + : [ptr]"Q" (*ptr) + : "memory" + ); // NOLINT + + return value; +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + MemoryBarrier(); + return *ptr; +} + +// 64-bit versions of the operations. +// See the 32-bit versions for comments. 
+ +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %[prev], %[ptr] \n\t" + "cmp %[prev], %[old_value] \n\t" + "bne 1f \n\t" + "stxr %w[temp], %[new_value], %[ptr] \n\t" + "cbnz %w[temp], 0b \n\t" + "1: \n\t" + : [prev]"=&r" (prev), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [old_value]"IJr" (old_value), + [new_value]"r" (new_value) + : "cc", "memory" + ); // NOLINT + + return prev; +} + +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + Atomic64 result; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %[result], %[ptr] \n\t" + "stxr %w[temp], %[new_value], %[ptr] \n\t" + "cbnz %w[temp], 0b \n\t" + : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [new_value]"r" (new_value) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + Atomic64 result; + int32_t temp; + + __asm__ __volatile__ ( // NOLINT + "0: \n\t" + "ldxr %[result], %[ptr] \n\t" + "add %[result], %[result], %[increment] \n\t" + "stxr %w[temp], %[result], %[ptr] \n\t" + "cbnz %w[temp], 0b \n\t" + : [result]"=&r" (result), + [temp]"=&r" (temp), + [ptr]"+Q" (*ptr) + : [increment]"IJr" (increment) + : "memory" + ); // NOLINT + + return result; +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + Atomic64 result; + + MemoryBarrier(); + result = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + + return result; +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev; + + prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + + return prev; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + 
Atomic64 new_value) { + Atomic64 prev; + + MemoryBarrier(); + prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + + return prev; +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + __asm__ __volatile__ ( // NOLINT + "stlr %x[value], %[ptr] \n\t" + : [ptr]"=Q" (*ptr) + : [value]"r" (value) + : "memory" + ); // NOLINT +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return *ptr; +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + Atomic64 value; + + __asm__ __volatile__ ( // NOLINT + "ldar %x[value], %[ptr] \n\t" + : [value]"=r" (value) + : [ptr]"Q" (*ptr) + : "memory" + ); // NOLINT + + return value; +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + MemoryBarrier(); + return *ptr; +} + +} } // namespace base::subtle + +#endif // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_ARM64_GCC_H_ + diff --git a/tools/automate/automate-git.py b/tools/automate/automate-git.py index 0af1d1c7c..a8c7f36c7 100644 --- a/tools/automate/automate-git.py +++ b/tools/automate/automate-git.py @@ -497,6 +497,8 @@ def get_build_directory_name(is_debug): build_dir += 'GN_x64' elif options.armbuild: build_dir += 'GN_arm' + elif options.arm64build: + build_dir += 'GN_arm64' else: build_dir += 'GN_x86' else: @@ -817,6 +819,12 @@ parser.add_option( dest='armbuild', default=False, help='Create an ARM build.') +parser.add_option( + '--arm64-build', + action='store_true', + dest='arm64build', + default=False, + help='Create an ARM64 build.') # Test-related options. 
 parser.add_option(
@@ -959,7 +967,7 @@ if (options.noreleasebuild and \
   parser.print_help(sys.stderr)
   sys.exit()
 
-if options.x64build and options.armbuild:
+if options.x64build + options.armbuild + options.arm64build > 1:
   print 'Invalid combination of options.'
   parser.print_help(sys.stderr)
   sys.exit()
@@ -1025,6 +1033,9 @@ branch_is_2743_or_older = (cef_branch != 'trunk' and int(cef_branch) <= 2743)
 # True if the requested branch is newer than 2785.
 branch_is_newer_than_2785 = (cef_branch == 'trunk' or int(cef_branch) > 2785)
 
+# True if the requested branch is newer than 2840.
+branch_is_newer_than_2840 = (cef_branch == 'trunk' or int(cef_branch) > 2840)
+
 # True if the requested branch is 3029 or older.
 branch_is_3029_or_older = (cef_branch != 'trunk' and int(cef_branch) <= 3029)
 
@@ -1050,8 +1061,18 @@ if use_gn:
     if not branch_is_newer_than_2785:
       print 'The ARM build option is not supported with branch 2785 and older.'
       sys.exit()
+
+  if options.arm64build:
+    if platform != 'linux':
+      print 'The ARM64 build option is only supported on Linux.'
+      sys.exit()
+
+    if not branch_is_newer_than_2840:
+      print 'The ARM64 build option is not supported with branch 2840 and older.'
+      sys.exit()
+
 else:
-  if options.armbuild:
+  if options.armbuild or options.arm64build:
     print 'The ARM build option is not supported by GYP.'
sys.exit() @@ -1642,6 +1663,8 @@ if not options.nodistrib and (chromium_checkout_changed or \ path = path + ' --x64-build' elif options.armbuild: path = path + ' --arm-build' + elif options.arm64build: + path = path + ' --arm64-build' if type == 'minimal': path = path + ' --minimal' diff --git a/tools/cefbuilds/cef_html_builder.py b/tools/cefbuilds/cef_html_builder.py index f529ce81d..92a8912b8 100644 --- a/tools/cefbuilds/cef_html_builder.py +++ b/tools/cefbuilds/cef_html_builder.py @@ -123,6 +123,7 @@ class cef_html_builder: 'linux32': 'Linux 32-bit', 'linux64': 'Linux 64-bit', 'linuxarm': 'Linux ARM', + 'linuxarm64': 'Linux ARM64', 'macosx64': 'Mac OS X 64-bit', 'windows32': 'Windows 32-bit', 'windows64': 'Windows 64-bit' diff --git a/tools/cefbuilds/cef_json_builder.py b/tools/cefbuilds/cef_json_builder.py index 292d46502..31d3f259a 100644 --- a/tools/cefbuilds/cef_json_builder.py +++ b/tools/cefbuilds/cef_json_builder.py @@ -92,8 +92,8 @@ class cef_json_builder: @staticmethod def get_platforms(): """ Returns the list of supported platforms. 
""" - return ('linux32', 'linux64', 'linuxarm', 'macosx64', 'windows32', - 'windows64') + return ('linux32', 'linux64', 'linuxarm', 'linuxarm64', 'macosx64', + 'windows32', 'windows64') @staticmethod def get_distrib_types(): diff --git a/tools/gn_args.py b/tools/gn_args.py index b1a06e0e9..057621a3c 100644 --- a/tools/gn_args.py +++ b/tools/gn_args.py @@ -317,14 +317,17 @@ def ValidateArgs(args): elif platform == 'windows': assert target_cpu in ('x86', 'x64'), 'target_cpu must be "x86" or "x64"' elif platform == 'linux': - assert target_cpu in ('x86', 'x64', - 'arm'), 'target_cpu must be "x86", "x64" or "arm"' + assert target_cpu in ( + 'x86', 'x64', 'arm', + 'arm64'), 'target_cpu must be "x86", "x64", "arm" or "arm64"' if platform == 'linux': if target_cpu == 'x86': assert use_sysroot, 'target_cpu="x86" requires use_sysroot=true' elif target_cpu == 'arm': assert use_sysroot, 'target_cpu="arm" requires use_sysroot=true' + elif target_cpu == 'arm64': + assert use_sysroot, 'target_cpu="arm64" requires use_sysroot=true' # ASAN requires Release builds. if is_asan: @@ -421,7 +424,7 @@ def GetConfigArgs(args, is_debug, cpu): 'target_cpu': cpu, }) - if platform == 'linux' and cpu != 'arm': + if platform == 'linux' and not cpu.startswith('arm'): # Remove any arm-related values from non-arm configs. for key in result.keys(): if key.startswith('arm_'): @@ -474,6 +477,8 @@ def LinuxSysrootExists(cpu): sysroot_name = 'debian_sid_amd64-sysroot' elif cpu == 'arm': sysroot_name = 'debian_sid_arm-sysroot' + elif cpu == 'arm64': + sysroot_name = 'debian_sid_arm64-sysroot' else: raise Exception('Unrecognized sysroot CPU: %s' % cpu) @@ -502,7 +507,7 @@ def GetAllPlatformConfigs(build_args): use_sysroot = GetArgValue(args, 'use_sysroot') if use_sysroot: # Only generate configurations for sysroots that have been installed. 
- for cpu in ('x86', 'x64', 'arm'): + for cpu in ('x86', 'x64', 'arm', 'arm64'): if LinuxSysrootExists(cpu): supported_cpus.append(cpu) else: diff --git a/tools/make_distrib.py b/tools/make_distrib.py index 23286f4d6..f4154dbc9 100644 --- a/tools/make_distrib.py +++ b/tools/make_distrib.py @@ -478,6 +478,12 @@ parser.add_option( dest='armbuild', default=False, help='create an ARM binary distribution (Linux only)') +parser.add_option( + '--arm64-build', + action='store_true', + dest='arm64build', + default=False, + help='create an ARM64 binary distribution (Linux only)') parser.add_option( '--minimal', action='store_true', @@ -530,13 +536,13 @@ if options.minimal and options.client: parser.print_help(sys.stderr) sys.exit() -if options.x64build and options.armbuild: - print 'Cannot specify both --x64-build and --arm-build' +if options.x64build + options.armbuild + options.arm64build > 1: + print 'Invalid combination of build options.' parser.print_help(sys.stderr) sys.exit() -if options.armbuild and platform != 'linux': - print '--arm-build is only supported on Linux.' +if (options.armbuild or options.arm64build) and platform != 'linux': + print '--arm-build and --arm64-build only supported on Linux.' sys.exit() if options.sandbox and not platform in ('macosx', 'windows'): @@ -586,15 +592,14 @@ chromium_ver = formatter.get_chromium_version_string() archive_dirs = [] if options.x64build: - if options.armbuild: - platform_arch = 'arm64' - binary_arch = 'arm64' - else: - platform_arch = '64' - binary_arch = 'x64' + platform_arch = '64' + binary_arch = 'x64' elif options.armbuild: platform_arch = 'arm' binary_arch = 'arm' +elif options.arm64build: + platform_arch = 'arm64' + binary_arch = 'arm64' else: platform_arch = '32' binary_arch = 'x86' @@ -644,6 +649,8 @@ if options.x64build: build_dir_suffix = '_GN_x64' elif options.armbuild: build_dir_suffix = '_GN_arm' +elif options.arm64build: + build_dir_suffix = '_GN_arm64' else: build_dir_suffix = '_GN_x86'