mirror of
https://bitbucket.org/chromiumembedded/cef
synced 2025-02-08 16:18:51 +01:00
windows: Fix ARM64 v8:run_mksnapshot_default build failure (see https://crbug.com/1344759)
This commit is contained in:
parent
269879747d
commit
44748db484
@ -591,5 +591,11 @@ patches = [
|
||||
# Windows: Fix time_win.cc compile error with cef_sandbox.
|
||||
# https://chromium-review.googlesource.com/c/chromium/src/+/3718022
|
||||
'name': 'base_time_3718022',
|
||||
},
|
||||
{
|
||||
# Windows: Fix ARM64 v8:run_mksnapshot_default fails
|
||||
# https://bugs.chromium.org/p/chromium/issues/detail?id=1344759
|
||||
'name': 'win_v8_mksnapshot_1344759',
|
||||
'path': 'v8',
|
||||
}
|
||||
]
|
||||
|
81
patch/patches/win_v8_mksnapshot_1344759.patch
Normal file
81
patch/patches/win_v8_mksnapshot_1344759.patch
Normal file
@ -0,0 +1,81 @@
|
||||
diff --git src/heap/code-range.cc src/heap/code-range.cc
|
||||
index badef8e17f5..02d5553c5d6 100644
|
||||
--- src/heap/code-range.cc
|
||||
+++ src/heap/code-range.cc
|
||||
@@ -107,6 +107,22 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
|
||||
if (requested <= kMinimumCodeRangeSize) {
|
||||
requested = kMinimumCodeRangeSize;
|
||||
}
|
||||
+
|
||||
+ // When V8_EXTERNAL_CODE_SPACE_BOOL is enabled the allocatable region must
|
||||
+ // not cross the 4Gb boundary and thus the default compression scheme of
|
||||
+ // truncating the Code pointers to 32-bits still works. It's achieved by
|
||||
+ // specifying base_alignment parameter.
|
||||
+ // Note that the alignment is calculated before adjusting the requested size
|
||||
+ // for GetWritableReservedAreaSize(). The reasons are:
|
||||
+ // - this extra page is used by breakpad on Windows and it's allowed to cross
|
||||
+ // the 4Gb boundary,
|
||||
+ // - rounding up the adjusted size would result in requesting unnecessarily
|
||||
+ // big alignment.
|
||||
+ const size_t base_alignment =
|
||||
+ V8_EXTERNAL_CODE_SPACE_BOOL
|
||||
+ ? base::bits::RoundUpToPowerOfTwo(requested)
|
||||
+ : VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
|
||||
+
|
||||
const size_t reserved_area = GetWritableReservedAreaSize();
|
||||
if (requested < (kMaximalCodeRangeSize - reserved_area)) {
|
||||
requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
|
||||
@@ -120,14 +136,8 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
|
||||
VirtualMemoryCage::ReservationParams params;
|
||||
params.page_allocator = page_allocator;
|
||||
params.reservation_size = requested;
|
||||
- // base_alignment should be kAnyBaseAlignment when V8_ENABLE_NEAR_CODE_RANGE
|
||||
- // is enabled so that InitReservation would not break the alignment in
|
||||
- // GetAddressHint().
|
||||
const size_t allocate_page_size = page_allocator->AllocatePageSize();
|
||||
- params.base_alignment =
|
||||
- V8_EXTERNAL_CODE_SPACE_BOOL
|
||||
- ? base::bits::RoundUpToPowerOfTwo(requested)
|
||||
- : VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
|
||||
+ params.base_alignment = base_alignment;
|
||||
params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
|
||||
params.page_size = MemoryChunk::kPageSize;
|
||||
params.requested_start_hint =
|
||||
@@ -139,8 +149,8 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
|
||||
|
||||
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
|
||||
// Ensure that the code range does not cross the 4Gb boundary and thus
|
||||
- // default compression scheme of truncating the Code pointers to 32-bit
|
||||
- // still work.
|
||||
+ // default compression scheme of truncating the Code pointers to 32-bits
|
||||
+ // still works.
|
||||
Address base = page_allocator_->begin();
|
||||
Address last = base + page_allocator_->size() - 1;
|
||||
CHECK_EQ(GetPtrComprCageBaseAddress(base),
|
||||
diff --git src/utils/allocation.cc src/utils/allocation.cc
|
||||
index ac187407ce7..50255fcc641 100644
|
||||
--- src/utils/allocation.cc
|
||||
+++ src/utils/allocation.cc
|
||||
@@ -403,13 +403,17 @@ bool VirtualMemoryCage::InitReservation(
|
||||
base_ = reservation_.address() + params.base_bias_size;
|
||||
CHECK_EQ(reservation_.size(), params.reservation_size);
|
||||
} else {
|
||||
- // Otherwise, we need to try harder by first overreserving
|
||||
- // in hopes of finding a correctly aligned address within the larger
|
||||
- // reservation.
|
||||
+ // Otherwise, we need to try harder by first overreserving in hopes of
|
||||
+ // finding a correctly aligned address within the larger reservation.
|
||||
+ size_t bias_size = RoundUp(params.base_bias_size, allocate_page_size);
|
||||
Address hint =
|
||||
- RoundDown(params.requested_start_hint,
|
||||
+ RoundDown(params.requested_start_hint + bias_size,
|
||||
RoundUp(params.base_alignment, allocate_page_size)) -
|
||||
- RoundUp(params.base_bias_size, allocate_page_size);
|
||||
+ bias_size;
|
||||
+ // Alignments requiring overreserving more than twice the requested size
|
||||
+ // are not supported (they are too expensive and shouldn't be necessary
|
||||
+ // in the first place).
|
||||
+ DCHECK_LE(params.base_alignment, params.reservation_size);
|
||||
const int kMaxAttempts = 4;
|
||||
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
|
||||
// Reserve a region of twice the size so that there is an aligned address
|
Loading…
x
Reference in New Issue
Block a user