 pkgs/development/interpreters/spidermonkey/aarch64-48bit-va-fix.patch | 106 ++++++++++++++++++
 1 file changed, 106 insertions(+), 0 deletions(-)
diff --git a/pkgs/development/interpreters/spidermonkey/aarch64-48bit-va-fix.patch b/pkgs/development/interpreters/spidermonkey/aarch64-48bit-va-fix.patch
new file mode 100644
index 00000000000..8258a46b174
--- /dev/null
+++ b/pkgs/development/interpreters/spidermonkey/aarch64-48bit-va-fix.patch
@@ -0,0 +1,106 @@
+From a0c0f32299419359b44ac0f880c1ea9073ae51e1 Mon Sep 17 00:00:00 2001
+From: Zheng Xu <zheng.xu@linaro.org>
+Date: Fri, 02 Sep 2016 17:40:05 +0800
+Subject: [PATCH] Bug 1143022 - Manually mmap on arm64 to ensure high 17 bits are clear. r=ehoogeveen
+
+There might be a 48-bit VA on arm64, depending on the kernel configuration.
+Manually mmap heap memory to align with the assumption made by the JS engine.
+
+Change-Id: Ic5d2b2fe4b758b3c87cc0688348af7e71a991146
+---
+
+diff --git a/js/src/gc/Memory.cpp b/js/src/gc/Memory.cpp
+index 5b386a2..38101cf 100644
+--- a/js/src/gc/Memory.cpp
++++ b/js/src/gc/Memory.cpp
+@@ -309,6 +309,75 @@
+ #endif
+ }
+ 
++static inline void *
++MapMemory(size_t length, int prot, int flags, int fd, off_t offset)
++{
++#if defined(__ia64__)
++    /*
++     * The JS engine assumes that all allocated pointers have their high 17 bits clear,
++     * which ia64's mmap doesn't support directly. However, we can emulate it by passing
++     * mmap an "addr" parameter with those bits clear. The mmap will return that address,
++     * or the nearest available memory above that address, providing a near-guarantee
++     * that those bits are clear. If they are not, we return MAP_FAILED below
++     * to indicate out-of-memory.
++     *
++     * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
++     * address space.
++     *
++     * See Bug 589735 for more information.
++     */
++    void *region = mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
++    if (region == MAP_FAILED)
++        return MAP_FAILED;
++    /*
++     * If the allocated memory doesn't have its upper 17 bits clear, consider it
++     * as out of memory.
++     */
++    if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
++        JS_ALWAYS_TRUE(0 == munmap(region, length));
++        return MAP_FAILED;
++    }
++    return region;
++#elif defined(__aarch64__)
++   /*
++    * There might be a similar virtual address issue on arm64, depending on
++    * hardware and kernel configuration. But the workaround is slightly
++    * different due to the different mmap behavior.
++    *
++    * TODO: Merge with the above code block if this implementation works for
++    * ia64 and sparc64.
++    */
++    const uintptr_t start = (uintptr_t)(0x0000070000000000UL);
++    const uintptr_t end   = (uintptr_t)(0x0000800000000000UL);
++    const uintptr_t step  = ChunkSize;
++   /*
++    * Optimization options if there are too many retries in practice:
++    * 1. Examine /proc/self/maps to find an available address. This file is
++    *    not always available, however. In addition, even if we examine
++    *    /proc/self/maps, we may still need to retry several times due to
++    *    racing with other threads.
++    * 2. Use a global/static variable with lock to track the addresses we have
++    *    allocated or tried.
++    */
++    uintptr_t hint;
++    void* region = MAP_FAILED;
++    for (hint = start; region == MAP_FAILED && hint + length <= end; hint += step) {
++        region = mmap((void*)hint, length, prot, flags, fd, offset);
++        if (region != MAP_FAILED) {
++            if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
++                if (munmap(region, length)) {
++                    MOZ_ASSERT(errno == ENOMEM);
++                }
++                region = MAP_FAILED;
++            }
++        }
++    }
++    return region == MAP_FAILED ? NULL : region;
++#else
++    return mmap(NULL, length, prot, flags, fd, offset);
++#endif
++}
++
+ void *
+ MapAlignedPages(size_t size, size_t alignment)
+ {
+@@ -322,12 +391,12 @@
+ 
+     /* Special case: If we want page alignment, no further work is needed. */
+     if (alignment == PageSize) {
+-        return mmap(NULL, size, prot, flags, -1, 0);
++        return MapMemory(size, prot, flags, -1, 0);
+     }
+ 
+     /* Overallocate and unmap the region's edges. */
+     size_t reqSize = Min(size + 2 * alignment, 2 * size);
+-    void *region = mmap(NULL, reqSize, prot, flags, -1, 0);
++    void *region = MapMemory(reqSize, prot, flags, -1, 0);
+     if (region == MAP_FAILED)
+         return NULL;
+
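
The aarch64 branch added above reduces to one technique: probe mmap with ascending hint addresses until the kernel returns a region whose high 17 bits are clear, unmapping any candidate that lands too high. Below is a minimal standalone C sketch of that probe, not part of the patch; probe_low_mmap, CHUNK_SIZE, HIGH_BITS, and the main driver are illustrative stand-ins for the patch's MapMemory, ChunkSize, the 0xffff800000000000 mask, and its callers.

/*
 * Minimal sketch of the hinted-mmap probe (assumptions: 64-bit Linux with
 * MAP_ANONYMOUS; probe_low_mmap and CHUNK_SIZE are illustrative names,
 * not the patch's identifiers).
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define CHUNK_SIZE ((uintptr_t)1 << 20)      /* stand-in for ChunkSize (1 MiB) */
#define HIGH_BITS  0xffff800000000000ULL     /* the 17 bits that must be clear */

static void *probe_low_mmap(size_t length)
{
    const uintptr_t start = 0x0000070000000000UL;
    const uintptr_t end   = 0x0000800000000000UL;

    /* Walk hint addresses upward; without MAP_FIXED the kernel treats the
     * hint as advisory, so a busy hint simply yields some other address. */
    for (uintptr_t hint = start; hint + length <= end; hint += CHUNK_SIZE) {
        void *region = mmap((void *)hint, length, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (region == MAP_FAILED)
            continue;
        /* Reject a region whose last byte has any of the high 17 bits set. */
        if (((uintptr_t)region + (length - 1)) & HIGH_BITS) {
            munmap(region, length);
            continue;
        }
        return region;
    }
    return NULL;  /* exhausted the hint range: treat as out-of-memory */
}

int main(void)
{
    void *p = probe_low_mmap((size_t)1 << 20);
    printf("mapped at %p\n", p);
    return p == NULL;
}

Because the hint is only advisory, an occupied hint does not fail the call; the kernel maps elsewhere and the result must be re-checked. That is why the comment in the patch lists the /proc/self/maps scan and the shared-bookkeeping variable as optimizations against long retry runs.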
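The second hunk swaps mmap for MapMemory inside MapAlignedPages, whose surrounding logic relies on the classic overallocate-and-trim approach to alignment. Here is a simplified sketch of that trick, assuming size and alignment are multiples of the page size and using a flat size + alignment overallocation instead of the file's Min(size + 2 * alignment, 2 * size) bound; map_aligned is a hypothetical name.

/*
 * Sketch of overallocate-and-trim alignment (assumptions: size and
 * alignment are multiples of the page size; map_aligned is illustrative,
 * not the patch's API).
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *map_aligned(size_t size, size_t alignment)
{
    /* Map alignment extra bytes so an aligned sub-range must exist. */
    size_t req = size + alignment;
    uint8_t *region = (uint8_t *)mmap(NULL, req, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if ((void *)region == MAP_FAILED)
        return NULL;

    uintptr_t front = ((uintptr_t)region) % alignment;
    front = front ? alignment - front : 0;

    /* Unmap the misaligned front edge and the unused tail; both offsets
     * are page multiples because mmap returns page-aligned memory. */
    if (front)
        munmap(region, front);
    if (req - front - size)
        munmap(region + front + size, req - front - size);
    return region + front;
}

Routing this path through MapMemory is the point of the hunk: on aarch64 even the aligned case must go through the hinted probe, or the over-allocated region could land above the 47-bit boundary that the engine's pointer tagging assumes.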