!561 elf/ld.so: new feature and bugfix for ld.so mapping shared objects with hugepages

From: @lvying6 
Reviewed-by: @liqingqing_1229 
Signed-off-by: @liqingqing_1229
openeuler-ci-bot 2023-02-27 14:13:14 +00:00 committed by Gitee
commit 89404a4686
GPG Key ID: 173E9B9CA92EEF8F
4 changed files with 441 additions and 1 deletion


@@ -0,0 +1,312 @@
From 7adf5ee832d2649fa85f8f104523932dab64f12e Mon Sep 17 00:00:00 2001
From: Lv Ying <lvying6@huawei.com>
Date: Tue, 7 Feb 2023 19:29:11 +0800
Subject: [PATCH 1/3] ld.so: support mapping hugetlb hugepages at PT_LOAD
 segment granularity
Only attempt to use hugepages for PT_LOAD segments marked with the PF_HUGEPAGE flag.
Even if a segment is marked with PF_HUGEPAGE, it will not necessarily be backed by
hugetlb hugepages, either because the segment is too small or because it lies in an
address range that is not suitable for hugepages. Also add the -i option to
hugepageedit so that an individual PT_LOAD segment can be marked with the
PF_HUGEPAGE flag.
Signed-off-by: Lv Ying <lvying6@huawei.com>
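
For illustration only (not part of this patch): a minimal standalone sketch of how the
per-segment flag could be inspected from userspace. It assumes a 64-bit ELF and
hardcodes the PF_HUGEPAGE value 0x01000000 introduced in the elf.h hunk below; the
file names used are hypothetical.

/* hp_inspect.c - illustrative sketch, not part of the patch.
   Print which PT_LOAD segments of a 64-bit ELF carry PF_HUGEPAGE. */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef PF_HUGEPAGE
#define PF_HUGEPAGE 0x01000000   /* value from the elf.h hunk below */
#endif

int main(int argc, char *argv[])
{
  if (argc != 2) {
    fprintf(stderr, "usage: %s <64-bit ELF file>\n", argv[0]);
    return 1;
  }

  int fd = open(argv[1], O_RDONLY);
  if (fd < 0) {
    perror("open");
    return 1;
  }

  Elf64_Ehdr ehdr;
  if (pread(fd, &ehdr, sizeof ehdr, 0) != sizeof ehdr
      || memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0
      || ehdr.e_ident[EI_CLASS] != ELFCLASS64) {
    fprintf(stderr, "not a readable 64-bit ELF file\n");
    close(fd);
    return 1;
  }

  for (int i = 0; i < ehdr.e_phnum; i++) {
    Elf64_Phdr phdr;
    off_t off = ehdr.e_phoff + (off_t) i * ehdr.e_phentsize;
    if (pread(fd, &phdr, sizeof phdr, off) != sizeof phdr)
      break;
    if (phdr.p_type == PT_LOAD)
      printf("PT_LOAD #%d: p_flags=0x%x%s\n", i, (unsigned) phdr.p_flags,
             (phdr.p_flags & PF_HUGEPAGE) ? " (PF_HUGEPAGE)" : "");
  }

  close(fd);
  return 0;
}

With this patch applied, a single segment could then be marked by index, e.g. a
hypothetical "hugepageedit -i 2 libfoo.so", and only that segment becomes a hugepage
candidate for ld.so.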
---
elf/dl-load.c | 15 ++++---
elf/dl-map-segments-hugepage.h | 72 ++++++++++++++++++++++++++++++----
elf/elf.h | 2 +
elf/hugepageedit.c | 58 ++++++++++++++++++++++++---
4 files changed, 128 insertions(+), 19 deletions(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index f4b5c4a7..e0d4fa2e 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1136,6 +1136,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
bool empty_dynamic = false;
#ifdef HUGEPAGE_SHARED_LIB
bool use_hugepage = false;
+ char hp_bitmap[l->l_phnum];
#endif
/* The struct is initialized to zero so this is not necessary:
@@ -1182,6 +1183,13 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
goto lose;
}
+#ifdef HUGEPAGE_SHARED_LIB
+ if (ph->p_flags & PF_HUGEPAGE) {
+ hp_bitmap[nloadcmds] = 1;
+ use_hugepage = true;
+ } else
+ hp_bitmap[nloadcmds] = 0;
+#endif
struct loadcmd *c = &loadcmds[nloadcmds++];
c->mapstart = ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize));
c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
@@ -1194,11 +1202,6 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
has_holes = true;
-#ifdef HUGEPAGE_SHARED_LIB
- if (ph->p_flags & PF_HUGEPAGE)
- use_hugepage = true;
-#endif
-
/* Optimize a common case. */
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
c->prot = (PF_TO_PROT
@@ -1297,7 +1300,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
((GLRO(dl_hugepage_mask) & DL_HUGEPAGE_PROBE_FLAG) && use_hugepage))
{
errstring = _dl_map_segments_largein (l, fd, header, type, loadcmds, nloadcmds,
- maplength, has_holes);
+ maplength, hp_bitmap);
if (__glibc_unlikely (errstring != NULL))
{
hp_errcode = errno;
diff --git a/elf/dl-map-segments-hugepage.h b/elf/dl-map-segments-hugepage.h
index 37788ef9..e7202131 100644
--- a/elf/dl-map-segments-hugepage.h
+++ b/elf/dl-map-segments-hugepage.h
@@ -400,6 +400,55 @@ _extra_mmap(struct link_map *l, const struct loadcmd loadcmds[], size_t nloadcmd
return extra_len;
}
+static __always_inline const char *
+__mmap_segment_normalsz(const struct loadcmd *c, ElfW(Addr) mapstart, int fd,
+ size_t *mapseglen)
+{
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
+ _dl_debug_printf("\tuse normal page mmap segment:[%lx-%lx)\n", mapstart,
+ mapstart + (c->allocend - c->mapstart));
+
+ if (c->mapend > c->mapstart &&
+ (__mmap((void *)mapstart, c->mapend - c->mapstart, c->prot,
+ MAP_FILE|MAP_PRIVATE|MAP_FIXED, fd, c->mapoff) == MAP_FAILED))
+ return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+
+ if (c->allocend > c->dataend) {
+ ElfW(Addr) zero, zeroend, zeropage;
+
+ zero = mapstart + c->dataend - c->mapstart;
+ zeroend = mapstart + c->allocend - c->mapstart;
+ zeropage = ((zero + GLRO(dl_pagesize) - 1)
+ & ~(GLRO(dl_pagesize) - 1));
+
+ if (zeroend < zeropage)
+ zeropage = zeroend;
+
+ if (zeropage > zero) {
+ if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0)) {
+ if (__mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
+ GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
+ return DL_MAP_SEGMENTS_ERROR_MPROTECT;
+ }
+
+ memset ((void *) zero, '\0', zeropage - zero);
+
+ if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
+ __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
+ GLRO(dl_pagesize), c->prot);
+ }
+
+ if (zeroend > zeropage) {
+ if (__mmap ((caddr_t) zeropage, zeroend - zeropage, c->prot,
+ MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0) == MAP_FAILED)
+ return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
+ }
+ }
+
+ *mapseglen = c->allocend - c->mapstart;
+ return NULL;
+}
+
/*
* PT_LOAD segment is described by p_filesz and p_memsz.
* The bytes from the file are mapped to the beginning of the memory segment.
@@ -409,11 +458,17 @@ _extra_mmap(struct link_map *l, const struct loadcmd loadcmds[], size_t nloadcmd
*/
static __always_inline const char *
_mmap_segment(struct link_map *l, const struct loadcmd loadcmds[], size_t nloadcmds,
- const struct loadcmd *c, ElfW(Addr) mapstart, int fd, size_t *mapseglen)
+ const struct loadcmd *c, ElfW(Addr) mapstart, int fd,
+ size_t *mapseglen, const char hp_bitmap[])
{
const char * errstring = NULL;
- size_t extra_len = _extra_mmap(l, loadcmds, nloadcmds, c, mapstart);
- size_t memsz_len = 0;
+ size_t extra_len, memsz_len = 0;
+
+ if (!hp_bitmap[((void *)c - (void *)loadcmds) / sizeof(struct loadcmd)]) {
+ return __mmap_segment_normalsz(c, mapstart, fd, mapseglen);
+ }
+
+ extra_len = _extra_mmap(l, loadcmds, nloadcmds, c, mapstart);
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf("\t%s(0x%lx): extra_len = 0x%lx\n\t{\n", __func__,
(unsigned long)c, extra_len);
@@ -448,7 +503,7 @@ static __always_inline const char *
_dl_map_segments_largein (struct link_map *l, int fd,
const ElfW(Ehdr) *header, int type,
const struct loadcmd loadcmds[], size_t nloadcmds,
- const size_t maplength, bool has_holes)
+ const size_t maplength, const char hp_bitmap[])
{
if (__glibc_unlikely (type != ET_DYN))
return DL_MAP_SEGMENTS_ERROR_TYPE;
@@ -470,7 +525,8 @@ _dl_map_segments_largein (struct link_map *l, int fd,
const struct loadcmd * c = loadcmds;
ElfW(Addr) text_addr = ALIGN_UP((ElfW(Addr))map_area_start + (text->mapstart - c->mapstart), SIZE_2MB);
size_t mapseglen;
- errstring = _mmap_segment(l, loadcmds, nloadcmds, text, text_addr, fd, &mapseglen);
+ errstring = _mmap_segment(l, loadcmds, nloadcmds, text, text_addr, fd,
+ &mapseglen, hp_bitmap);
if (__glibc_unlikely(errstring != NULL))
goto unmap_reserved_area;
@@ -493,7 +549,8 @@ _dl_map_segments_largein (struct link_map *l, int fd,
}
map_addr += c->mapstart - prev->mapstart;
- errstring = _mmap_segment(l, loadcmds, nloadcmds, c, map_addr, fd, &mapseglen);
+ errstring = _mmap_segment(l, loadcmds, nloadcmds, c, map_addr, fd,
+ &mapseglen, hp_bitmap);
if (__glibc_unlikely(errstring != NULL))
goto unmap_reserved_area;
prev = c;
@@ -514,7 +571,8 @@ _dl_map_segments_largein (struct link_map *l, int fd,
}
map_addr -= prev->mapstart - c->mapstart;
- errstring = _mmap_segment(l, loadcmds, nloadcmds, c, map_addr, fd, &mapseglen);
+ errstring = _mmap_segment(l, loadcmds, nloadcmds, c, map_addr, fd,
+ &mapseglen, hp_bitmap);
if (__glibc_unlikely(errstring != NULL))
goto unmap_reserved_area;
diff --git a/elf/elf.h b/elf/elf.h
index c5315d1b..a64576bb 100644
--- a/elf/elf.h
+++ b/elf/elf.h
@@ -730,8 +730,10 @@ typedef struct
/* Legal values for p_flags (segment flags). */
+#ifdef HUGEPAGE_SHARED_LIB
/* libhugetlbfs's hugeedit use 0x00100000, here use another */
#define PF_HUGEPAGE (0x01000000)
+#endif
#define PF_X (1 << 0) /* Segment is executable */
#define PF_W (1 << 1) /* Segment is writable */
#define PF_R (1 << 2) /* Segment is readable */
diff --git a/elf/hugepageedit.c b/elf/hugepageedit.c
index ab4247ad..0a44ece6 100644
--- a/elf/hugepageedit.c
+++ b/elf/hugepageedit.c
@@ -31,18 +31,52 @@
void print_usage(void)
{
- fprintf(stderr, "%s [-x] [-d] <ELF file>\n" \
+ fprintf(stderr, "%s [-x] [-d] [-i index] <ELF file>\n" \
"\tdefault mark all PT_LOAD segment PF_HUGEPAGE flag\n" \
"\t-x option only mark executable PT_LOAD segment PF_HUGEPAGE flag\n" \
+ "\t-i [index(start from 0)] option specifies the index that marks the PT_LOAD segment PF_HUGEPAGE flag\n" \
"\t-d option delete all the PT_LOAD segment PF_HUGEPAGE flag\n", TOOL_NAME);
}
+
+static long parse_index(char *str)
+{
+ char *endptr;
+
+ errno = 0;
+ long val = strtol(str, &endptr, 10);
+
+ if (errno != 0) {
+ perror("strtol");
+ return -1;
+ }
+
+ if (endptr == str) {
+ fprintf(stderr, "No digits were found in -i option\n");
+ return -1;
+ }
+
+ if (*endptr != '\0') {
+ fprintf(stderr, "Invalid characters %s in -i %s option\n", endptr, str);
+ return -1;
+ }
+
+ if (val < 0) {
+ fprintf(stderr, "Negative index %ld in -i %s option\n", val, str);
+ return -1;
+ }
+
+ return val;
+}
+
+
int main(int argc, char *argv[])
{
size_t length;
int exit_status = -1;
- int i, opt, delete = 0, exec_only = 0;
- while ((opt = getopt(argc, argv, "dx")) != -1)
+ int i, opt, delete = 0, exec_only = 0, index_set = 0;
+ long index = -1;
+ while ((opt = getopt(argc, argv, "dxi:")) != -1)
{
switch (opt)
{
@@ -52,15 +86,21 @@ int main(int argc, char *argv[])
case 'x':
exec_only = 1;
break;
+ case 'i':
+ index = parse_index(optarg);
+ index_set = 1;
+ if (index < 0)
+ return -1;
+ break;
default:
print_usage();
return 0;
}
}
- if (delete && exec_only)
+ if (delete + exec_only + index_set > 1)
{
- fprintf(stderr, "can not specify -x and -d option at the same time\n");
+ fprintf(stderr, "can not specify -x, -d and -i option at the same time\n");
return -1;
}
@@ -81,6 +121,12 @@ int main(int argc, char *argv[])
if (ehdr == NULL)
goto close_fd;
+ if (index_set && index >= ((ElfW(Ehdr) *)ehdr)->e_phnum) {
+ fprintf(stderr, "Index %ld in -i %s option out of PT_LOAD segment range\n",
+ index, argv[optind]);
+ goto close_fd;
+ }
+
ElfW(Phdr) *phdr = (ElfW(Phdr) *)get_phdr(ehdr, length);
if (phdr == NULL)
goto unmap;
@@ -100,7 +146,7 @@ int main(int argc, char *argv[])
}
else
{
- if (exec_only && !(phdr[i].p_flags & PF_X))
+ if ((exec_only && !(phdr[i].p_flags & PF_X)) || (index_set && index != i))
continue;
phdr[i].p_flags |= PF_HUGEPAGE;
}
--
2.38.1


@@ -0,0 +1,34 @@
From 2ace2eddbd6a0cda2006e632444cd19a360d12d4 Mon Sep 17 00:00:00 2001
From: Lv Ying <lvying6@huawei.com>
Date: Fri, 24 Feb 2023 16:14:42 +0800
Subject: [PATCH 2/3] elf/ld.so: stay compatible with the original policy of
 the LD_HUGEPAGE_LIB env
With the new policy, ld.so maps a PT_LOAD segment with hugepages according to its
PF_HUGEPAGE flag. We should remain compatible with the original policy of the
LD_HUGEPAGE_LIB env, which tries to use hugepages for every PT_LOAD segment.
Signed-off-by: Lv Ying <lvying6@huawei.com>
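
As a minimal sketch only (not the actual ld.so code), the combined policy the hunk
below implements: the per-segment flag check from patch 1 plus the env override from
this patch. The numeric value of DL_HUGEPAGE_LIB_LARGE_IN_FLAG is an assumption here;
the real definition lives in glibc's hugepage support code.

/* Illustrative sketch of the selection policy; names and the flag value
   are simplified stand-ins for the real definitions in ld.so. */
#include <stdbool.h>
#include <stdint.h>

#define PF_HUGEPAGE                   0x01000000u
#define DL_HUGEPAGE_LIB_LARGE_IN_FLAG 0x1u   /* assumed value for this sketch */

/* A segment is a hugepage candidate if either the original all-segments
   policy is enabled via LD_HUGEPAGE_LIB, or it carries PF_HUGEPAGE. */
bool segment_wants_hugepage (uint32_t hugepage_mask, uint32_t p_flags)
{
  if (hugepage_mask & DL_HUGEPAGE_LIB_LARGE_IN_FLAG)
    return true;                          /* original policy: every segment */
  return (p_flags & PF_HUGEPAGE) != 0;    /* new policy: per-segment flag */
}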
---
elf/dl-load.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index e0d4fa2e..e01bb448 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1184,7 +1184,9 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
}
#ifdef HUGEPAGE_SHARED_LIB
- if (ph->p_flags & PF_HUGEPAGE) {
+ if (GLRO(dl_hugepage_mask) & DL_HUGEPAGE_LIB_LARGE_IN_FLAG) {
+ hp_bitmap[nloadcmds] = 1;
+ } else if (ph->p_flags & PF_HUGEPAGE) {
hp_bitmap[nloadcmds] = 1;
use_hugepage = true;
} else
--
2.38.1


@@ -0,0 +1,84 @@
From 117a913e7b1c380bb99796588b266547de959de9 Mon Sep 17 00:00:00 2001
From: Lv Ying <lvying6@huawei.com>
Date: Thu, 23 Feb 2023 22:15:06 +0800
Subject: [PATCH 3/3] elf/ld.so: remove _mmap_hole when ld.so tries to map PT_LOAD
 segments with hugepages
When ld.so tries to map a PT_LOAD segment with hugepages, it is unnecessary to remap
the hole area, because __mmap_reserved_area has already mapped the whole reserved
area as PROT_NONE. Besides, _mmap_hole may map the hole starting at an address that
is not 4KB page aligned, which makes the mmap fail with EINVAL (invalid argument).
Signed-off-by: Lv Ying <lvying6@huawei.com>
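
A minimal standalone demonstration (not part of the patch) of the failure mode
described above: mmap with MAP_FIXED rejects an address that is not page aligned
with EINVAL, which is why mapping the hole from a misaligned end-of-segment address
cannot work.

/* map_fixed_misaligned.c - illustrative only. */
#define _DEFAULT_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
  long pagesz = sysconf(_SC_PAGESIZE);

  /* Reserve a page-aligned anonymous region first. */
  void *base = mmap(NULL, 2 * pagesz, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    perror("mmap");
    return 1;
  }

  /* Try to place a fixed mapping at a misaligned address inside it. */
  void *p = mmap((char *) base + 0x10, pagesz, PROT_READ,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (p == MAP_FAILED)
    printf("MAP_FIXED at misaligned address failed: %s\n", strerror(errno));

  munmap(base, 2 * pagesz);
  return 0;
}

On Linux this prints "Invalid argument", matching the failure seen when _mmap_hole
started the hole mapping at mapstart + mapseglen, an offset derived from allocend
that need not be page aligned.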
---
elf/dl-load.h | 2 --
elf/dl-map-segments-hugepage.h | 26 --------------------------
2 files changed, 28 deletions(-)
diff --git a/elf/dl-load.h b/elf/dl-load.h
index fcf91a47..f2428165 100644
--- a/elf/dl-load.h
+++ b/elf/dl-load.h
@@ -136,8 +136,6 @@ static const char *_dl_map_segments (struct link_map *l, int fd,
N_("cannot map Non shared object file in hugepage")
#define DL_MAP_SEGMENTS_ERROR_ARRANGE \
N_("shared object's PT_LOAD segment in wrong arrange")
-#define DL_MAP_SEGMENTS_ERROR_MAP_HOLE_FILL \
- N_("failed to mmap shared object's hole part of PT_LOAD")
#define DL_MAP_RESERVED_HUGEPAGE_AREA_ERROR \
N_("failed to map reserved 2MB contiguous hugepage va space")
#define DL_FIND_EXEC_SEGMENT_ERROR \
diff --git a/elf/dl-map-segments-hugepage.h b/elf/dl-map-segments-hugepage.h
index e7202131..a352c903 100644
--- a/elf/dl-map-segments-hugepage.h
+++ b/elf/dl-map-segments-hugepage.h
@@ -486,19 +486,6 @@ _mmap_segment(struct link_map *l, const struct loadcmd loadcmds[], size_t nloadc
return NULL;
}
-static __always_inline void *
-_mmap_hole(const struct loadcmd *current, const struct loadcmd *next,
- ElfW(Addr) mapstart, size_t mapseglen, int fd)
-{
- if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
- _dl_debug_printf("\tmmap hole area:[%lx-%lx)\n", mapstart + mapseglen,
- mapstart + (next->mapstart - current->mapstart));
- return __mmap((void *)(mapstart + mapseglen),
- next->mapstart - (current->mapstart + mapseglen),
- PROT_NONE, MAP_FILE|MAP_PRIVATE|MAP_FIXED,
- fd, current->mapoff + mapseglen);
-}
-
static __always_inline const char *
_dl_map_segments_largein (struct link_map *l, int fd,
const ElfW(Ehdr) *header, int type,
@@ -541,13 +528,6 @@ _dl_map_segments_largein (struct link_map *l, int fd,
goto unmap_reserved_area;
}
- if (prev->mapstart + mapseglen < c->mapstart &&
- _mmap_hole(prev, c, map_addr, mapseglen, fd) == MAP_FAILED)
- {
- errstring = DL_MAP_SEGMENTS_ERROR_MAP_HOLE_FILL;
- goto unmap_reserved_area;
- }
-
map_addr += c->mapstart - prev->mapstart;
errstring = _mmap_segment(l, loadcmds, nloadcmds, c, map_addr, fd,
&mapseglen, hp_bitmap);
@@ -582,12 +562,6 @@ _dl_map_segments_largein (struct link_map *l, int fd,
goto unmap_reserved_area;
}
- if (c->mapstart + mapseglen < prev->mapstart &&
- _mmap_hole(c, prev, map_addr, mapseglen, fd) == MAP_FAILED)
- {
- errstring = DL_MAP_SEGMENTS_ERROR_MAP_HOLE_FILL;
- goto unmap_reserved_area;
- }
prev = c;
--c;
}
--
2.38.1


@@ -66,7 +66,7 @@
##############################################################################
Name: glibc
Version: 2.34
Release: 111
Release: 112
Summary: The GNU libc libraries
License: %{all_license}
URL: http://www.gnu.org/software/glibc/
@@ -282,6 +282,9 @@ Patch9030: 6_6-LoongArch-Optimize-string-functions-strlen-strnlen.patch
Patch9031: math-Fix-asin-and-acos-invalid-exception-with-old-gc.patch
%endif
Patch9032: add-pthread_cond_clockwait-GLIBC_2_28.patch
Patch9033: 0001-ld.so-support-ld.so-mmap-hugetlb-hugepage-according-.patch
Patch9034: 0002-elf-ld.so-keep-compatible-with-the-original-policy-o.patch
Patch9035: 0003-elf-ld.so-remove-_mmap_hole-when-ld.so-mmap-PT_LOAD-.patch
Provides: ldconfig rtld(GNU_HASH) bundled(gnulib)
@@ -1447,6 +1450,13 @@ fi
%endif
%changelog
* Sat Feb 25 2023 Lv Ying <lvying6@huawei.com> - 2.34-112
- elf/ld.so: new feature and bugfix for ld.so mapping shared objects with hugepages:
- feature: support HUGEPAGE_PROBE plus hugepageedit marking of specified segments of
a shared object, so ld.so tries to use hugepages only for the marked segments
instead of all segments in the shared object
- bugfix: remove _mmap_hole when ld.so tries to map PT_LOAD segments with hugepages
* Thu Feb 23 2023 Qingqing Li <liqingqing3@huawei.com> - 2.34-111
- gmon: Fix allocated buffer overflow (bug 29444)