From: Cupertino Miranda <cupertino.miranda@oracle.com>
To: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Cc: Adhemerval Zanella Netto <adhemerval.zanella@linaro.org>,
'GNU C Library' <libc-alpha@sourceware.org>
Subject: Re: [PATCH v5 1/1] Created tunable to force small pages on stack allocation.
Date: Fri, 14 Apr 2023 17:35:49 +0100 [thread overview]
Message-ID: <87r0smbevu.fsf@oracle.com> (raw)
In-Reply-To: <PAWPR08MB89825DAD686E58DB18DF86FC83999@PAWPR08MB8982.eurprd08.prod.outlook.com>
[-- Attachment #1: Type: text/plain, Size: 828 bytes --]
Hi Wilco,
I had a test application demonstrating the problem.
I include it in attach.
From my particular interest in the tunable this is the difference:
# GLIBC_TUNABLES=glibc.pthread.stack_hugetlb=0 ./tststackalloc 1
Page size: 4 kB, 2 MB huge pages
Will attempt to align allocations to make stacks eligible for huge pages
pid: 3482023 (/proc/3482023/smaps)
stack_size = 2097152, 0x200000
Creating 128 threads...
RSS: 448 pages (1835008 bytes = 1 MB)
Press enter to exit...
# GLIBC_TUNABLES=glibc.pthread.stack_hugetlb=1 ./tststackalloc 1
Page size: 4 kB, 2 MB huge pages
Will attempt to align allocations to make stacks eligible for huge pages
pid: 3482254 (/proc/3482254/smaps)
stack_size = 2097152, 0x200000
Creating 128 threads...
RSS: 65891 pages (269889536 bytes = 257 MB)
Press enter to exit...
Regards,
Cupertino
[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: tststackalloc.c --]
[-- Type: text/x-csrc, Size: 4666 bytes --]
// Compile & run:
// gcc -Wall -g -o tststackalloc tststackalloc.c -lpthread
// ./tststackalloc 1 # Attempt to use huge pages for stacks -> RSS bloat
// ./tststackalloc 0 # Do not attempt to use huge pages -> No RSS bloat
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <fcntl.h>
// Number of threads to create
#define NOOF_THREADS (128)
// Size of a small page (hard-coded; assumes a 4 kB base-page kernel -- TODO confirm)
#define SMALL_PAGE_SIZE (4*1024)
// Size of a huge page (hard-coded; assumes 2 MB x86-64 huge pages -- TODO confirm)
#define HUGE_PAGE_SIZE (2*1024*1024)
// Total size of the thread stack, including the guard page(s)
#define STACK_SIZE_TOTAL (HUGE_PAGE_SIZE)
// Size of the guard page(s)
#define GUARD_SIZE (SMALL_PAGE_SIZE)
// Optional diagnostics: print each thread's stack range / dump /proc/self/smaps
//#define PRINT_STACK_RANGES
//#define PRINT_PROC_SMAPS
// When enabled (set to non-zero), tries to align thread stacks on
// huge page boundaries, making them eligible for huge pages
static int huge_page_align_stacks;
// Shutdown flag polled by the worker threads once per second.
// NOTE(review): volatile is not a synchronization primitive; for this
// simple test it only needs to eventually become visible, but an
// atomic_int would be the strictly correct choice.
static volatile int exit_thread = 0;
#if defined(PRINT_STACK_RANGES)
// Report the calling thread's stack placement (base, end, size) on
// stderr. Terminates the process if the pthread attributes cannot be
// retrieved.
static void print_stack_range(void) {
  pthread_attr_t attr;
  void *stack_base;
  size_t stack_size;

  if (pthread_getattr_np(pthread_self(), &attr) != 0) {
    fprintf(stderr, "Error looking up attr\n");
    exit(1);
  }
  if (pthread_attr_getstack(&attr, &stack_base, &stack_size) != 0) {
    fprintf(stderr, "Cannot locate current stack attributes!\n");
    exit(1);
  }
  pthread_attr_destroy(&attr);

  fprintf(stderr, "Stack: %p-%p (0x%zx/%zd)\n",
          stack_base, stack_base + stack_size, stack_size, stack_size);
}
#endif
// Thread entry point: optionally report this thread's stack range,
// then idle (sleeping in one-second slices) until main() raises
// exit_thread. Returns NULL so that the pthread_join() loop in main()
// can actually complete.
//
// Fix: the original ended with exit(2), which terminated the whole
// process as soon as the first thread observed exit_thread, making the
// join loop and main()'s `return 0` unreachable.
static void* start(void* arg) {
#if defined(PRINT_STACK_RANGES)
  print_stack_range();
#endif
  while (!exit_thread) {
    sleep(1);
  }
  return NULL;
}
#if defined(PRINT_PROC_SMAPS)
// Dump /proc/self/<file> to stdout. Best effort: reports and returns
// if the file cannot be opened.
//
// Fix: the original never checked the open() result, so a failure
// passed fd -1 straight into read(); write() results were also ignored.
static void print_proc_file(const char* file) {
  char path[128];
  snprintf(path, sizeof(path), "/proc/self/%s", file);

  int fd = open(path, O_RDONLY);
  if (fd < 0) {
    fprintf(stderr, "Cannot open %s\n", path);
    return;
  }

  char buf[4096];
  ssize_t n;
  while ((n = read(fd, buf, sizeof(buf))) > 0) {
    if (write(1, buf, n) < 0)
      break;  // stdout gone; stop copying
  }
  close(fd);
}
#endif
// Return the process resident set size in pages, read from the second
// field of /proc/self/statm. Returns 0 if the file cannot be opened or
// parsed.
//
// Fixes: the original never checked fopen() (NULL deref on non-procfs
// systems), read `rss` uninitialized when fscanf() failed (UB), and
// leaked the FILE handle (no fclose()).
static size_t get_rss(void) {
  FILE* stat = fopen("/proc/self/statm", "r");
  if (stat == NULL)
    return 0;

  long rss = 0;
  if (fscanf(stat, "%*d %ld", &rss) != 1)
    rss = 0;
  fclose(stat);

  return (size_t)rss;
}
// Round `value` down to the nearest multiple of `alignment`, which
// must be a power of two.
uintptr_t align_down(uintptr_t value, uintptr_t alignment) {
  uintptr_t mask = alignment - 1;
  return value & ~mask;
}
// Do a series of small, single page mmap calls to attempt to set
// everything up so that the next mmap call (glibc allocating the
// stack) returns a 2MB aligned range. The kernel "expands" vmas from
// higher to lower addresses (subsequent calls return ranges starting
at lower addresses), so this function keeps calling mmap until a
// huge page aligned address is returned. The next range (the stack)
// will then end on that same address.
// Burn single-page PROT_NONE mappings until mmap returns an address
// aligned to `alignment`, so that the NEXT mapping (the thread stack)
// ends on that boundary.
//
// Fixes: the loop condition hard-coded HUGE_PAGE_SIZE, silently
// ignoring the `alignment` parameter; and a failing mmap (MAP_FAILED,
// i.e. (uintptr_t)-1, which is never aligned) spun forever.
static void align_next_on(uintptr_t alignment) {
  uintptr_t p;
  do {
    p = (uintptr_t)mmap(NULL, SMALL_PAGE_SIZE, PROT_NONE,
                        MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
    if (p == (uintptr_t)MAP_FAILED) {
      perror("mmap");
      exit(1);
    }
  } while (p != align_down(p, alignment));
}
// Driver: parse the huge-page flag, create NOOF_THREADS sleeping
// threads (optionally aligning each stack on a huge page boundary),
// report the resulting process RSS, then wait for the user before
// shutting the threads down.
int main(int argc, char* argv[]) {
  pthread_t t[NOOF_THREADS];
  pthread_attr_t attr;
  int i;

  if (argc != 2) {
    printf("Usage: %s <huge page stacks>\n", argv[0]);
    printf(" huge page stacks = 1 - attempt to use huge pages for stacks\n");
    exit(1);
  }
  huge_page_align_stacks = atoi(argv[1]);

  // Touch the heap once up front (presumably so the first malloc
  // arena mapping is established before the stack mappings below --
  // TODO confirm with the original author).
  void* dummy = malloc(1024);
  free(dummy);

  fprintf(stderr, "Page size: %d kB, %d MB huge pages\n",
          SMALL_PAGE_SIZE / 1024, HUGE_PAGE_SIZE / (1024 * 1024));
  if (huge_page_align_stacks) {
    fprintf(stderr, "Will attempt to align allocations to make stacks eligible for huge pages\n");
  }

  pid_t pid = getpid();
  fprintf(stderr, "pid: %d (/proc/%d/smaps)\n", pid, pid);

  size_t guard_size = GUARD_SIZE;
  size_t stack_size = STACK_SIZE_TOTAL;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, stack_size);
  pthread_attr_setguardsize(&attr, guard_size);
  // Fix: size_t takes %zu/%zx; the original's %d/%x is undefined
  // behavior on LP64 targets.
  fprintf(stderr, "stack_size = %zu, 0x%zx\n", stack_size, stack_size);

  fprintf(stderr, "Creating %d threads...\n", NOOF_THREADS);
  for (i = 0; i < NOOF_THREADS; i++) {
    if (huge_page_align_stacks) {
      // Align (next) allocation -- the stack glibc is about to mmap --
      // on a huge page boundary.
      align_next_on(HUGE_PAGE_SIZE);
    }
    // Fix: the original ignored pthread_create failures, leaving t[i]
    // uninitialized for the join loop below.
    if (pthread_create(&t[i], &attr, start, NULL) != 0) {
      fprintf(stderr, "pthread_create failed\n");
      exit(1);
    }
  }
  pthread_attr_destroy(&attr);  // fix: attr was never destroyed
  sleep(1);

#if defined(PRINT_PROC_SMAPS)
  print_proc_file("smaps");
#endif

  size_t rss = get_rss();
  // Fix: %zu for the unsigned size_t values (was %zd).
  fprintf(stderr, "RSS: %zu pages (%zu bytes = %zu MB)\n", rss,
          rss * SMALL_PAGE_SIZE, rss * SMALL_PAGE_SIZE / 1024 / 1024);

  fprintf(stderr, "Press enter to exit...\n");
  getchar();

  exit_thread = 1;
  for (i = 0; i < NOOF_THREADS; i++) {
    pthread_join(t[i], NULL);
  }
  return 0;
}
[-- Attachment #3: Type: text/plain, Size: 460 bytes --]
Wilco Dijkstra writes:
> Hi,
>
>> The next question is whether the splitting of one huge page causes the whole
>> stack mmap to use small pages too.
>
> Btw if it wasn't obvious, you can trivially check this by creating threads that use
> 3MB of stack. Then the RSS per thread should be either 3MB (only small pages,
> no RSS loss!), 4MB (2 large pages, no RSS loss), 5 MB (3MB small pages, 2MB loss)
> or 6MB (2 large pages, 2MB loss).
>
> Cheers,
> Wilco
next prev parent reply other threads:[~2023-04-14 16:36 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-04-13 15:43 Wilco Dijkstra
2023-04-13 16:23 ` Cupertino Miranda
2023-04-13 17:48 ` Adhemerval Zanella Netto
2023-04-14 11:28 ` Cupertino Miranda
2023-04-14 13:24 ` Wilco Dijkstra
2023-04-14 14:49 ` Cupertino Miranda
2023-04-14 15:32 ` Wilco Dijkstra
2023-04-14 16:03 ` Wilco Dijkstra
2023-04-14 16:35 ` Cupertino Miranda [this message]
2023-04-14 23:10 ` Wilco Dijkstra
2023-04-14 16:27 ` Cupertino Miranda
-- strict thread matches above, loose matches on Subject: below --
2023-03-28 15:22 [PATCH v5 0/1] *** " Cupertino Miranda
2023-03-28 15:22 ` [PATCH v5 1/1] " Cupertino Miranda
2023-04-11 19:56 ` Adhemerval Zanella Netto
2023-04-12 8:53 ` Cupertino Miranda
2023-04-12 14:10 ` Adhemerval Zanella Netto
2023-04-13 16:13 ` Cupertino Miranda
2023-04-14 11:41 ` Adhemerval Zanella Netto
2023-04-14 12:27 ` Cupertino Miranda
2023-04-14 13:06 ` Adhemerval Zanella Netto
2023-04-14 14:33 ` Cupertino Miranda
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=87r0smbevu.fsf@oracle.com \
--to=cupertino.miranda@oracle.com \
--cc=Wilco.Dijkstra@arm.com \
--cc=adhemerval.zanella@linaro.org \
--cc=libc-alpha@sourceware.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).