From: Carlos O'Donell <carlos@redhat.com>
To: "H.J. Lu" <hjl.tools@gmail.com>, libc-alpha@sourceware.org
Subject: Re: [PATCH 1/2] Update tunable min/max values
Date: Fri, 3 Jul 2020 15:49:16 -0400
Message-ID: <cc15fba0-0fc1-d0fa-3895-fda3bafb8f1b@redhat.com>
In-Reply-To: <20200703175220.1178840-2-hjl.tools@gmail.com>
On 7/3/20 1:52 PM, H.J. Lu wrote:
> Add __tunable_update_val to update tunable min/max values and move x86
> processor cache info to cpu_features.
Needs a little more cleanup. I'd like to avoid more macro APIs if possible.
Please post a v2.
> ---
> elf/dl-tunables.c          |  51 ++-
> elf/dl-tunables.h          |  15 +
> sysdeps/i386/cacheinfo.c   |   3 -
> sysdeps/x86/Makefile       |   2 +-
> sysdeps/x86/cacheinfo.c    | 852 +++--------------------------------
> sysdeps/x86/cpu-features.c |  19 +-
> sysdeps/x86/cpu-features.h |  26 ++
> sysdeps/x86/dl-cacheinfo.c | 888 +++++++++++++++++++++++++++++++++++++
> sysdeps/x86/init-arch.h    |   3 +
> 9 files changed, 1024 insertions(+), 835 deletions(-)
> delete mode 100644 sysdeps/i386/cacheinfo.c
> create mode 100644 sysdeps/x86/dl-cacheinfo.c
>
> diff --git a/elf/dl-tunables.c b/elf/dl-tunables.c
> index 26e6e26612..7c9f1ca31f 100644
> --- a/elf/dl-tunables.c
> +++ b/elf/dl-tunables.c
> @@ -100,31 +100,39 @@ get_next_env (char **envp, char **name, size_t *namelen, char **val,
> } \
> })
>
> +#define TUNABLE_UPDATE_VAL(__cur, __val, __min, __max, __type) \
> +({ \
> + (__cur)->type.min = (__min); \
> + (__cur)->type.max = (__max); \
> + (__cur)->val.numval = (__val); \
> + (__cur)->initialized = true; \
> +})
Can we update TUNABLE_SET_VAL_IF_VALID_RANGE to take the extra parameters?
If the value is not within [__min, __max] it should not update numval.
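Something like this, keeping the range check (an untested sketch of the
shape I mean; the call site would pass the stored bounds when no new
ones are supplied):

#define TUNABLE_SET_VAL_IF_VALID_RANGE(__cur, __val, __min, __max, __type) \
({									    \
  __type min = (__min);							    \
  __type max = (__max);							    \
  /* Only update the bounds and numval when the value is in range.  */	    \
  if ((__type) (__val) >= min && (__type) (__val) <= max)		    \
    {									    \
      (__cur)->type.min = min;						    \
      (__cur)->type.max = max;						    \
      (__cur)->val.numval = (__val);					    \
      (__cur)->initialized = true;					    \
    }									    \
})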
> +
> static void
> -do_tunable_update_val (tunable_t *cur, const void *valp)
> +do_tunable_update_val (tunable_t *cur, const void *valp,
> + const void *minp, const void *maxp)
> {
> - uint64_t val;
> + uint64_t val, min, max;
>
> if (cur->type.type_code != TUNABLE_TYPE_STRING)
> - val = *((int64_t *) valp);
> + {
> + val = *((int64_t *) valp);
> + if (minp)
> + min = *((int64_t *) minp);
> + if (maxp)
> + max = *((int64_t *) maxp);
> + }
OK.
>
> switch (cur->type.type_code)
> {
> case TUNABLE_TYPE_INT_32:
> - {
> - TUNABLE_SET_VAL_IF_VALID_RANGE (cur, val, int64_t);
> - break;
> - }
> case TUNABLE_TYPE_UINT_64:
> - {
> - TUNABLE_SET_VAL_IF_VALID_RANGE (cur, val, uint64_t);
> - break;
> - }
> case TUNABLE_TYPE_SIZE_T:
> - {
> - TUNABLE_SET_VAL_IF_VALID_RANGE (cur, val, uint64_t);
> - break;
> - }
> + if (minp && maxp)
> + TUNABLE_UPDATE_VAL (cur, val, min, max, int64_t);
> + else
> + TUNABLE_SET_VAL_IF_VALID_RANGE (cur, val, int64_t);
> + break;
Merging the two would result in one call here and only one macro API
for set/update. I don't see the need for separate set and update macros.
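With a merged macro like the sketch above, the three numeric arms could
collapse to a single call, roughly (untested):

    case TUNABLE_TYPE_INT_32:
    case TUNABLE_TYPE_UINT_64:
    case TUNABLE_TYPE_SIZE_T:
      /* Fall back to the stored bounds when no new ones were given.  */
      TUNABLE_SET_VAL_IF_VALID_RANGE (cur, val,
				      minp != NULL ? min : cur->type.min,
				      maxp != NULL ? max : cur->type.max,
				      int64_t);
      break;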
> case TUNABLE_TYPE_STRING:
> {
> cur->val.strval = valp;
> @@ -153,7 +161,7 @@ tunable_initialize (tunable_t *cur, const char *strval)
> cur->initialized = true;
> valp = strval;
> }
> - do_tunable_update_val (cur, valp);
> + do_tunable_update_val (cur, valp, NULL, NULL);
> }
>
> void
> @@ -161,8 +169,17 @@ __tunable_set_val (tunable_id_t id, void *valp)
> {
> tunable_t *cur = &tunable_list[id];
>
> - do_tunable_update_val (cur, valp);
> + do_tunable_update_val (cur, valp, NULL, NULL);
> +}
> +
> +void
> +__tunable_update_val (tunable_id_t id, void *valp, void *minp, void *maxp)
> +{
> + tunable_t *cur = &tunable_list[id];
> +
> + do_tunable_update_val (cur, valp, minp, maxp);
> }
> +rtld_hidden_def (__tunable_update_val)
>
> #if TUNABLES_FRONTEND == TUNABLES_FRONTEND_valstring
> /* Parse the tunable string TUNESTR and adjust it to drop any tunables that may
> diff --git a/elf/dl-tunables.h b/elf/dl-tunables.h
> index f05eb50c2f..f6bf7379af 100644
> --- a/elf/dl-tunables.h
> +++ b/elf/dl-tunables.h
> @@ -71,8 +71,10 @@ typedef struct _tunable tunable_t;
> extern void __tunables_init (char **);
> extern void __tunable_get_val (tunable_id_t, void *, tunable_callback_t);
> extern void __tunable_set_val (tunable_id_t, void *);
> +extern void __tunable_update_val (tunable_id_t, void *, void *, void *);
> rtld_hidden_proto (__tunables_init)
> rtld_hidden_proto (__tunable_get_val)
> +rtld_hidden_proto (__tunable_update_val)
>
> /* Define TUNABLE_GET and TUNABLE_SET in short form if TOP_NAMESPACE and
> TUNABLE_NAMESPACE are defined. This is useful shorthand to get and set
> @@ -82,11 +84,16 @@ rtld_hidden_proto (__tunable_get_val)
> TUNABLE_GET_FULL (TOP_NAMESPACE, TUNABLE_NAMESPACE, __id, __type, __cb)
> # define TUNABLE_SET(__id, __type, __val) \
> TUNABLE_SET_FULL (TOP_NAMESPACE, TUNABLE_NAMESPACE, __id, __type, __val)
> +# define TUNABLE_UPDATE(__id, __type, __val, __min, __max) \
> + TUNABLE_UPDATE_FULL (TOP_NAMESPACE, TUNABLE_NAMESPACE, __id, __type, \
> + __val, __min, __max)
Please use TUNABLE_SET?
I would like to avoid needing new macro APIs.
There doesn't need to be symmetry between TUNABLE_GET and TUNABLE_SET.
TUNABLE_GET is for getting the value of the tunable during subsystem
startup, and the tunable framework handles setting the default value,
processing user settings, enforcing minimums, enforcing maximums, etc.
The TUNABLE_SET side doesn't have to be symmetric; it may need to set more
than just the tunable value, e.g. min/max.
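That is, the existing macro could just grow the extra arguments; something
along these lines (only sketching the direction, not a final signature,
and existing TUNABLE_SET callers would need a mechanical update):

# define TUNABLE_SET(__id, __type, __val, __min, __max)			\
  TUNABLE_SET_FULL (TOP_NAMESPACE, TUNABLE_NAMESPACE, __id, __type,	\
		    __val, __min, __max)

# define TUNABLE_SET_FULL(__top, __ns, __id, __type, __val, __min, __max) \
({									   \
  __tunable_set_val (TUNABLE_ENUM_NAME (__top, __ns, __id),		   \
		     & (__type) {__val}, & (__type) {__min},		   \
		     & (__type) {__max});				   \
})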
> #else
> # define TUNABLE_GET(__top, __ns, __id, __type, __cb) \
> TUNABLE_GET_FULL (__top, __ns, __id, __type, __cb)
> # define TUNABLE_SET(__top, __ns, __id, __type, __val) \
> TUNABLE_SET_FULL (__top, __ns, __id, __type, __val)
> +# define TUNABLE_UPDATE(__top, __ns, __id, __type, __val, __min, __max) \
> + TUNABLE_UPDATE_FULL (__top, __ns, __id, __type, __val, __min, __max)
> #endif
>
> /* Get and return a tunable value. If the tunable was set externally and __CB
> @@ -106,6 +113,14 @@ rtld_hidden_proto (__tunable_get_val)
> & (__type) {__val}); \
> })
>
> +/* Update a tunable value. */
> +# define TUNABLE_UPDATE_FULL(__top, __ns, __id, __type, __val, __min, __max) \
> +({ \
> + __tunable_update_val (TUNABLE_ENUM_NAME (__top, __ns, __id), \
> + & (__type) {__val}, & (__type) {__min}, \
> + & (__type) {__max}); \
> +})
> +
> /* Namespace sanity for callback functions. Use this macro to keep the
> namespace of the modules clean. */
> # define TUNABLE_CALLBACK(__name) _dl_tunable_ ## __name
> diff --git a/sysdeps/i386/cacheinfo.c b/sysdeps/i386/cacheinfo.c
> deleted file mode 100644
> index f15fe0779a..0000000000
> --- a/sysdeps/i386/cacheinfo.c
> +++ /dev/null
> @@ -1,3 +0,0 @@
> -#define DISABLE_PREFETCHW
> -
> -#include <sysdeps/x86/cacheinfo.c>
> diff --git a/sysdeps/x86/Makefile b/sysdeps/x86/Makefile
> index beab426f67..0872e0e655 100644
> --- a/sysdeps/x86/Makefile
> +++ b/sysdeps/x86/Makefile
> @@ -3,7 +3,7 @@ gen-as-const-headers += cpu-features-offsets.sym
> endif
>
> ifeq ($(subdir),elf)
> -sysdep-dl-routines += dl-get-cpu-features
> +sysdep-dl-routines += dl-get-cpu-features dl-cacheinfo
>
> tests += tst-get-cpu-features tst-get-cpu-features-static
> tests-static += tst-get-cpu-features-static
> diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
> index 311502dee3..8c4c7f9972 100644
> --- a/sysdeps/x86/cacheinfo.c
> +++ b/sysdeps/x86/cacheinfo.c
> @@ -18,498 +18,9 @@
>
> #if IS_IN (libc)
>
> -#include <assert.h>
> -#include <stdbool.h>
> -#include <stdlib.h>
> #include <unistd.h>
> -#include <cpuid.h>
> #include <init-arch.h>
>
> -static const struct intel_02_cache_info
> -{
> - unsigned char idx;
> - unsigned char assoc;
> - unsigned char linesize;
> - unsigned char rel_name;
> - unsigned int size;
> -} intel_02_known [] =
> - {
> -#define M(sc) ((sc) - _SC_LEVEL1_ICACHE_SIZE)
> - { 0x06, 4, 32, M(_SC_LEVEL1_ICACHE_SIZE), 8192 },
> - { 0x08, 4, 32, M(_SC_LEVEL1_ICACHE_SIZE), 16384 },
> - { 0x09, 4, 32, M(_SC_LEVEL1_ICACHE_SIZE), 32768 },
> - { 0x0a, 2, 32, M(_SC_LEVEL1_DCACHE_SIZE), 8192 },
> - { 0x0c, 4, 32, M(_SC_LEVEL1_DCACHE_SIZE), 16384 },
> - { 0x0d, 4, 64, M(_SC_LEVEL1_DCACHE_SIZE), 16384 },
> - { 0x0e, 6, 64, M(_SC_LEVEL1_DCACHE_SIZE), 24576 },
> - { 0x21, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> - { 0x22, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 524288 },
> - { 0x23, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 1048576 },
> - { 0x25, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> - { 0x29, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> - { 0x2c, 8, 64, M(_SC_LEVEL1_DCACHE_SIZE), 32768 },
> - { 0x30, 8, 64, M(_SC_LEVEL1_ICACHE_SIZE), 32768 },
> - { 0x39, 4, 64, M(_SC_LEVEL2_CACHE_SIZE), 131072 },
> - { 0x3a, 6, 64, M(_SC_LEVEL2_CACHE_SIZE), 196608 },
> - { 0x3b, 2, 64, M(_SC_LEVEL2_CACHE_SIZE), 131072 },
> - { 0x3c, 4, 64, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> - { 0x3d, 6, 64, M(_SC_LEVEL2_CACHE_SIZE), 393216 },
> - { 0x3e, 4, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> - { 0x3f, 2, 64, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> - { 0x41, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 131072 },
> - { 0x42, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> - { 0x43, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> - { 0x44, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> - { 0x45, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 2097152 },
> - { 0x46, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> - { 0x47, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
> - { 0x48, 12, 64, M(_SC_LEVEL2_CACHE_SIZE), 3145728 },
> - { 0x49, 16, 64, M(_SC_LEVEL2_CACHE_SIZE), 4194304 },
> - { 0x4a, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 6291456 },
> - { 0x4b, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
> - { 0x4c, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 12582912 },
> - { 0x4d, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 16777216 },
> - { 0x4e, 24, 64, M(_SC_LEVEL2_CACHE_SIZE), 6291456 },
> - { 0x60, 8, 64, M(_SC_LEVEL1_DCACHE_SIZE), 16384 },
> - { 0x66, 4, 64, M(_SC_LEVEL1_DCACHE_SIZE), 8192 },
> - { 0x67, 4, 64, M(_SC_LEVEL1_DCACHE_SIZE), 16384 },
> - { 0x68, 4, 64, M(_SC_LEVEL1_DCACHE_SIZE), 32768 },
> - { 0x78, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> - { 0x79, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 131072 },
> - { 0x7a, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> - { 0x7b, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> - { 0x7c, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> - { 0x7d, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 2097152 },
> - { 0x7f, 2, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> - { 0x80, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> - { 0x82, 8, 32, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> - { 0x83, 8, 32, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> - { 0x84, 8, 32, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> - { 0x85, 8, 32, M(_SC_LEVEL2_CACHE_SIZE), 2097152 },
> - { 0x86, 4, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> - { 0x87, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> - { 0xd0, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 524288 },
> - { 0xd1, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 1048576 },
> - { 0xd2, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> - { 0xd6, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 1048576 },
> - { 0xd7, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> - { 0xd8, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> - { 0xdc, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> - { 0xdd, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> - { 0xde, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
> - { 0xe2, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> - { 0xe3, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> - { 0xe4, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
> - { 0xea, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 12582912 },
> - { 0xeb, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 18874368 },
> - { 0xec, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 25165824 },
> - };
> -
> -#define nintel_02_known (sizeof (intel_02_known) / sizeof (intel_02_known [0]))
> -
> -static int
> -intel_02_known_compare (const void *p1, const void *p2)
> -{
> - const struct intel_02_cache_info *i1;
> - const struct intel_02_cache_info *i2;
> -
> - i1 = (const struct intel_02_cache_info *) p1;
> - i2 = (const struct intel_02_cache_info *) p2;
> -
> - if (i1->idx == i2->idx)
> - return 0;
> -
> - return i1->idx < i2->idx ? -1 : 1;
> -}
> -
> -
> -static long int
> -__attribute__ ((noinline))
> -intel_check_word (int name, unsigned int value, bool *has_level_2,
> - bool *no_level_2_or_3,
> - const struct cpu_features *cpu_features)
> -{
> - if ((value & 0x80000000) != 0)
> - /* The register value is reserved. */
> - return 0;
> -
> - /* Fold the name. The _SC_ constants are always in the order SIZE,
> - ASSOC, LINESIZE. */
> - int folded_rel_name = (M(name) / 3) * 3;
> -
> - while (value != 0)
> - {
> - unsigned int byte = value & 0xff;
> -
> - if (byte == 0x40)
> - {
> - *no_level_2_or_3 = true;
> -
> - if (folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
> - /* No need to look further. */
> - break;
> - }
> - else if (byte == 0xff)
> - {
> - /* CPUID leaf 0x4 contains all the information. We need to
> - iterate over it. */
> - unsigned int eax;
> - unsigned int ebx;
> - unsigned int ecx;
> - unsigned int edx;
> -
> - unsigned int round = 0;
> - while (1)
> - {
> - __cpuid_count (4, round, eax, ebx, ecx, edx);
> -
> - enum { null = 0, data = 1, inst = 2, uni = 3 } type = eax & 0x1f;
> - if (type == null)
> - /* That was the end. */
> - break;
> -
> - unsigned int level = (eax >> 5) & 0x7;
> -
> - if ((level == 1 && type == data
> - && folded_rel_name == M(_SC_LEVEL1_DCACHE_SIZE))
> - || (level == 1 && type == inst
> - && folded_rel_name == M(_SC_LEVEL1_ICACHE_SIZE))
> - || (level == 2 && folded_rel_name == M(_SC_LEVEL2_CACHE_SIZE))
> - || (level == 3 && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
> - || (level == 4 && folded_rel_name == M(_SC_LEVEL4_CACHE_SIZE)))
> - {
> - unsigned int offset = M(name) - folded_rel_name;
> -
> - if (offset == 0)
> - /* Cache size. */
> - return (((ebx >> 22) + 1)
> - * (((ebx >> 12) & 0x3ff) + 1)
> - * ((ebx & 0xfff) + 1)
> - * (ecx + 1));
> - if (offset == 1)
> - return (ebx >> 22) + 1;
> -
> - assert (offset == 2);
> - return (ebx & 0xfff) + 1;
> - }
> -
> - ++round;
> - }
> - /* There is no other cache information anywhere else. */
> - break;
> - }
> - else
> - {
> - if (byte == 0x49 && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
> - {
> - /* Intel reused this value. For family 15, model 6 it
> - specifies the 3rd level cache. Otherwise the 2nd
> - level cache. */
> - unsigned int family = cpu_features->basic.family;
> - unsigned int model = cpu_features->basic.model;
> -
> - if (family == 15 && model == 6)
> - {
> - /* The level 3 cache is encoded for this model like
> - the level 2 cache is for other models. Pretend
> - the caller asked for the level 2 cache. */
> - name = (_SC_LEVEL2_CACHE_SIZE
> - + (name - _SC_LEVEL3_CACHE_SIZE));
> - folded_rel_name = M(_SC_LEVEL2_CACHE_SIZE);
> - }
> - }
> -
> - struct intel_02_cache_info *found;
> - struct intel_02_cache_info search;
> -
> - search.idx = byte;
> - found = bsearch (&search, intel_02_known, nintel_02_known,
> - sizeof (intel_02_known[0]), intel_02_known_compare);
> - if (found != NULL)
> - {
> - if (found->rel_name == folded_rel_name)
> - {
> - unsigned int offset = M(name) - folded_rel_name;
> -
> - if (offset == 0)
> - /* Cache size. */
> - return found->size;
> - if (offset == 1)
> - return found->assoc;
> -
> - assert (offset == 2);
> - return found->linesize;
> - }
> -
> - if (found->rel_name == M(_SC_LEVEL2_CACHE_SIZE))
> - *has_level_2 = true;
> - }
> - }
> -
> - /* Next byte for the next round. */
> - value >>= 8;
> - }
> -
> - /* Nothing found. */
> - return 0;
> -}
> -
> -
> -static long int __attribute__ ((noinline))
> -handle_intel (int name, const struct cpu_features *cpu_features)
> -{
> - unsigned int maxidx = cpu_features->basic.max_cpuid;
> -
> - /* Return -1 for older CPUs. */
> - if (maxidx < 2)
> - return -1;
> -
> - /* OK, we can use the CPUID instruction to get all info about the
> - caches. */
> - unsigned int cnt = 0;
> - unsigned int max = 1;
> - long int result = 0;
> - bool no_level_2_or_3 = false;
> - bool has_level_2 = false;
> -
> - while (cnt++ < max)
> - {
> - unsigned int eax;
> - unsigned int ebx;
> - unsigned int ecx;
> - unsigned int edx;
> - __cpuid (2, eax, ebx, ecx, edx);
> -
> - /* The low byte of EAX in the first round contain the number of
> - rounds we have to make. At least one, the one we are already
> - doing. */
> - if (cnt == 1)
> - {
> - max = eax & 0xff;
> - eax &= 0xffffff00;
> - }
> -
> - /* Process the individual registers' value. */
> - result = intel_check_word (name, eax, &has_level_2,
> - &no_level_2_or_3, cpu_features);
> - if (result != 0)
> - return result;
> -
> - result = intel_check_word (name, ebx, &has_level_2,
> - &no_level_2_or_3, cpu_features);
> - if (result != 0)
> - return result;
> -
> - result = intel_check_word (name, ecx, &has_level_2,
> - &no_level_2_or_3, cpu_features);
> - if (result != 0)
> - return result;
> -
> - result = intel_check_word (name, edx, &has_level_2,
> - &no_level_2_or_3, cpu_features);
> - if (result != 0)
> - return result;
> - }
> -
> - if (name >= _SC_LEVEL2_CACHE_SIZE && name <= _SC_LEVEL3_CACHE_LINESIZE
> - && no_level_2_or_3)
> - return -1;
> -
> - return 0;
> -}
> -
> -
> -static long int __attribute__ ((noinline))
> -handle_amd (int name)
> -{
> - unsigned int eax;
> - unsigned int ebx;
> - unsigned int ecx;
> - unsigned int edx;
> - __cpuid (0x80000000, eax, ebx, ecx, edx);
> -
> - /* No level 4 cache (yet). */
> - if (name > _SC_LEVEL3_CACHE_LINESIZE)
> - return 0;
> -
> - unsigned int fn = 0x80000005 + (name >= _SC_LEVEL2_CACHE_SIZE);
> - if (eax < fn)
> - return 0;
> -
> - __cpuid (fn, eax, ebx, ecx, edx);
> -
> - if (name < _SC_LEVEL1_DCACHE_SIZE)
> - {
> - name += _SC_LEVEL1_DCACHE_SIZE - _SC_LEVEL1_ICACHE_SIZE;
> - ecx = edx;
> - }
> -
> - switch (name)
> - {
> - case _SC_LEVEL1_DCACHE_SIZE:
> - return (ecx >> 14) & 0x3fc00;
> -
> - case _SC_LEVEL1_DCACHE_ASSOC:
> - ecx >>= 16;
> - if ((ecx & 0xff) == 0xff)
> - /* Fully associative. */
> - return (ecx << 2) & 0x3fc00;
> - return ecx & 0xff;
> -
> - case _SC_LEVEL1_DCACHE_LINESIZE:
> - return ecx & 0xff;
> -
> - case _SC_LEVEL2_CACHE_SIZE:
> - return (ecx & 0xf000) == 0 ? 0 : (ecx >> 6) & 0x3fffc00;
> -
> - case _SC_LEVEL2_CACHE_ASSOC:
> - switch ((ecx >> 12) & 0xf)
> - {
> - case 0:
> - case 1:
> - case 2:
> - case 4:
> - return (ecx >> 12) & 0xf;
> - case 6:
> - return 8;
> - case 8:
> - return 16;
> - case 10:
> - return 32;
> - case 11:
> - return 48;
> - case 12:
> - return 64;
> - case 13:
> - return 96;
> - case 14:
> - return 128;
> - case 15:
> - return ((ecx >> 6) & 0x3fffc00) / (ecx & 0xff);
> - default:
> - return 0;
> - }
> - /* NOTREACHED */
> -
> - case _SC_LEVEL2_CACHE_LINESIZE:
> - return (ecx & 0xf000) == 0 ? 0 : ecx & 0xff;
> -
> - case _SC_LEVEL3_CACHE_SIZE:
> - return (edx & 0xf000) == 0 ? 0 : (edx & 0x3ffc0000) << 1;
> -
> - case _SC_LEVEL3_CACHE_ASSOC:
> - switch ((edx >> 12) & 0xf)
> - {
> - case 0:
> - case 1:
> - case 2:
> - case 4:
> - return (edx >> 12) & 0xf;
> - case 6:
> - return 8;
> - case 8:
> - return 16;
> - case 10:
> - return 32;
> - case 11:
> - return 48;
> - case 12:
> - return 64;
> - case 13:
> - return 96;
> - case 14:
> - return 128;
> - case 15:
> - return ((edx & 0x3ffc0000) << 1) / (edx & 0xff);
> - default:
> - return 0;
> - }
> - /* NOTREACHED */
> -
> - case _SC_LEVEL3_CACHE_LINESIZE:
> - return (edx & 0xf000) == 0 ? 0 : edx & 0xff;
> -
> - default:
> - assert (! "cannot happen");
> - }
> - return -1;
> -}
> -
> -
> -static long int __attribute__ ((noinline))
> -handle_zhaoxin (int name)
> -{
> - unsigned int eax;
> - unsigned int ebx;
> - unsigned int ecx;
> - unsigned int edx;
> -
> - int folded_rel_name = (M(name) / 3) * 3;
> -
> - unsigned int round = 0;
> - while (1)
> - {
> - __cpuid_count (4, round, eax, ebx, ecx, edx);
> -
> - enum { null = 0, data = 1, inst = 2, uni = 3 } type = eax & 0x1f;
> - if (type == null)
> - break;
> -
> - unsigned int level = (eax >> 5) & 0x7;
> -
> - if ((level == 1 && type == data
> - && folded_rel_name == M(_SC_LEVEL1_DCACHE_SIZE))
> - || (level == 1 && type == inst
> - && folded_rel_name == M(_SC_LEVEL1_ICACHE_SIZE))
> - || (level == 2 && folded_rel_name == M(_SC_LEVEL2_CACHE_SIZE))
> - || (level == 3 && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE)))
> - {
> - unsigned int offset = M(name) - folded_rel_name;
> -
> - if (offset == 0)
> - /* Cache size. */
> - return (((ebx >> 22) + 1)
> - * (((ebx >> 12) & 0x3ff) + 1)
> - * ((ebx & 0xfff) + 1)
> - * (ecx + 1));
> - if (offset == 1)
> - return (ebx >> 22) + 1;
> -
> - assert (offset == 2);
> - return (ebx & 0xfff) + 1;
> - }
> -
> - ++round;
> - }
> -
> - /* Nothing found. */
> - return 0;
> -}
> -
> -
> -/* Get the value of the system variable NAME. */
> -long int
> -attribute_hidden
> -__cache_sysconf (int name)
> -{
> - const struct cpu_features *cpu_features = __get_cpu_features ();
> -
> - if (cpu_features->basic.kind == arch_kind_intel)
> - return handle_intel (name, cpu_features);
> -
> - if (cpu_features->basic.kind == arch_kind_amd)
> - return handle_amd (name);
> -
> - if (cpu_features->basic.kind == arch_kind_zhaoxin)
> - return handle_zhaoxin (name);
> -
> - // XXX Fill in more vendors.
> -
> - /* CPU not known, we have no information. */
> - return 0;
> -}
> -
> -
OK.
> /* Data cache size for use in memory and string routines, typically
> L1 size, rounded to multiple of 256 bytes. */
> long int __x86_data_cache_size_half attribute_hidden = 32 * 1024 / 2;
> @@ -530,348 +41,85 @@ long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
> /* Threshold to use non temporal store. */
> long int __x86_shared_non_temporal_threshold attribute_hidden;
>
> -#ifndef DISABLE_PREFETCHW
> +#ifndef __x86_64__
> /* PREFETCHW support flag for use in memory and string routines. */
> int __x86_prefetchw attribute_hidden;
> #endif
>
> -
> -static void
> -get_common_cache_info (long int *shared_ptr, unsigned int *threads_ptr,
> - long int core)
> +/* Get the value of the system variable NAME. */
> +long int
> +attribute_hidden
> +__cache_sysconf (int name)
> {
> - unsigned int eax;
> - unsigned int ebx;
> - unsigned int ecx;
> - unsigned int edx;
> -
> - /* Number of logical processors sharing L2 cache. */
> - int threads_l2;
> -
> - /* Number of logical processors sharing L3 cache. */
> - int threads_l3;
> -
> const struct cpu_features *cpu_features = __get_cpu_features ();
> - int max_cpuid = cpu_features->basic.max_cpuid;
> - unsigned int family = cpu_features->basic.family;
> - unsigned int model = cpu_features->basic.model;
> - long int shared = *shared_ptr;
> - unsigned int threads = *threads_ptr;
> - bool inclusive_cache = true;
> - bool support_count_mask = true;
> -
> - /* Try L3 first. */
> - unsigned int level = 3;
> -
> - if (cpu_features->basic.kind == arch_kind_zhaoxin && family == 6)
> - support_count_mask = false;
> -
> - if (shared <= 0)
> - {
> - /* Try L2 otherwise. */
> - level = 2;
> - shared = core;
> - threads_l2 = 0;
> - threads_l3 = -1;
> - }
> - else
> - {
> - threads_l2 = 0;
> - threads_l3 = 0;
> - }
> -
> - /* A value of 0 for the HTT bit indicates there is only a single
> - logical processor. */
> - if (HAS_CPU_FEATURE (HTT))
> + switch (name)
> {
> - /* Figure out the number of logical threads that share the
> - highest cache level. */
> - if (max_cpuid >= 4)
> - {
> - int i = 0;
> -
> - /* Query until cache level 2 and 3 are enumerated. */
> - int check = 0x1 | (threads_l3 == 0) << 1;
> - do
> - {
> - __cpuid_count (4, i++, eax, ebx, ecx, edx);
> + case _SC_LEVEL1_ICACHE_SIZE:
> + return cpu_features->level1_icache_size;
>
> - /* There seems to be a bug in at least some Pentium Ds
> - which sometimes fail to iterate all cache parameters.
> - Do not loop indefinitely here, stop in this case and
> - assume there is no such information. */
> - if (cpu_features->basic.kind == arch_kind_intel
> - && (eax & 0x1f) == 0 )
> - goto intel_bug_no_cache_info;
> + case _SC_LEVEL1_DCACHE_SIZE:
> + return cpu_features->level1_dcache_size;
>
> - switch ((eax >> 5) & 0x7)
> - {
> - default:
> - break;
> - case 2:
> - if ((check & 0x1))
> - {
> - /* Get maximum number of logical processors
> - sharing L2 cache. */
> - threads_l2 = (eax >> 14) & 0x3ff;
> - check &= ~0x1;
> - }
> - break;
> - case 3:
> - if ((check & (0x1 << 1)))
> - {
> - /* Get maximum number of logical processors
> - sharing L3 cache. */
> - threads_l3 = (eax >> 14) & 0x3ff;
> + case _SC_LEVEL1_DCACHE_ASSOC:
> + return cpu_features->level1_dcache_assoc;
>
> - /* Check if L2 and L3 caches are inclusive. */
> - inclusive_cache = (edx & 0x2) != 0;
> - check &= ~(0x1 << 1);
> - }
> - break;
> - }
> - }
> - while (check);
> + case _SC_LEVEL1_DCACHE_LINESIZE:
> + return cpu_features->level1_dcache_linesize;
>
> - /* If max_cpuid >= 11, THREADS_L2/THREADS_L3 are the maximum
> - numbers of addressable IDs for logical processors sharing
> - the cache, instead of the maximum number of threads
> - sharing the cache. */
> - if (max_cpuid >= 11 && support_count_mask)
> - {
> - /* Find the number of logical processors shipped in
> - one core and apply count mask. */
> - i = 0;
> + case _SC_LEVEL2_CACHE_SIZE:
> + return cpu_features->level2_cache_size;
>
> - /* Count SMT only if there is L3 cache. Always count
> - core if there is no L3 cache. */
> - int count = ((threads_l2 > 0 && level == 3)
> - | ((threads_l3 > 0
> - || (threads_l2 > 0 && level == 2)) << 1));
> + case _SC_LEVEL2_CACHE_ASSOC:
> + return cpu_features->level2_cache_assoc;
>
> - while (count)
> - {
> - __cpuid_count (11, i++, eax, ebx, ecx, edx);
> + case _SC_LEVEL2_CACHE_LINESIZE:
> + return cpu_features->level2_cache_linesize;
>
> - int shipped = ebx & 0xff;
> - int type = ecx & 0xff00;
> - if (shipped == 0 || type == 0)
> - break;
> - else if (type == 0x100)
> - {
> - /* Count SMT. */
> - if ((count & 0x1))
> - {
> - int count_mask;
> + case _SC_LEVEL3_CACHE_SIZE:
> + return cpu_features->level3_cache_size;
>
> - /* Compute count mask. */
> - asm ("bsr %1, %0"
> - : "=r" (count_mask) : "g" (threads_l2));
> - count_mask = ~(-1 << (count_mask + 1));
> - threads_l2 = (shipped - 1) & count_mask;
> - count &= ~0x1;
> - }
> - }
> - else if (type == 0x200)
> - {
> - /* Count core. */
> - if ((count & (0x1 << 1)))
> - {
> - int count_mask;
> - int threads_core
> - = (level == 2 ? threads_l2 : threads_l3);
> + case _SC_LEVEL3_CACHE_ASSOC:
> + return cpu_features->level3_cache_assoc;
>
> - /* Compute count mask. */
> - asm ("bsr %1, %0"
> - : "=r" (count_mask) : "g" (threads_core));
> - count_mask = ~(-1 << (count_mask + 1));
> - threads_core = (shipped - 1) & count_mask;
> - if (level == 2)
> - threads_l2 = threads_core;
> - else
> - threads_l3 = threads_core;
> - count &= ~(0x1 << 1);
> - }
> - }
> - }
> - }
> - if (threads_l2 > 0)
> - threads_l2 += 1;
> - if (threads_l3 > 0)
> - threads_l3 += 1;
> - if (level == 2)
> - {
> - if (threads_l2)
> - {
> - threads = threads_l2;
> - if (cpu_features->basic.kind == arch_kind_intel
> - && threads > 2
> - && family == 6)
> - switch (model)
> - {
> - case 0x37:
> - case 0x4a:
> - case 0x4d:
> - case 0x5a:
> - case 0x5d:
> - /* Silvermont has L2 cache shared by 2 cores. */
> - threads = 2;
> - break;
> - default:
> - break;
> - }
> - }
> - }
> - else if (threads_l3)
> - threads = threads_l3;
> - }
> - else
> - {
> -intel_bug_no_cache_info:
> - /* Assume that all logical threads share the highest cache
> - level. */
> - threads
> - = ((cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx
> - >> 16) & 0xff);
> - }
> + case _SC_LEVEL3_CACHE_LINESIZE:
> + return cpu_features->level3_cache_linesize;
>
> - /* Cap usage of highest cache level to the number of supported
> - threads. */
> - if (shared > 0 && threads > 0)
> - shared /= threads;
> - }
> + case _SC_LEVEL4_CACHE_SIZE:
> + return cpu_features->level4_cache_size;
>
> - /* Account for non-inclusive L2 and L3 caches. */
> - if (!inclusive_cache)
> - {
> - if (threads_l2 > 0)
> - core /= threads_l2;
> - shared += core;
> + default:
> + break;
> }
> -
> - *shared_ptr = shared;
> - *threads_ptr = threads;
> + return -1;
> }
>
> -
> static void
> __attribute__((constructor))
OK. I would eventually like to see this constructor go away.
> init_cacheinfo (void)
> {
> - /* Find out what brand of processor. */
> - unsigned int ebx;
> - unsigned int ecx;
> - unsigned int edx;
> - int max_cpuid_ex;
> - long int data = -1;
> - long int shared = -1;
> - long int core;
> - unsigned int threads = 0;
> const struct cpu_features *cpu_features = __get_cpu_features ();
> + long int data = cpu_features->data_cache_size;
> + __x86_raw_data_cache_size_half = data / 2;
> + __x86_raw_data_cache_size = data;
> + /* Round data cache size to multiple of 256 bytes. */
> + data = data & ~255L;
> + __x86_data_cache_size_half = data / 2;
> + __x86_data_cache_size = data;
> +
> + long int shared = cpu_features->shared_cache_size;
> + __x86_raw_shared_cache_size_half = shared / 2;
> + __x86_raw_shared_cache_size = shared;
> + /* Round shared cache size to multiple of 256 bytes. */
> + shared = shared & ~255L;
> + __x86_shared_cache_size_half = shared / 2;
> + __x86_shared_cache_size = shared;
>
> - if (cpu_features->basic.kind == arch_kind_intel)
> - {
> - data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
> - core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
> - shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
> -
> - get_common_cache_info (&shared, &threads, core);
> - }
> - else if (cpu_features->basic.kind == arch_kind_zhaoxin)
> - {
> - data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
> - core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
> - shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
> -
> - get_common_cache_info (&shared, &threads, core);
> - }
> - else if (cpu_features->basic.kind == arch_kind_amd)
> - {
> - data = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
> - long int core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
> - shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);
> -
> - /* Get maximum extended function. */
> - __cpuid (0x80000000, max_cpuid_ex, ebx, ecx, edx);
> -
> - if (shared <= 0)
> - /* No shared L3 cache. All we have is the L2 cache. */
> - shared = core;
> - else
> - {
> - /* Figure out the number of logical threads that share L3. */
> - if (max_cpuid_ex >= 0x80000008)
> - {
> - /* Get width of APIC ID. */
> - __cpuid (0x80000008, max_cpuid_ex, ebx, ecx, edx);
> - threads = 1 << ((ecx >> 12) & 0x0f);
> - }
> -
> - if (threads == 0)
> - {
> - /* If APIC ID width is not available, use logical
> - processor count. */
> - __cpuid (0x00000001, max_cpuid_ex, ebx, ecx, edx);
> -
> - if ((edx & (1 << 28)) != 0)
> - threads = (ebx >> 16) & 0xff;
> - }
> -
> - /* Cap usage of highest cache level to the number of
> - supported threads. */
> - if (threads > 0)
> - shared /= threads;
> -
> - /* Account for exclusive L2 and L3 caches. */
> - shared += core;
> - }
> + __x86_shared_non_temporal_threshold
> + = cpu_features->non_temporal_threshold;
>
> -#ifndef DISABLE_PREFETCHW
> - if (max_cpuid_ex >= 0x80000001)
> - {
> - unsigned int eax;
> - __cpuid (0x80000001, eax, ebx, ecx, edx);
> - /* PREFETCHW || 3DNow! */
> - if ((ecx & 0x100) || (edx & 0x80000000))
> - __x86_prefetchw = -1;
> - }
> +#ifndef __x86_64__
> + __x86_prefetchw = cpu_features->prefetchw;
> #endif
> - }
> -
> - if (cpu_features->data_cache_size != 0)
> - data = cpu_features->data_cache_size;
> -
> - if (data > 0)
> - {
> - __x86_raw_data_cache_size_half = data / 2;
> - __x86_raw_data_cache_size = data;
> - /* Round data cache size to multiple of 256 bytes. */
> - data = data & ~255L;
> - __x86_data_cache_size_half = data / 2;
> - __x86_data_cache_size = data;
> - }
> -
> - if (cpu_features->shared_cache_size != 0)
> - shared = cpu_features->shared_cache_size;
> -
> - if (shared > 0)
> - {
> - __x86_raw_shared_cache_size_half = shared / 2;
> - __x86_raw_shared_cache_size = shared;
> - /* Round shared cache size to multiple of 256 bytes. */
> - shared = shared & ~255L;
> - __x86_shared_cache_size_half = shared / 2;
> - __x86_shared_cache_size = shared;
> - }
> -
> - /* The large memcpy micro benchmark in glibc shows that 6 times of
> - shared cache size is the approximate value above which non-temporal
> - store becomes faster on a 8-core processor. This is the 3/4 of the
> - total shared cache size. */
> - __x86_shared_non_temporal_threshold
> - = (cpu_features->non_temporal_threshold != 0
> - ? cpu_features->non_temporal_threshold
> - : __x86_shared_cache_size * threads * 3 / 4);
> }
>
> #endif
> diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
> index c351bdd54a..e718204c18 100644
> --- a/sysdeps/x86/cpu-features.c
> +++ b/sysdeps/x86/cpu-features.c
> @@ -19,6 +19,7 @@
> #include <cpuid.h>
> #include <cpu-features.h>
> #include <dl-hwcap.h>
> +#include <init-arch.h>
> #include <libc-pointer-arith.h>
>
> #if HAVE_TUNABLES
> @@ -602,20 +603,14 @@ no_cpuid:
> cpu_features->basic.model = model;
> cpu_features->basic.stepping = stepping;
>
> + __init_cacheinfo ();
OK.
> +
> #if HAVE_TUNABLES
> TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
> - cpu_features->non_temporal_threshold
> - = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> - cpu_features->data_cache_size
> - = TUNABLE_GET (x86_data_cache_size, long int, NULL);
> - cpu_features->shared_cache_size
> - = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
> -#endif
> -
> - /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86. */
> -#if !HAVE_TUNABLES && defined SHARED
> - /* The glibc.cpu.hwcap_mask tunable is initialized already, so no need to do
> - this. */
> +#elif defined SHARED
> + /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86. The
> + glibc.cpu.hwcap_mask tunable is initialized already, so no
> + need to do this. */
> GLRO(dl_hwcap_mask) = HWCAP_IMPORTANT;
> #endif
>
> diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
> index d66dc206f7..3aaed33cbc 100644
> --- a/sysdeps/x86/cpu-features.h
> +++ b/sysdeps/x86/cpu-features.h
> @@ -102,6 +102,32 @@ struct cpu_features
> unsigned long int shared_cache_size;
> /* Threshold to use non temporal store. */
> unsigned long int non_temporal_threshold;
> + /* _SC_LEVEL1_ICACHE_SIZE. */
> + unsigned long int level1_icache_size;
> + /* _SC_LEVEL1_DCACHE_SIZE. */
> + unsigned long int level1_dcache_size;
> + /* _SC_LEVEL1_DCACHE_ASSOC. */
> + unsigned long int level1_dcache_assoc;
> + /* _SC_LEVEL1_DCACHE_LINESIZE. */
> + unsigned long int level1_dcache_linesize;
> + /* _SC_LEVEL2_CACHE_SIZE. */
> + unsigned long int level2_cache_size;
> + /* _SC_LEVEL2_CACHE_ASSOC. */
> + unsigned long int level2_cache_assoc;
> + /* _SC_LEVEL2_CACHE_LINESIZE. */
> + unsigned long int level2_cache_linesize;
> + /* _SC_LEVEL3_CACHE_SIZE. */
> + unsigned long int level3_cache_size;
> + /* _SC_LEVEL3_CACHE_ASSOC. */
> + unsigned long int level3_cache_assoc;
> + /* _SC_LEVEL3_CACHE_LINESIZE. */
> + unsigned long int level3_cache_linesize;
> + /* _SC_LEVEL4_CACHE_SIZE. */
> + unsigned long int level4_cache_size;
> +#ifndef __x86_64__
> + /* PREFETCHW support flag for use in memory and string routines. */
> + unsigned long int prefetchw;
> +#endif
> };
>
> /* Used from outside of glibc to get access to the CPU features
> diff --git a/sysdeps/x86/dl-cacheinfo.c b/sysdeps/x86/dl-cacheinfo.c
> new file mode 100644
> index 0000000000..8e2a6f552c
> --- /dev/null
> +++ b/sysdeps/x86/dl-cacheinfo.c
> @@ -0,0 +1,888 @@
> +/* x86 cache info.
> + Copyright (C) 2020 Free Software Foundation, Inc.
> + This file is part of the GNU C Library.
> +
> + The GNU C Library is free software; you can redistribute it and/or
> + modify it under the terms of the GNU Lesser General Public
> + License as published by the Free Software Foundation; either
> + version 2.1 of the License, or (at your option) any later version.
> +
> + The GNU C Library is distributed in the hope that it will be useful,
> + but WITHOUT ANY WARRANTY; without even the implied warranty of
> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + Lesser General Public License for more details.
> +
> + You should have received a copy of the GNU Lesser General Public
> + License along with the GNU C Library; if not, see
> + <https://www.gnu.org/licenses/>. */
> +
> +#include <assert.h>
> +#include <stdbool.h>
> +#include <stdlib.h>
> +#include <unistd.h>
> +#include <cpuid.h>
> +#include <init-arch.h>
> +#if HAVE_TUNABLES
> +# define TUNABLE_NAMESPACE cpu
> +# include <elf/dl-tunables.h>
> +#endif
> +
> +static const struct intel_02_cache_info
> +{
> + unsigned char idx;
> + unsigned char assoc;
> + unsigned char linesize;
> + unsigned char rel_name;
> + unsigned int size;
> +} intel_02_known [] =
> + {
> +#define M(sc) ((sc) - _SC_LEVEL1_ICACHE_SIZE)
> + { 0x06, 4, 32, M(_SC_LEVEL1_ICACHE_SIZE), 8192 },
> + { 0x08, 4, 32, M(_SC_LEVEL1_ICACHE_SIZE), 16384 },
> + { 0x09, 4, 32, M(_SC_LEVEL1_ICACHE_SIZE), 32768 },
> + { 0x0a, 2, 32, M(_SC_LEVEL1_DCACHE_SIZE), 8192 },
> + { 0x0c, 4, 32, M(_SC_LEVEL1_DCACHE_SIZE), 16384 },
> + { 0x0d, 4, 64, M(_SC_LEVEL1_DCACHE_SIZE), 16384 },
> + { 0x0e, 6, 64, M(_SC_LEVEL1_DCACHE_SIZE), 24576 },
> + { 0x21, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> + { 0x22, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 524288 },
> + { 0x23, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 1048576 },
> + { 0x25, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> + { 0x29, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> + { 0x2c, 8, 64, M(_SC_LEVEL1_DCACHE_SIZE), 32768 },
> + { 0x30, 8, 64, M(_SC_LEVEL1_ICACHE_SIZE), 32768 },
> + { 0x39, 4, 64, M(_SC_LEVEL2_CACHE_SIZE), 131072 },
> + { 0x3a, 6, 64, M(_SC_LEVEL2_CACHE_SIZE), 196608 },
> + { 0x3b, 2, 64, M(_SC_LEVEL2_CACHE_SIZE), 131072 },
> + { 0x3c, 4, 64, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> + { 0x3d, 6, 64, M(_SC_LEVEL2_CACHE_SIZE), 393216 },
> + { 0x3e, 4, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> + { 0x3f, 2, 64, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> + { 0x41, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 131072 },
> + { 0x42, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> + { 0x43, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> + { 0x44, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> + { 0x45, 4, 32, M(_SC_LEVEL2_CACHE_SIZE), 2097152 },
> + { 0x46, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> + { 0x47, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
> + { 0x48, 12, 64, M(_SC_LEVEL2_CACHE_SIZE), 3145728 },
> + { 0x49, 16, 64, M(_SC_LEVEL2_CACHE_SIZE), 4194304 },
> + { 0x4a, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 6291456 },
> + { 0x4b, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
> + { 0x4c, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 12582912 },
> + { 0x4d, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 16777216 },
> + { 0x4e, 24, 64, M(_SC_LEVEL2_CACHE_SIZE), 6291456 },
> + { 0x60, 8, 64, M(_SC_LEVEL1_DCACHE_SIZE), 16384 },
> + { 0x66, 4, 64, M(_SC_LEVEL1_DCACHE_SIZE), 8192 },
> + { 0x67, 4, 64, M(_SC_LEVEL1_DCACHE_SIZE), 16384 },
> + { 0x68, 4, 64, M(_SC_LEVEL1_DCACHE_SIZE), 32768 },
> + { 0x78, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> + { 0x79, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 131072 },
> + { 0x7a, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> + { 0x7b, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> + { 0x7c, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> + { 0x7d, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 2097152 },
> + { 0x7f, 2, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> + { 0x80, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> + { 0x82, 8, 32, M(_SC_LEVEL2_CACHE_SIZE), 262144 },
> + { 0x83, 8, 32, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> + { 0x84, 8, 32, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> + { 0x85, 8, 32, M(_SC_LEVEL2_CACHE_SIZE), 2097152 },
> + { 0x86, 4, 64, M(_SC_LEVEL2_CACHE_SIZE), 524288 },
> + { 0x87, 8, 64, M(_SC_LEVEL2_CACHE_SIZE), 1048576 },
> + { 0xd0, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 524288 },
> + { 0xd1, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 1048576 },
> + { 0xd2, 4, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> + { 0xd6, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 1048576 },
> + { 0xd7, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> + { 0xd8, 8, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> + { 0xdc, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> + { 0xdd, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> + { 0xde, 12, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
> + { 0xe2, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
> + { 0xe3, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
> + { 0xe4, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
> + { 0xea, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 12582912 },
> + { 0xeb, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 18874368 },
> + { 0xec, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 25165824 },
> + };
> +
> +#define nintel_02_known (sizeof (intel_02_known) / sizeof (intel_02_known [0]))
> +
> +static int
> +intel_02_known_compare (const void *p1, const void *p2)
> +{
> + const struct intel_02_cache_info *i1;
> + const struct intel_02_cache_info *i2;
> +
> + i1 = (const struct intel_02_cache_info *) p1;
> + i2 = (const struct intel_02_cache_info *) p2;
> +
> + if (i1->idx == i2->idx)
> + return 0;
> +
> + return i1->idx < i2->idx ? -1 : 1;
> +}
> +
> +
> +static long int
> +__attribute__ ((noinline))
> +intel_check_word (int name, unsigned int value, bool *has_level_2,
> + bool *no_level_2_or_3,
> + const struct cpu_features *cpu_features)
> +{
> + if ((value & 0x80000000) != 0)
> + /* The register value is reserved. */
> + return 0;
> +
> + /* Fold the name. The _SC_ constants are always in the order SIZE,
> + ASSOC, LINESIZE. */
> + int folded_rel_name = (M(name) / 3) * 3;
> +
> + while (value != 0)
> + {
> + unsigned int byte = value & 0xff;
> +
> + if (byte == 0x40)
> + {
> + *no_level_2_or_3 = true;
> +
> + if (folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
> + /* No need to look further. */
> + break;
> + }
> + else if (byte == 0xff)
> + {
> + /* CPUID leaf 0x4 contains all the information. We need to
> + iterate over it. */
> + unsigned int eax;
> + unsigned int ebx;
> + unsigned int ecx;
> + unsigned int edx;
> +
> + unsigned int round = 0;
> + while (1)
> + {
> + __cpuid_count (4, round, eax, ebx, ecx, edx);
> +
> + enum { null = 0, data = 1, inst = 2, uni = 3 } type = eax & 0x1f;
> + if (type == null)
> + /* That was the end. */
> + break;
> +
> + unsigned int level = (eax >> 5) & 0x7;
> +
> + if ((level == 1 && type == data
> + && folded_rel_name == M(_SC_LEVEL1_DCACHE_SIZE))
> + || (level == 1 && type == inst
> + && folded_rel_name == M(_SC_LEVEL1_ICACHE_SIZE))
> + || (level == 2 && folded_rel_name == M(_SC_LEVEL2_CACHE_SIZE))
> + || (level == 3 && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
> + || (level == 4 && folded_rel_name == M(_SC_LEVEL4_CACHE_SIZE)))
> + {
> + unsigned int offset = M(name) - folded_rel_name;
> +
> + if (offset == 0)
> + /* Cache size. */
> + return (((ebx >> 22) + 1)
> + * (((ebx >> 12) & 0x3ff) + 1)
> + * ((ebx & 0xfff) + 1)
> + * (ecx + 1));
> + if (offset == 1)
> + return (ebx >> 22) + 1;
> +
> + assert (offset == 2);
> + return (ebx & 0xfff) + 1;
> + }
> +
> + ++round;
> + }
> + /* There is no other cache information anywhere else. */
> + break;
> + }
> + else
> + {
> + if (byte == 0x49 && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
> + {
> + /* Intel reused this value. For family 15, model 6 it
> + specifies the 3rd level cache. Otherwise the 2nd
> + level cache. */
> + unsigned int family = cpu_features->basic.family;
> + unsigned int model = cpu_features->basic.model;
> +
> + if (family == 15 && model == 6)
> + {
> + /* The level 3 cache is encoded for this model like
> + the level 2 cache is for other models. Pretend
> + the caller asked for the level 2 cache. */
> + name = (_SC_LEVEL2_CACHE_SIZE
> + + (name - _SC_LEVEL3_CACHE_SIZE));
> + folded_rel_name = M(_SC_LEVEL2_CACHE_SIZE);
> + }
> + }
> +
> + struct intel_02_cache_info *found;
> + struct intel_02_cache_info search;
> +
> + search.idx = byte;
> + found = bsearch (&search, intel_02_known, nintel_02_known,
> + sizeof (intel_02_known[0]), intel_02_known_compare);
> + if (found != NULL)
> + {
> + if (found->rel_name == folded_rel_name)
> + {
> + unsigned int offset = M(name) - folded_rel_name;
> +
> + if (offset == 0)
> + /* Cache size. */
> + return found->size;
> + if (offset == 1)
> + return found->assoc;
> +
> + assert (offset == 2);
> + return found->linesize;
> + }
> +
> + if (found->rel_name == M(_SC_LEVEL2_CACHE_SIZE))
> + *has_level_2 = true;
> + }
> + }
> +
> + /* Next byte for the next round. */
> + value >>= 8;
> + }
> +
> + /* Nothing found. */
> + return 0;
> +}
> +
> +
> +static long int __attribute__ ((noinline))
> +handle_intel (int name, const struct cpu_features *cpu_features)
> +{
> + unsigned int maxidx = cpu_features->basic.max_cpuid;
> +
> + /* Return -1 for older CPUs. */
> + if (maxidx < 2)
> + return -1;
> +
> + /* OK, we can use the CPUID instruction to get all info about the
> + caches. */
> + unsigned int cnt = 0;
> + unsigned int max = 1;
> + long int result = 0;
> + bool no_level_2_or_3 = false;
> + bool has_level_2 = false;
> +
> + while (cnt++ < max)
> + {
> + unsigned int eax;
> + unsigned int ebx;
> + unsigned int ecx;
> + unsigned int edx;
> + __cpuid (2, eax, ebx, ecx, edx);
> +
> + /* The low byte of EAX in the first round contain the number of
> + rounds we have to make. At least one, the one we are already
> + doing. */
> + if (cnt == 1)
> + {
> + max = eax & 0xff;
> + eax &= 0xffffff00;
> + }
> +
> + /* Process the individual registers' value. */
> + result = intel_check_word (name, eax, &has_level_2,
> + &no_level_2_or_3, cpu_features);
> + if (result != 0)
> + return result;
> +
> + result = intel_check_word (name, ebx, &has_level_2,
> + &no_level_2_or_3, cpu_features);
> + if (result != 0)
> + return result;
> +
> + result = intel_check_word (name, ecx, &has_level_2,
> + &no_level_2_or_3, cpu_features);
> + if (result != 0)
> + return result;
> +
> + result = intel_check_word (name, edx, &has_level_2,
> + &no_level_2_or_3, cpu_features);
> + if (result != 0)
> + return result;
> + }
> +
> + if (name >= _SC_LEVEL2_CACHE_SIZE && name <= _SC_LEVEL3_CACHE_LINESIZE
> + && no_level_2_or_3)
> + return -1;
> +
> + return 0;
> +}
> +
> +
> +static long int __attribute__ ((noinline))
> +handle_amd (int name)
> +{
> + unsigned int eax;
> + unsigned int ebx;
> + unsigned int ecx;
> + unsigned int edx;
> + __cpuid (0x80000000, eax, ebx, ecx, edx);
> +
> + /* No level 4 cache (yet). */
> + if (name > _SC_LEVEL3_CACHE_LINESIZE)
> + return 0;
> +
> + unsigned int fn = 0x80000005 + (name >= _SC_LEVEL2_CACHE_SIZE);
> + if (eax < fn)
> + return 0;
> +
> + __cpuid (fn, eax, ebx, ecx, edx);
> +
> + if (name < _SC_LEVEL1_DCACHE_SIZE)
> + {
> + name += _SC_LEVEL1_DCACHE_SIZE - _SC_LEVEL1_ICACHE_SIZE;
> + ecx = edx;
> + }
> +
> + switch (name)
> + {
> + case _SC_LEVEL1_DCACHE_SIZE:
> + return (ecx >> 14) & 0x3fc00;
> +
> + case _SC_LEVEL1_DCACHE_ASSOC:
> + ecx >>= 16;
> + if ((ecx & 0xff) == 0xff)
> + /* Fully associative. */
> + return (ecx << 2) & 0x3fc00;
> + return ecx & 0xff;
> +
> + case _SC_LEVEL1_DCACHE_LINESIZE:
> + return ecx & 0xff;
> +
> + case _SC_LEVEL2_CACHE_SIZE:
> + return (ecx & 0xf000) == 0 ? 0 : (ecx >> 6) & 0x3fffc00;
> +
> + case _SC_LEVEL2_CACHE_ASSOC:
> + switch ((ecx >> 12) & 0xf)
> + {
> + case 0:
> + case 1:
> + case 2:
> + case 4:
> + return (ecx >> 12) & 0xf;
> + case 6:
> + return 8;
> + case 8:
> + return 16;
> + case 10:
> + return 32;
> + case 11:
> + return 48;
> + case 12:
> + return 64;
> + case 13:
> + return 96;
> + case 14:
> + return 128;
> + case 15:
> + return ((ecx >> 6) & 0x3fffc00) / (ecx & 0xff);
> + default:
> + return 0;
> + }
> + /* NOTREACHED */
> +
> + case _SC_LEVEL2_CACHE_LINESIZE:
> + return (ecx & 0xf000) == 0 ? 0 : ecx & 0xff;
> +
> + case _SC_LEVEL3_CACHE_SIZE:
> + return (edx & 0xf000) == 0 ? 0 : (edx & 0x3ffc0000) << 1;
> +
> + case _SC_LEVEL3_CACHE_ASSOC:
> + switch ((edx >> 12) & 0xf)
> + {
> + case 0:
> + case 1:
> + case 2:
> + case 4:
> + return (edx >> 12) & 0xf;
> + case 6:
> + return 8;
> + case 8:
> + return 16;
> + case 10:
> + return 32;
> + case 11:
> + return 48;
> + case 12:
> + return 64;
> + case 13:
> + return 96;
> + case 14:
> + return 128;
> + case 15:
> + return ((edx & 0x3ffc0000) << 1) / (edx & 0xff);
> + default:
> + return 0;
> + }
> + /* NOTREACHED */
> +
> + case _SC_LEVEL3_CACHE_LINESIZE:
> + return (edx & 0xf000) == 0 ? 0 : edx & 0xff;
> +
> + default:
> + assert (! "cannot happen");
> + }
> + return -1;
> +}
> +
> +
> +static long int __attribute__ ((noinline))
> +handle_zhaoxin (int name)
> +{
> + unsigned int eax;
> + unsigned int ebx;
> + unsigned int ecx;
> + unsigned int edx;
> +
> + int folded_rel_name = (M(name) / 3) * 3;
> +
> + unsigned int round = 0;
> + while (1)
> + {
> + __cpuid_count (4, round, eax, ebx, ecx, edx);
> +
> + enum { null = 0, data = 1, inst = 2, uni = 3 } type = eax & 0x1f;
> + if (type == null)
> + break;
> +
> + unsigned int level = (eax >> 5) & 0x7;
> +
> + if ((level == 1 && type == data
> + && folded_rel_name == M(_SC_LEVEL1_DCACHE_SIZE))
> + || (level == 1 && type == inst
> + && folded_rel_name == M(_SC_LEVEL1_ICACHE_SIZE))
> + || (level == 2 && folded_rel_name == M(_SC_LEVEL2_CACHE_SIZE))
> + || (level == 3 && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE)))
> + {
> + unsigned int offset = M(name) - folded_rel_name;
> +
> + if (offset == 0)
> + /* Cache size. */
> + return (((ebx >> 22) + 1)
> + * (((ebx >> 12) & 0x3ff) + 1)
> + * ((ebx & 0xfff) + 1)
> + * (ecx + 1));
> + if (offset == 1)
> + return (ebx >> 22) + 1;
> +
> + assert (offset == 2);
> + return (ebx & 0xfff) + 1;
> + }
> +
> + ++round;
> + }
> +
> + /* Nothing found. */
> + return 0;
> +}
> +
> +
> +static void
> +get_common_cache_info (long int *shared_ptr, unsigned int *threads_ptr,
> + long int core)
> +{
> + unsigned int eax;
> + unsigned int ebx;
> + unsigned int ecx;
> + unsigned int edx;
> +
> + /* Number of logical processors sharing L2 cache. */
> + int threads_l2;
> +
> + /* Number of logical processors sharing L3 cache. */
> + int threads_l3;
> +
> + const struct cpu_features *cpu_features = __get_cpu_features ();
> + int max_cpuid = cpu_features->basic.max_cpuid;
> + unsigned int family = cpu_features->basic.family;
> + unsigned int model = cpu_features->basic.model;
> + long int shared = *shared_ptr;
> + unsigned int threads = *threads_ptr;
> + bool inclusive_cache = true;
> + bool support_count_mask = true;
> +
> + /* Try L3 first. */
> + unsigned int level = 3;
> +
> + if (cpu_features->basic.kind == arch_kind_zhaoxin && family == 6)
> + support_count_mask = false;
> +
> + if (shared <= 0)
> + {
> + /* Try L2 otherwise. */
> + level = 2;
> + shared = core;
> + threads_l2 = 0;
> + threads_l3 = -1;
> + }
> + else
> + {
> + threads_l2 = 0;
> + threads_l3 = 0;
> + }
> +
> + /* A value of 0 for the HTT bit indicates there is only a single
> + logical processor. */
> + if (HAS_CPU_FEATURE (HTT))
> + {
> + /* Figure out the number of logical threads that share the
> + highest cache level. */
> + if (max_cpuid >= 4)
> + {
> + int i = 0;
> +
> + /* Query until cache level 2 and 3 are enumerated. */
> + int check = 0x1 | (threads_l3 == 0) << 1;
> + do
> + {
> + __cpuid_count (4, i++, eax, ebx, ecx, edx);
> +
> + /* There seems to be a bug in at least some Pentium Ds
> + which sometimes fail to iterate all cache parameters.
> + Do not loop indefinitely here, stop in this case and
> + assume there is no such information. */
> + if (cpu_features->basic.kind == arch_kind_intel
> + && (eax & 0x1f) == 0 )
> + goto intel_bug_no_cache_info;
> +
> + switch ((eax >> 5) & 0x7)
> + {
> + default:
> + break;
> + case 2:
> + if ((check & 0x1))
> + {
> + /* Get maximum number of logical processors
> + sharing L2 cache. */
> + threads_l2 = (eax >> 14) & 0x3ff;
> + check &= ~0x1;
> + }
> + break;
> + case 3:
> + if ((check & (0x1 << 1)))
> + {
> + /* Get maximum number of logical processors
> + sharing L3 cache. */
> + threads_l3 = (eax >> 14) & 0x3ff;
> +
> + /* Check if L2 and L3 caches are inclusive. */
> + inclusive_cache = (edx & 0x2) != 0;
> + check &= ~(0x1 << 1);
> + }
> + break;
> + }
> + }
> + while (check);
> +
> + /* If max_cpuid >= 11, THREADS_L2/THREADS_L3 are the maximum
> + numbers of addressable IDs for logical processors sharing
> + the cache, instead of the maximum number of threads
> + sharing the cache. */
> + if (max_cpuid >= 11 && support_count_mask)
> + {
> + /* Find the number of logical processors shipped in
> + one core and apply count mask. */
> + i = 0;
> +
> + /* Count SMT only if there is L3 cache. Always count
> + core if there is no L3 cache. */
> + int count = ((threads_l2 > 0 && level == 3)
> + | ((threads_l3 > 0
> + || (threads_l2 > 0 && level == 2)) << 1));
> +
> + while (count)
> + {
> + __cpuid_count (11, i++, eax, ebx, ecx, edx);
> +
> + int shipped = ebx & 0xff;
> + int type = ecx & 0xff00;
> + if (shipped == 0 || type == 0)
> + break;
> + else if (type == 0x100)
> + {
> + /* Count SMT. */
> + if ((count & 0x1))
> + {
> + int count_mask;
> +
> + /* Compute count mask. */
> + asm ("bsr %1, %0"
> + : "=r" (count_mask) : "g" (threads_l2));
> + count_mask = ~(-1 << (count_mask + 1));
> + threads_l2 = (shipped - 1) & count_mask;
> + count &= ~0x1;
> + }
> + }
> + else if (type == 0x200)
> + {
> + /* Count core. */
> + if ((count & (0x1 << 1)))
> + {
> + int count_mask;
> + int threads_core
> + = (level == 2 ? threads_l2 : threads_l3);
> +
> + /* Compute count mask. */
> + asm ("bsr %1, %0"
> + : "=r" (count_mask) : "g" (threads_core));
> + count_mask = ~(-1 << (count_mask + 1));
> + threads_core = (shipped - 1) & count_mask;
> + if (level == 2)
> + threads_l2 = threads_core;
> + else
> + threads_l3 = threads_core;
> + count &= ~(0x1 << 1);
> + }
> + }
> + }
> + }
> + if (threads_l2 > 0)
> + threads_l2 += 1;
> + if (threads_l3 > 0)
> + threads_l3 += 1;
> + if (level == 2)
> + {
> + if (threads_l2)
> + {
> + threads = threads_l2;
> + if (cpu_features->basic.kind == arch_kind_intel
> + && threads > 2
> + && family == 6)
> + switch (model)
> + {
> + case 0x37:
> + case 0x4a:
> + case 0x4d:
> + case 0x5a:
> + case 0x5d:
> + /* Silvermont has L2 cache shared by 2 cores. */
> + threads = 2;
> + break;
> + default:
> + break;
> + }
> + }
> + }
> + else if (threads_l3)
> + threads = threads_l3;
> + }
> + else
> + {
> +intel_bug_no_cache_info:
> + /* Assume that all logical threads share the highest cache
> + level. */
> + threads
> + = ((cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx
> + >> 16) & 0xff);
> + }
> +
> + /* Cap usage of highest cache level to the number of supported
> + threads. */
> + if (shared > 0 && threads > 0)
> + shared /= threads;
> + }
> +
> + /* Account for non-inclusive L2 and L3 caches. */
> + if (!inclusive_cache)
> + {
> + if (threads_l2 > 0)
> + core /= threads_l2;
> + shared += core;
> + }
> +
> + *shared_ptr = shared;
> + *threads_ptr = threads;
> +}
> +
> +void
> +__init_cacheinfo (void)
> +{
> + /* Find out what brand of processor. */
> + unsigned int ebx;
> + unsigned int ecx;
> + unsigned int edx;
> + int max_cpuid_ex;
> + long int data = -1;
> + long int shared = -1;
> + long int core;
> + unsigned int threads = 0;
> + unsigned long int level1_icache_size = -1;
> + unsigned long int level1_dcache_size = -1;
> + unsigned long int level1_dcache_assoc = -1;
> + unsigned long int level1_dcache_linesize = -1;
> + unsigned long int level2_cache_size = -1;
> + unsigned long int level2_cache_assoc = -1;
> + unsigned long int level2_cache_linesize = -1;
> + unsigned long int level3_cache_size = -1;
> + unsigned long int level3_cache_assoc = -1;
> + unsigned long int level3_cache_linesize = -1;
> + unsigned long int level4_cache_size = -1;
> + struct cpu_features *cpu_features = __get_cpu_features ();
> +
> + if (cpu_features->basic.kind == arch_kind_intel)
> + {
> + data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
> + core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
> + shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
> +
> + level1_icache_size
> + = handle_intel (_SC_LEVEL1_ICACHE_SIZE, cpu_features);
> + level1_dcache_size = data;
> + level1_dcache_assoc
> + = handle_intel (_SC_LEVEL1_DCACHE_ASSOC, cpu_features);
> + level1_dcache_linesize
> + = handle_intel (_SC_LEVEL1_DCACHE_LINESIZE, cpu_features);
> + level2_cache_size = core;
> + level2_cache_assoc
> + = handle_intel (_SC_LEVEL2_CACHE_ASSOC, cpu_features);
> + level2_cache_linesize
> + = handle_intel (_SC_LEVEL2_CACHE_LINESIZE, cpu_features);
> + level3_cache_size = shared;
> + level3_cache_assoc
> + = handle_intel (_SC_LEVEL3_CACHE_ASSOC, cpu_features);
> + level3_cache_linesize
> + = handle_intel (_SC_LEVEL3_CACHE_LINESIZE, cpu_features);
> + level4_cache_size
> + = handle_intel (_SC_LEVEL4_CACHE_SIZE, cpu_features);
> +
> + get_common_cache_info (&shared, &threads, core);
> + }
> + else if (cpu_features->basic.kind == arch_kind_zhaoxin)
> + {
> + data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
> + core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
> + shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
> +
> + level1_icache_size = handle_zhaoxin (_SC_LEVEL1_ICACHE_SIZE);
> + level1_dcache_size = data;
> + level1_dcache_assoc = handle_zhaoxin (_SC_LEVEL1_DCACHE_ASSOC);
> + level1_dcache_linesize = handle_zhaoxin (_SC_LEVEL1_DCACHE_LINESIZE);
> + level2_cache_size = core;
> + level2_cache_assoc = handle_zhaoxin (_SC_LEVEL2_CACHE_ASSOC);
> + level2_cache_linesize = handle_zhaoxin (_SC_LEVEL2_CACHE_LINESIZE);
> + level3_cache_size = shared;
> + level3_cache_assoc = handle_zhaoxin (_SC_LEVEL3_CACHE_ASSOC);
> + level3_cache_linesize = handle_zhaoxin (_SC_LEVEL3_CACHE_LINESIZE);
> +
> + get_common_cache_info (&shared, &threads, core);
> + }
> + else if (cpu_features->basic.kind == arch_kind_amd)
> + {
> + data = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
> + core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
> + shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);
> +
> + level1_icache_size = handle_amd (_SC_LEVEL1_ICACHE_SIZE);
> + level1_dcache_size = data;
> + level1_dcache_assoc = handle_amd (_SC_LEVEL1_DCACHE_ASSOC);
> + level1_dcache_linesize = handle_amd (_SC_LEVEL1_DCACHE_LINESIZE);
> + level2_cache_size = core;
> + level2_cache_assoc = handle_amd (_SC_LEVEL2_CACHE_ASSOC);
> + level2_cache_linesize = handle_amd (_SC_LEVEL2_CACHE_LINESIZE);
> + level3_cache_size = shared;
> + level3_cache_assoc = handle_amd (_SC_LEVEL3_CACHE_ASSOC);
> + level3_cache_linesize = handle_amd (_SC_LEVEL3_CACHE_LINESIZE);
> +
> + /* Get maximum extended function. */
> + __cpuid (0x80000000, max_cpuid_ex, ebx, ecx, edx);
> +
> + if (shared <= 0)
> + /* No shared L3 cache. All we have is the L2 cache. */
> + shared = core;
> + else
> + {
> + /* Figure out the number of logical threads that share L3. */
> + if (max_cpuid_ex >= 0x80000008)
> + {
> + /* Get width of APIC ID. */
> + __cpuid (0x80000008, max_cpuid_ex, ebx, ecx, edx);
> + threads = 1 << ((ecx >> 12) & 0x0f);
> + }
> +
> + if (threads == 0)
> + {
> + /* If APIC ID width is not available, use logical
> + processor count. */
> + __cpuid (0x00000001, max_cpuid_ex, ebx, ecx, edx);
> +
> + if ((edx & (1 << 28)) != 0)
> + threads = (ebx >> 16) & 0xff;
> + }
> +
> + /* Cap usage of highest cache level to the number of
> + supported threads. */
> + if (threads > 0)
> + shared /= threads;
> +
> + /* Account for exclusive L2 and L3 caches. */
> + shared += core;
> + }
> +
> +#ifndef __x86_64__
> + if (max_cpuid_ex >= 0x80000001)
> + {
> + unsigned int eax;
> + __cpuid (0x80000001, eax, ebx, ecx, edx);
> + /* PREFETCHW || 3DNow! */
> + if ((ecx & 0x100) || (edx & 0x80000000))
> + cpu_features->prefetchw = -1;
> + }
> +#endif
> + }
> +
> + cpu_features->level1_icache_size = level1_icache_size;
> + cpu_features->level1_dcache_size = level1_dcache_size;
> + cpu_features->level1_dcache_assoc = level1_dcache_assoc;
> + cpu_features->level1_dcache_linesize = level1_dcache_linesize;
> + cpu_features->level2_cache_size = level2_cache_size;
> + cpu_features->level2_cache_assoc = level2_cache_assoc;
> + cpu_features->level2_cache_linesize = level2_cache_linesize;
> + cpu_features->level3_cache_size = level3_cache_size;
> + cpu_features->level3_cache_assoc = level3_cache_assoc;
> + cpu_features->level3_cache_linesize = level3_cache_linesize;
> + cpu_features->level4_cache_size = level4_cache_size;
> +
> +  /* The large memcpy micro benchmark in glibc shows that 6 times the
> +     shared cache size is the approximate value above which non-temporal
> +     stores become faster on an 8-core processor.  This is 3/4 of the
> +     total shared cache size.  */
> + unsigned long int non_temporal_threshold = (shared * threads * 3 / 4);
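A side note for readers of the comment above: `shared' has already been
divided by `threads' at this point, so shared * threads roughly
reconstructs the total shared cache size, and 3/4 of the total is
consistent with the 6x-per-core figure from the benchmark, since
6/8 == 3/4 on the 8-core machine the comment refers to.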
> +
> +#if HAVE_TUNABLES
> + long int tunable_size;
> + tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
> + if (tunable_size != 0)
> + data = tunable_size;
> + tunable_size = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
> + if (tunable_size != 0)
> + shared = tunable_size;
> + tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> + if (tunable_size != 0)
> + non_temporal_threshold = tunable_size;
This is wrong; you have excluded 0 from the set of allowed values.
Why not raise the minimum in dl-tunables.list to 1?
Then the above code becomes exactly what we used to have, e.g.:
cpu_features->non_temporal_threshold
= TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
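On the dl-tunables.list side, raising the minimum would look something
like this (a sketch only; I have not checked the exact layout of the
entry in sysdeps/x86/dl-tunables.list):
  glibc {
    cpu {
      x86_non_temporal_threshold {
        type: SIZE_T
        minval: 1
      }
    }
  }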
The tunables framework should make it so you don't have to write this
boilerplate.
There should be only *one* variable that needs special handling, the
one with the dynamic minimum, and that can be done with a TUNABLE_GET
+ TUNABLE_SET pair.
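Something like this (a rough sketch; I'm assuming the special case is
the rep_movsb threshold from patch 2/2, and minimum_rep_movsb_threshold
is a placeholder for whatever value you compute from the detected CPU):
  long int rep_movsb_threshold
    = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
  /* Raise the user-supplied value to the CPU-derived minimum;
     everything else goes through the framework's static ranges.  */
  if (rep_movsb_threshold < minimum_rep_movsb_threshold)
    TUNABLE_SET (x86_rep_movsb_threshold, long int,
                 minimum_rep_movsb_threshold);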
> +#endif
> +
> + cpu_features->data_cache_size = data;
> + cpu_features->shared_cache_size = shared;
> + cpu_features->non_temporal_threshold = non_temporal_threshold;
Not needed; you can just fold this into the cpu_features->foo
assignments, as we used to have it. The ranges are not being updated
to any dynamic values.
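i.e. just (a sketch; this assumes the list minimums/defaults are
arranged so that an unset tunable yields a value the code can use
directly):
  cpu_features->data_cache_size
    = TUNABLE_GET (x86_data_cache_size, long int, NULL);
  cpu_features->shared_cache_size
    = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
  cpu_features->non_temporal_threshold
    = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);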
> +
> +#if HAVE_TUNABLES
> + TUNABLE_UPDATE (x86_data_cache_size, long int,
> + data, 0, (long int) -1);
> + TUNABLE_UPDATE (x86_shared_cache_size, long int,
> + shared, 0, (long int) -1);
> + TUNABLE_UPDATE (x86_non_temporal_threshold, long int,
> + non_temporal_threshold, 0, (long int) -1);
Delete all of these. They don't need updating?
> +#endif
> +}
> diff --git a/sysdeps/x86/init-arch.h b/sysdeps/x86/init-arch.h
> index d6f59cf962..272ed10902 100644
> --- a/sysdeps/x86/init-arch.h
> +++ b/sysdeps/x86/init-arch.h
> @@ -23,6 +23,9 @@
> #include <ifunc-init.h>
> #include <isa.h>
>
> +extern void __init_cacheinfo (void)
> + __attribute__ ((visibility ("hidden")));
> +
> #ifndef __x86_64__
> /* Due to the reordering and the other nifty extensions in i686, it is
> not really good to use heavily i586 optimized code on an i686. It's
>
--
Cheers,
Carlos.