Discussion:
[dpdk-dev] [PATCH] mem: balanced allocation of hugepages
(too old to reply)
Ilya Maximets
2017-02-16 13:01:10 UTC
Permalink
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.

New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency from libnuma.

Cc: <***@dpdk.org>
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)

diff --git a/config/common_base b/config/common_base
index 71a4fcb..fbcebbd 100644
--- a/config/common_base
+++ b/config/common_base
@@ -97,6 +97,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n

# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH=""
diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index cf11a09..5ae3846 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -35,4 +35,8 @@ DIRS-y += common
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp
DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp

+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a956bb2..8536a36 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -82,6 +82,9 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif

#include <rte_log.h>
#include <rte_memory.h>
@@ -359,6 +362,21 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -375,10 +393,48 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+#endif

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id])
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+#endif
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -489,6 +545,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
return i;
}

@@ -573,6 +634,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 92f3635..c2153b9 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -159,6 +159,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Tan, Jianfeng
2017-02-16 13:26:26 UTC
Permalink
Hi,
-----Original Message-----
Sent: Thursday, February 16, 2017 9:01 PM
Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
Subject: [PATCH] mem: balanced allocation of hugepages
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
For such a use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?

Do you see use case like --socket-mem 2048,1024 and only three 1GB-hugepage are allowed?

Thanks,
Jianfeng
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency from libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66
++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
diff --git a/config/common_base b/config/common_base
index 71a4fcb..fbcebbd 100644
--- a/config/common_base
+++ b/config/common_base
@@ -97,6 +97,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH=""
diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index cf11a09..5ae3846 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -35,4 +35,8 @@ DIRS-y += common
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp
DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c
b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a956bb2..8536a36 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -82,6 +82,9 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif
#include <rte_log.h>
#include <rte_memory.h>
@@ -359,6 +362,21 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -375,10 +393,48 @@ map_all_hugepages(struct hugepage_file
*hugepg_tbl,
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ unsigned long
nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id])
+ node_id = (node_id + 1) %
RTE_MAX_NUMA_NODES;
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id %
ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for
socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy
MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+#endif
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -489,6 +545,10 @@ map_all_hugepages(struct hugepage_file
*hugepg_tbl,
vma_len -= hugepage_sz;
}
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy
MPOL_DEFAULT\n");
+#endif
return i;
}
@@ -573,6 +634,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl,
struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 92f3635..c2153b9 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -159,6 +159,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Ilya Maximets
2017-02-16 13:55:57 UTC
Permalink
Hi,
Post by Tan, Jianfeng
Hi,
-----Original Message-----
Sent: Thursday, February 16, 2017 9:01 PM
Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
Subject: [PATCH] mem: balanced allocation of hugepages
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
For such a use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?
Unfortunately, interleave policy doesn't work for me. I suspect kernel configuration
blocks this or I don't understand something in kernel internals.
I'm using 3.10 rt kernel from rhel7.

I tried to set up MPOL_INTERLEAVE in code and it doesn't work for me. Your example
with numactl doesn't work too:

# Limited to 8GB of hugepages
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096

EAL: Setting up physically contiguous memory...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 8 not 90 hugepages of size 1024 MB allocated
EAL: Hugepage /dev/hugepages/rtemap_0 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_1 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_2 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_3 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_4 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_5 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_6 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_7 is on socket 0
EAL: Not enough memory available on socket 1! Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

Also, using numactl will affect all the allocations in application. This may
cause additional unexpected issues.
Post by Tan, Jianfeng
Do you see use case like --socket-mem 2048,1024 and only three 1GB-hugepage are allowed?
This case will work with my patch.
But the opposite one '--socket-mem=1024,2048' will fail.
To be clear, we need to allocate all required memory at first
from each numa node and then allocate all other available pages
in round-robin fashion. But such solution looks a little ugly.

What do you think?

Best regards, Ilya Maximets.
Ilya Maximets
2017-02-16 13:57:36 UTC
Permalink
Post by Ilya Maximets
Hi,
Post by Tan, Jianfeng
Hi,
-----Original Message-----
Sent: Thursday, February 16, 2017 9:01 PM
Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
Subject: [PATCH] mem: balanced allocation of hugepages
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
For such a use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?
Unfortunately, interleave policy doesn't work for me. I suspect kernel configuration
blocks this or I don't understand something in kernel internals.
I'm using 3.10 rt kernel from rhel7.
I tried to set up MPOL_INTERLEAVE in code and it doesn't work for me. Your example
# Limited to 8GB of hugepages
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096
Sorry,
cgexec -g hugetlb:test numactl --interleave=0,1 ./testpmd --socket-mem=4096,4096 ..
Post by Ilya Maximets
EAL: Setting up physically contiguous memory...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 8 not 90 hugepages of size 1024 MB allocated
EAL: Hugepage /dev/hugepages/rtemap_0 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_1 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_2 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_3 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_4 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_5 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_6 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_7 is on socket 0
EAL: Not enough memory available on socket 1! Requested: 4096MB, available: 0MB
Cannot init memory
Also, using numactl will affect all the allocations in application. This may
cause additional unexpected issues.
Post by Tan, Jianfeng
Do you see use case like --socket-mem 2048,1024 and only three 1GB-hugepage are allowed?
This case will work with my patch.
But the opposite one '--socket-mem=1024,2048' will fail.
To be clear, we need to allocate all required memory at first
from each numa node and then allocate all other available pages
in round-robin fashion. But such solution looks a little ugly.
What do you think?
Best regards, Ilya Maximets.
Bruce Richardson
2017-02-16 13:31:57 UTC
Permalink
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency from libnuma.
I think this highlights a general technical problem we need to resolve
in DPDK. If we want to add support for a new feature in DPDK by
leveraging functionality in an existing library, we are caught in a sort
of catch-22:
* If we want to leverage the existing library, we have to have the
feature off-by-default, as we don't want to increase the minimum
requirements for DPDK.
* If we want the feature enabled by default we need to avoid the
dependency, and so reimplement some or all of the functionality inside
DPDK itself. That will be rejected on the basis that it duplicates
existing library functionality.

I suspect the solution to this is more dynamic build-time configuration
to start enabling things based on installed dependencies, but I'm open
to other opinions. I see a gap here, however.

/Bruce
Ilya Maximets
2017-03-06 09:34:42 UTC
Permalink
Hi all.

So, what about this change?

Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency from libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Sergio Gonzalez Monroy
2017-03-08 13:46:26 UTC
Permalink
Hi Ilya,

I have done similar tests and as you already pointed out, 'numactl
--interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on
hugetlbfs mount point.

I would be inclined towards *adding libnuma as dependency* to DPDK to
make memory allocation a bit more reliable.

Currently at a high level regarding hugepages per numa node:
1) Try to map all free hugepages. The total number of mapped hugepages
depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa
socket/node.

Using libnuma we could try to allocate hugepages per numa:
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.

This approach would improve failing scenarios caused by limits but It
would still not fix issues regarding non-contiguous hugepages (worst
case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that
mempools can span over multiple memsegs/hugepages, but it is still a
problem for any other library requiring big chunks of memory.

Potentially if we were to add an option such as 'iommu-only' when all
devices are bound to vfio-pci, we could have a reliable way to allocate
hugepages by just requesting the number of pages from each numa.

Thoughts?

Sergio
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency from libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Ilya Maximets
2017-03-09 12:57:24 UTC
Permalink
Post by Sergio Gonzalez Monroy
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa socket/node.
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
Thoughts?
Hi Sergio,

Thanks for your attention to this.

For now, as we have some issues with non-contiguous
hugepages, I'm thinking about following hybrid schema:
1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort pages and choose the most suitable.

This solution should decrease number of issues connected with
non-contiguous memory.

Best regards, Ilya Maximets.
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency from libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Sergio Gonzalez Monroy
2017-03-27 13:01:59 UTC
Permalink
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa socket/node.
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
Thoughts?
Hi Sergio,
Thanks for your attention to this.
For now, as we have some issues with non-contiguous
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort pages and choose the most suitable.
This solution should decrease number of issues connected with
non-contiguous memory.
Sorry for late reply, I was hoping for more comments from the community.

IMHO this should be default behavior, which means no config option and
libnuma as EAL dependency.
I think your proposal is good, could you consider implementing such
approach on next release?

Regards.
Post by Ilya Maximets
Best regards, Ilya Maximets.
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency from libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Acked-by: Sergio Gonzalez Monroy <***@intel.com>
Ilya Maximets
2017-03-27 14:43:15 UTC
Permalink
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa socket/node.
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
Thoughts?
Hi Sergio,
Thanks for your attention to this.
For now, as we have some issues with non-contiguous
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort pages and choose the most suitable.
This solution should decrease number of issues connected with
non-contiguous memory.
Sorry for late reply, I was hoping for more comments from the community.
IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
I think your proposal is good, could you consider implementing such approach on next release?
Sure, I can implement this for 17.08 release.
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency on libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Thanks.

Best regards, Ilya Maximets.
Ilya Maximets
2017-04-07 15:14:43 UTC
Permalink
Hi All.

I wanted to ask (just to clarify current status):
Will this patch be included in current release (acked by maintainer)
and then I will upgrade it to hybrid logic or I will just prepare v3
with hybrid logic for 17.08 ?

Best regards, Ilya Maximets.
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa socket/node.
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
Thoughts?
Hi Sergio,
Thanks for your attention to this.
For now, as we have some issues with non-contiguous
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort pages and choose the most suitable.
This solution should decrease number of issues connected with
non-contiguous memory.
Sorry for late reply, I was hoping for more comments from the community.
IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
I think your proposal is good, could you consider implementing such approach on next release?
Sure, I can implement this for 17.08 release.
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency on libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Thanks.
Best regards, Ilya Maximets.
Thomas Monjalon
2017-04-07 15:44:56 UTC
Permalink
Post by Ilya Maximets
Hi All.
Will this patch be included in current release (acked by maintainer)
and then I will upgrade it to hybrid logic or I will just prepare v3
with hybrid logic for 17.08 ?
What is your preferred option Ilya?
Sergio?
Post by Ilya Maximets
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa socket/node.
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
Thoughts?
Hi Sergio,
Thanks for your attention to this.
For now, as we have some issues with non-contiguous
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort pages and choose the most suitable.
This solution should decrease number of issues connected with
non-contiguous memory.
Sorry for late reply, I was hoping for more comments from the community.
IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
I think your proposal is good, could you consider implementing such approach on next release?
Sure, I can implement this for 17.08 release.
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency on libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Thanks.
Best regards, Ilya Maximets.
Ilya Maximets
2017-04-10 07:11:39 UTC
Permalink
Post by Thomas Monjalon
Post by Ilya Maximets
Hi All.
Will this patch be included in current release (acked by maintainer)
and then I will upgrade it to hybrid logic or I will just prepare v3
with hybrid logic for 17.08 ?
What is your preferred option Ilya?
I have no strong opinion on this. One thought is that it could be
nice if someone else could test this functionality with current
release before enabling it by default in 17.08.

Tomorrow I'm going on vacation. So I'll post rebased version today
(there are few fuzzes with current master) and you with Sergio may
decide what to do.

Best regards, Ilya Maximets.
Post by Thomas Monjalon
Sergio?
Post by Ilya Maximets
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa socket/node.
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
Thoughts?
Hi Sergio,
Thanks for your attention to this.
For now, as we have some issues with non-contiguous
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort pages and choose the most suitable.
This solution should decrease number of issues connected with
non-contiguous memory.
Sorry for late reply, I was hoping for more comments from the community.
IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
I think your proposal is good, could you consider implementing such approach on next release?
Sure, I can implement this for 17.08 release.
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency on libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Thanks.
Best regards, Ilya Maximets.
Sergio Gonzalez Monroy
2017-04-10 07:51:52 UTC
Permalink
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Ilya Maximets
Hi All.
Will this patch be included in current release (acked by maintainer)
and then I will upgrade it to hybrid logic or I will just prepare v3
with hybrid logic for 17.08 ?
What is your preferred option Ilya?
I have no strong opinion on this. One thought is that it could be
nice if someone else could test this functionality with current
release before enabling it by default in 17.08.
Tomorrow I'm going on vacation. So I'll post rebased version today
(there are few fuzzes with current master) and you with Sergio may
decide what to do.
Best regards, Ilya Maximets.
Post by Thomas Monjalon
Sergio?
I would be inclined towards v3 targeting v17.08. IMHO it would be more
clean this way.

Sergio
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Ilya Maximets
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa socket/node.
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
Thoughts?
Hi Sergio,
Thanks for your attention to this.
For now, as we have some issues with non-contiguous
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort pages and choose the most suitable.
This solution should decrease number of issues connected with
non-contiguous memory.
Sorry for late reply, I was hoping for more comments from the community.
IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
I think your proposal is good, could you consider implementing such approach on next release?
Sure, I can implement this for 17.08 release.
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency on libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Thanks.
Best regards, Ilya Maximets.
Ilya Maximets
2017-04-10 08:05:56 UTC
Permalink
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Ilya Maximets
Hi All.
Will this patch be included in current release (acked by maintainer)
and then I will upgrade it to hybrid logic or I will just prepare v3
with hybrid logic for 17.08 ?
What is your preferred option Ilya?
I have no strong opinion on this. One thought is that it could be
nice if someone else could test this functionality with current
release before enabling it by default in 17.08.
Tomorrow I'm going on vacation. So I'll post rebased version today
(there are few fuzzes with current master) and you with Sergio may
decide what to do.
Best regards, Ilya Maximets.
Post by Thomas Monjalon
Sergio?
I would be inclined towards v3 targeting v17.08. IMHO it would be more clean this way.
OK.
I've sent rebased version just in case.
Sergio
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Ilya Maximets
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa socket/node.
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
Thoughts?
Hi Sergio,
Thanks for your attention to this.
For now, as we have some issues with non-contiguous
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort pages and choose the most suitable.
This solution should decrease number of issues connected with
non-contiguous memory.
Sorry for late reply, I was hoping for more comments from the community.
IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
I think your proposal is good, could you consider implementing such approach on next release?
Sure, I can implement this for 17.08 release.
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Ilya Maximets
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens beacause all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency on libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
Thanks.
Best regards, Ilya Maximets.
Ilya Maximets
2017-04-10 08:04:19 UTC
Permalink
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.

New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency on libnuma.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---

Version 2:
* rebased (fuzz in Makefile)

config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 65 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 73 insertions(+)

diff --git a/config/common_base b/config/common_base
index 5f2ad94..09782ff 100644
--- a/config/common_base
+++ b/config/common_base
@@ -102,6 +102,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n

# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH=""
diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index 5690bb4..e5f552a 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -37,4 +37,8 @@ DEPDIRS-linuxapp := common
DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
DEPDIRS-bsdapp := common

+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 657c6f4..8cb7432 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -83,6 +83,9 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif

#include <rte_log.h>
#include <rte_memory.h>
@@ -377,6 +380,21 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -393,10 +411,48 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+#endif

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id])
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+#endif
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -507,6 +563,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
return i;
}

@@ -591,6 +651,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 4c659e9..ca8e5fe 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -173,6 +173,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Thomas Monjalon
2017-04-10 10:03:50 UTC
Permalink
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying
attention from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of
available hugepages for application limited by cgroups
or hugetlbfs and memory requested not only from the first
socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each
hugepage to one of requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
introduced and disabled by default because of external
dependency from libnuma.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
Status: Changes Requested
per Sergio's advice: "I would be inclined towards v3 targeting v17.08."
Ilya Maximets
2017-06-06 06:22:16 UTC
Permalink
Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default

config/common_base | 2 +-
lib/librte_eal/Makefile | 2 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 87 ++++++++++++++++++++++++++++++--
mk/rte.app.mk | 1 +
4 files changed, 87 insertions(+), 5 deletions(-)
--
2.7.4
Ilya Maximets
2017-06-06 06:22:17 UTC
Permalink
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

libnuma added as a general dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---
lib/librte_eal/Makefile | 2 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 87 ++++++++++++++++++++++++++++++--
mk/rte.app.mk | 1 +
3 files changed, 86 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index 5690bb4..0a1af3a 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -37,4 +37,6 @@ DEPDIRS-linuxapp := common
DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
DEPDIRS-bsdapp := common

+LDLIBS += -lnuma
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 9c9baf6..35e5bce 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,7 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#include <numaif.h>

#include <rte_log.h>
#include <rte_memory.h>
@@ -358,6 +359,19 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -366,18 +380,71 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+
+ if (orig) {
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == RTE_MAX_NUMA_NODES) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= RTE_MAX_NUMA_NODES;
+ }
+ } else {
+ node_id = j;
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -488,6 +555,9 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+
return i;
}

@@ -572,6 +642,9 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
}
}
}
@@ -1010,6 +1083,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1027,7 +1105,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1070,7 +1149,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..b208e88 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,7 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Ilya Maximets
2017-06-06 06:22:18 UTC
Permalink
Since libnuma is added as a general dependency for EAL,
it is safe to enable LIBRTE_VHOST_NUMA by default.

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_base | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/common_base b/config/common_base
index c858769..db4cc1c 100644
--- a/config/common_base
+++ b/config/common_base
@@ -708,7 +708,7 @@ CONFIG_RTE_LIBRTE_PDUMP=y
# Compile vhost user library
#
CONFIG_RTE_LIBRTE_VHOST=n
-CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_VHOST_DEBUG=n

#
--
2.7.4
Ilya Maximets
2017-06-06 08:13:50 UTC
Permalink
Version 4:
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.

Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default

config/common_base | 2 +-
lib/librte_eal/Makefile | 2 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
mk/rte.app.mk | 1 +
4 files changed, 94 insertions(+), 5 deletions(-)
--
2.7.4
Ilya Maximets
2017-06-06 08:13:51 UTC
Permalink
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

libnuma added as a general dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---
lib/librte_eal/Makefile | 2 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
mk/rte.app.mk | 1 +
3 files changed, 93 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index 5690bb4..0a1af3a 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -37,4 +37,6 @@ DEPDIRS-linuxapp := common
DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
DEPDIRS-bsdapp := common

+LDLIBS += -lnuma
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 9c9baf6..5947434 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,7 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#include <numaif.h>

#include <rte_log.h>
#include <rte_memory.h>
@@ -358,6 +359,19 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -366,18 +380,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+ bool numa_available = true;
+
+ /* Check if kernel supports NUMA. */
+ if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ numa_available = false;
+ }
+
+ if (orig && numa_available) {
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == RTE_MAX_NUMA_NODES) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= RTE_MAX_NUMA_NODES;
+ }
+ } else {
+ node_id = j;
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -488,6 +562,9 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+
return i;
}

@@ -572,6 +649,9 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
}
}
}
@@ -1010,6 +1090,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1027,7 +1112,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1070,7 +1156,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..b208e88 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,7 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Ilya Maximets
2017-06-06 08:13:52 UTC
Permalink
Since libnuma is added as a general dependency for EAL,
it is safe to enable LIBRTE_VHOST_NUMA by default.

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_base | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/common_base b/config/common_base
index c858769..db4cc1c 100644
--- a/config/common_base
+++ b/config/common_base
@@ -708,7 +708,7 @@ CONFIG_RTE_LIBRTE_PDUMP=y
# Compile vhost user library
#
CONFIG_RTE_LIBRTE_VHOST=n
-CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_VHOST_DEBUG=n

#
--
2.7.4
Ilya Maximets
2017-06-06 13:33:38 UTC
Permalink
Sorry for so frequent respinning of the series.

Version 5:
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)

Version 4:
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.

Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default

config/common_base | 2 +-
lib/librte_eal/linuxapp/eal/Makefile | 1 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
4 files changed, 95 insertions(+), 5 deletions(-)
--
2.7.4
Ilya Maximets
2017-06-06 13:33:39 UTC
Permalink
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

libnuma added as a general dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---
lib/librte_eal/linuxapp/eal/Makefile | 1 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
3 files changed, 94 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..1440fc5 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,7 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+LDLIBS += -lnuma

# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 9c9baf6..5947434 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,7 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#include <numaif.h>

#include <rte_log.h>
#include <rte_memory.h>
@@ -358,6 +359,19 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -366,18 +380,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+ bool numa_available = true;
+
+ /* Check if kernel supports NUMA. */
+ if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ numa_available = false;
+ }
+
+ if (orig && numa_available) {
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == RTE_MAX_NUMA_NODES) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= RTE_MAX_NUMA_NODES;
+ }
+ } else {
+ node_id = j;
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -488,6 +562,9 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+
return i;
}

@@ -572,6 +649,9 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
}
}
}
@@ -1010,6 +1090,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1027,7 +1112,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1070,7 +1156,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..5f370c9 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Ilya Maximets
2017-06-06 13:33:40 UTC
Permalink
Since libnuma is added as a general dependency for EAL,
it is safe to enable LIBRTE_VHOST_NUMA by default.

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_base | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/common_base b/config/common_base
index c858769..db4cc1c 100644
--- a/config/common_base
+++ b/config/common_base
@@ -708,7 +708,7 @@ CONFIG_RTE_LIBRTE_PDUMP=y
# Compile vhost user library
#
CONFIG_RTE_LIBRTE_VHOST=n
-CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_VHOST_DEBUG=n

#
--
2.7.4
Ilya Maximets
2017-06-08 11:21:58 UTC
Permalink
Hi everyone,

I just want to clarify current status of these patches.
As I understand, moving to the new build system (for example,
meson+ninja as proposed[1] by Bruce) is a very long process.
But we have issues with imbalanced memory allocation now, and
IMHO it's better to fix them in a near future.

Latest version (v5) of balanced allocation patches adds linbuma
as general unconditional dependency which conflicts with the
current DPDK policies.

So, there are 2 option:

1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.

2. Keep patch as it is now and make everyone install libnuma
for successful build.

I have no preferences about above options. I'm asking your opinions.

Bruce, Sergio, Thomas, what do you think?

[1] http://dpdk.org/ml/archives/dev/2017-June/067428.html

Best regards, Ilya Maximets.
Post by Ilya Maximets
Sorry for so frequent respinning of the series.
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.
* rebased (fuzz in Makefile)
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default
config/common_base | 2 +-
lib/librte_eal/linuxapp/eal/Makefile | 1 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
4 files changed, 95 insertions(+), 5 deletions(-)
Bruce Richardson
2017-06-08 12:14:03 UTC
Permalink
Post by Ilya Maximets
Hi everyone,
I just want to clarify current status of these patches.
As I understand, moving to the new build system (for example,
meson+ninja as proposed[1] by Bruce) is a very long process.
But we have issues with imbalanced memory allocation now, and
IMHO it's better to fix them in a near future.
Latest version (v5) of balanced allocation patches adds libnuma
as general unconditional dependency which conflicts with the
current DPDK policies.
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
I have no preferences about above options. I'm asking your opinions.
Bruce, Sergio, Thomas, what do you think?
[1] http://dpdk.org/ml/archives/dev/2017-June/067428.html
Best regards, Ilya Maximets.
I would be ok with having libnuma as a dependency, so I think I'd prefer
option 2 to 1, assuming libnuma is available in all major Linux distros.

/Bruce
Sergio Gonzalez Monroy
2017-06-08 15:44:49 UTC
Permalink
Post by Bruce Richardson
Post by Ilya Maximets
Hi everyone,
I just want to clarify current status of these patches.
As I understand, moving to the new build system (for example,
meson+ninja as proposed[1] by Bruce) is a very long process.
But we have issues with imbalanced memory allocation now, and
IMHO it's better to fix them in a near future.
Latest version (v5) of balanced allocation patches adds libnuma
as general unconditional dependency which conflicts with the
current DPDK policies.
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
I have no preferences about above options. I'm asking your opinions.
Bruce, Sergio, Thomas, what do you think?
[1] http://dpdk.org/ml/archives/dev/2017-June/067428.html
Best regards, Ilya Maximets.
I would be ok with having libnuma as a dependency, so I think I'd prefer
option 2 to 1, assuming libnuma is available in all major Linux distros.
/Bruce
+1 on option 2 (current patch and libnuma as DPDK dependency).

Sergio
Ilya Maximets
2017-06-14 06:11:27 UTC
Permalink
Post by Sergio Gonzalez Monroy
Post by Bruce Richardson
Post by Ilya Maximets
Hi everyone,
I just want to clarify current status of these patches.
As I understand, moving to the new build system (for example,
meson+ninja as proposed[1] by Bruce) is a very long process.
But we have issues with imbalanced memory allocation now, and
IMHO it's better to fix them in a near future.
Latest version (v5) of balanced allocation patches adds libnuma
as general unconditional dependency which conflicts with the
current DPDK policies.
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
I have no preferences about above options. I'm asking your opinions.
Bruce, Sergio, Thomas, what do you think?
[1] http://dpdk.org/ml/archives/dev/2017-June/067428.html
Best regards, Ilya Maximets.
I would be ok with having libnuma as a dependency, so I think I'd prefer
option 2 to 1, assuming libnuma is available in all major Linux distros.
/Bruce
+1 on option 2 (current patch and libnuma as DPDK dependency).
Sergio
Ok. In this case I'm waiting for review.

And someone need to install libnuma development package in automatic
build test environment. Otherwise there will be constant compilation
test failures like this:
http://dpdk.org/ml/archives/test-report/2017-June/021437.html

Best regards, Ilya Maximets.
Hemant Agrawal
2017-06-19 11:10:10 UTC
Permalink
Post by Ilya Maximets
Post by Sergio Gonzalez Monroy
Post by Bruce Richardson
Post by Ilya Maximets
Hi everyone,
I just want to clarify current status of these patches.
As I understand, moving to the new build system (for example,
meson+ninja as proposed[1] by Bruce) is a very long process.
But we have issues with imbalanced memory allocation now, and
IMHO it's better to fix them in a near future.
Latest version (v5) of balanced allocation patches adds linbuma
as general unconditional dependency which conflicts with the
current DPDK policies.
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
I have no preferences about above options. I'm asking your opinions.
Bruce, Sergio, Thomas, what do you think?
[1] http://dpdk.org/ml/archives/dev/2017-June/067428.html
Best regards, Ilya Maximets.
I would be ok with having libnuma as a dependency, so I think I'd prefer
option 2 to 1, assuming libnuma is available in all major Linux distros.
/Bruce
+1 on option 2 (current patch and libnuma as DPDK dependency).
Sergio
Ok. In this case I'm waiting for review.
And someone need to install libnuma development package in automatic
build test environment. Otherwise there will be constant compilation
http://dpdk.org/ml/archives/test-report/2017-June/021437.html
Best regards, Ilya Maximets.
+1 for option 1
It will be an issue and an undesired dependency for SoCs not supporting
NUMA architecture.

It can be added to the config, who desired to use it by default.
Thomas Monjalon
2017-06-20 13:07:11 UTC
Permalink
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
Ilya Maximets
2017-06-20 13:58:23 UTC
Permalink
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
We still don't have a dynamic build-time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma for successful linkage.
This means that the only option to not have libnuma as dependency
is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.

There is, actually, the third option (besides 2 already described):

3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and *enable* it by default.
In this case anyone who doesn't want to have libnuma as dependency
will be able to disable the config option manually.

Thomas, what do you think? Bruce? Sergio?

P.S. We're always able to implement syscall wrappers by hands without any
external dependencies, but I don't think it's a good decision.
Thomas Monjalon
2017-06-20 14:35:28 UTC
Permalink
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
We're still don't have dynamic build time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma for successful linkage.
This means that the only option to not have libnuma as dependency
is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.
3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and *enable* it by default.
In this case anyone who doesn't want to have libnuma as dependency
will be able to disable the config option manually.
Thomas, what do you think? Bruce? Sergio?
It should be enabled on x86 and ppc, and disabled in other
default configurations (ARM for now).
Post by Ilya Maximets
P.S. We're always able to implement syscall wrappers by hands without any
external dependencies, but I don't think it's a good decision.
I agree to use libnuma instead of re-inventing the wheel.
Let's just make it optional at build time and fallback on one node
if disabled.
Sergio Gonzalez Monroy
2017-06-20 14:58:50 UTC
Permalink
Post by Thomas Monjalon
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
Ilya, I missed that libnuma is not supported on ARM.
Post by Thomas Monjalon
Post by Ilya Maximets
We're still don't have dynamic build time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma for successful linkage.
This means that the only option to not have libnuma as dependency
is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.
3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and *enable* it by default.
In this case anyone who doesn't want to have libnuma as dependency
will be able to disable the config option manually.
Thomas, what do you think? Bruce? Sergio?
It should be enabled on x86 and ppc, and disabled in other
default configurations (ARM for now).
Agree.
Post by Thomas Monjalon
Post by Ilya Maximets
P.S. We're always able to implement syscall wrappers by hands without any
external dependencies, but I don't think it's a good decision.
I agree to use libnuma instead of re-inventing the wheel.
Let's just make it optional at build time and fallback on one node
if disabled.
That is the simple way out.

Sergio
Sergio Gonzalez Monroy
2017-06-20 15:51:49 UTC
Permalink
-----Original Message-----
Date: Tue, 20 Jun 2017 15:58:50 +0100
Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.1.1
Post by Thomas Monjalon
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
Ilya, I missed that libnuma is not supported on ARM.
It is supported on arm64 and arm64 has NUMA machines(thunderx, thunderx2) too.
[dpdk.org] $ dpkg-query -L libnuma-dev
/.
/usr
/usr/lib
/usr/lib/aarch64-linux-gnu
/usr/lib/aarch64-linux-gnu/libnuma.a
/usr/share
/usr/share/man
/usr/share/man/man3
/usr/share/man/man3/numa.3.gz
/usr/share/doc
/usr/share/doc/libnuma-dev
/usr/share/doc/libnuma-dev/copyright
/usr/include
/usr/include/numaif.h
/usr/include/numa.h
/usr/include/numacompat1.h
/usr/lib/aarch64-linux-gnu/libnuma.so
Is it ARMv7 then the only supported arch missing libnuma support?
Post by Thomas Monjalon
Post by Ilya Maximets
We're still don't have dynamic build time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma for successful linkage.
This means that the only option to not have libnuma as dependency
is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.
3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and *enable* it by default.
In this case anyone who doesn't want to have libnuma as dependency
will be able to disable the config option manually.
Thomas, what do you think? Bruce? Sergio?
It should be enabled on x86 and ppc, and disabled in other
default configurations (ARM for now).
Agree.
Post by Thomas Monjalon
Post by Ilya Maximets
P.S. We're always able to implement syscall wrappers by hands without any
external dependencies, but I don't think it's a good decision.
I agree to use libnuma instead of re-inventing the wheel.
Let's just make it optional at build time and fallback on one node
if disabled.
That is the simple way out.
Sergio
Hemant Agrawal
2017-06-21 08:14:20 UTC
Permalink
-----Original Message-----
Date: Tue, 20 Jun 2017 15:58:50 +0100
Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.1.1
Post by Thomas Monjalon
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
Ilya, I missed that libnuma is not supported on ARM.
It is supported on arm64 and arm64 has NUMA machines(thunderx, thunderx2) too.
[dpdk.org] $ dpkg-query -L libnuma-dev
/.
/usr
/usr/lib
/usr/lib/aarch64-linux-gnu
/usr/lib/aarch64-linux-gnu/libnuma.a
/usr/share
/usr/share/man
/usr/share/man/man3
/usr/share/man/man3/numa.3.gz
/usr/share/doc
/usr/share/doc/libnuma-dev
/usr/share/doc/libnuma-dev/copyright
/usr/include
/usr/include/numaif.h
/usr/include/numa.h
/usr/include/numacompat1.h
/usr/lib/aarch64-linux-gnu/libnuma.so
1. There are many machines (arm/ppc), which do not support NUMA.

https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA

2. I could not locate it by default in Linaro toolchains.

3. Since this is not a common across all platform. This option should
not be added to the common_base or common configs. It can be added to
any architecture configuration, which needs it.

Regards,
Hemant
Post by Thomas Monjalon
Post by Ilya Maximets
We're still don't have dynamic build time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma for successful linkage.
This means that the only option to not have libnuma as dependency
is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.
3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and *enable* it by default.
In this case anyone who doesn't want to have libnuma as dependency
will be able to disable the config option manually.
Thomas, what do you think? Bruce? Sergio?
It should be enabled on x86 and ppc, and disabled in other
default configurations (ARM for now).
Agree.
Post by Thomas Monjalon
Post by Ilya Maximets
P.S. We're always able to implement syscall wrappers by hands without any
external dependencies, but I don't think it's a good decision.
I agree to use libnuma instead of re-inventing the wheel.
Let's just make it optional at build time and fallback on one node
if disabled.
That is the simple way out.
Sergio
Sergio Gonzalez Monroy
2017-06-21 08:25:25 UTC
Permalink
Post by Hemant Agrawal
-----Original Message-----
Date: Tue, 20 Jun 2017 15:58:50 +0100
Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.1.1
Post by Thomas Monjalon
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config option
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
Ilya, I missed that libnuma is not supported on ARM.
It is supported on arm64 and arm64 has NUMA machines(thunderx, thunderx2) too.
[dpdk.org] $ dpkg-query -L libnuma-dev
/.
/usr
/usr/lib
/usr/lib/aarch64-linux-gnu
/usr/lib/aarch64-linux-gnu/libnuma.a
/usr/share
/usr/share/man
/usr/share/man/man3
/usr/share/man/man3/numa.3.gz
/usr/share/doc
/usr/share/doc/libnuma-dev
/usr/share/doc/libnuma-dev/copyright
/usr/include
/usr/include/numaif.h
/usr/include/numa.h
/usr/include/numacompat1.h
/usr/lib/aarch64-linux-gnu/libnuma.so
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources,
but Jerin proved that there is support for it.

http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Post by Hemant Agrawal
2. I could not locate it by default in Linaro toolchains.
3. Since this is not a common across all platform. This option should
not be added to the common_base or common configs. It can be added to
any architecture configuration, which needs it.
So is it thunderx the only arm64 to enable this feature by default?
I thought the dependency was the libnuma library support itself.

Thanks,
Sergio
Post by Hemant Agrawal
Regards,
Hemant
Post by Thomas Monjalon
Post by Ilya Maximets
We're still don't have dynamic build time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma for successful linkage.
This means that the only option to not have libnuma as dependency
is to return back configuration option
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.
3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and *enable* it by default.
In this case anyone who doesn't want to have libnuma as dependency
will be able to disable the config option manually.
Thomas, what do you think? Bruce? Sergio?
It should be enabled on x86 and ppc, and disabled in other
default configurations (ARM for now).
Agree.
Post by Thomas Monjalon
Post by Ilya Maximets
P.S. We're always able to implement syscall wrappers by hands without any
external dependencies, but I don't think it's a good decision.
I agree to use libnuma instead of re-inventing the wheel.
Let's just make it optional at build time and fallback on one node
if disabled.
That is the simple way out.
Sergio
Ilya Maximets
2017-06-21 08:36:16 UTC
Permalink
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
-----Original Message-----
Date: Tue, 20 Jun 2017 15:58:50 +0100
Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.1.1
Post by Thomas Monjalon
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and disable it by default.
2. Keep patch as it is now and make everyone install libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
Ilya, I missed that libnuma is not supported on ARM.
It is supported on arm64 and arm64 has NUMA machines(thunderx, thunderx2) too.
[dpdk.org] $ dpkg-query -L libnuma-dev
/.
/usr
/usr/lib
/usr/lib/aarch64-linux-gnu
/usr/lib/aarch64-linux-gnu/libnuma.a
/usr/share
/usr/share/man
/usr/share/man/man3
/usr/share/man/man3/numa.3.gz
/usr/share/doc
/usr/share/doc/libnuma-dev
/usr/share/doc/libnuma-dev/copyright
/usr/include
/usr/include/numaif.h
/usr/include/numa.h
/usr/include/numacompat1.h
/usr/lib/aarch64-linux-gnu/libnuma.so
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Post by Hemant Agrawal
2. I could not locate it by default in Linaro toolchains.
3. Since this is not a common across all platform. This option should not be added to the common_base or common configs. It can be added to any architecture configuration, which needs it.
So is it thunderx the only arm64 to enable this feature by default?
I thought the dependency was the libnuma library support itself.
ARMv7 is the only architecture without libnuma package in common distros.
So, in v6 I enabled this feature by default for x86, ppc and thunderx.
I didn't enable it for the whole ARMv8 just because thunderx is the only
platform which supports NUMA and has special defconfig in DPDK repository.

Best regards, Ilya Maximets.
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
Post by Thomas Monjalon
Post by Ilya Maximets
We're still don't have dynamic build time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma for successful linkage.
This means that the only option to not have libnuma as dependency
is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.
3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and *enable* it by default.
In this case anyone who doesn't want to have libnuma as dependency
will be able to disable the config option manually.
Thomas, what do you think? Bruce? Sergio?
It should be enabled on x86 and ppc, and disabled in other
default configurations (ARM for now).
Agree.
Post by Thomas Monjalon
Post by Ilya Maximets
P.S. We're always able to implement syscall wrappers by hands without any
external dependencies, but I don't think it's a good decision.
I agree to use libnuma instead of re-inventing the wheel.
Let's just make it optional at build time and fallback on one node
if disabled.
That is the simple way out.
Sergio
Jerin Jacob
2017-06-21 08:41:58 UTC
Permalink
-----Original Message-----
Date: Wed, 21 Jun 2017 09:25:25 +0100
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.1.1
Post by Hemant Agrawal
-----Original Message-----
Date: Tue, 20 Jun 2017 15:58:50 +0100
Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.1.1
Post by Thomas Monjalon
Post by Ilya Maximets
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
1. Return back config
option
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version
of the patch and disable it by
default.
2. Keep patch as it is now
and make everyone install
libnuma
for successful build.
+1 for option 1
It will be a issue and undesired dependency for
SoCs, not supporting
NUMA architecture.
It can be added to the config, who desired to use it by default.
Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?
Ilya, I missed that libnuma is not supported on ARM.
It is supported on arm64 and arm64 has NUMA machines(thunderx, thunderx2) too.
[dpdk.org] $ dpkg-query -L libnuma-dev
/.
/usr
/usr/lib
/usr/lib/aarch64-linux-gnu
/usr/lib/aarch64-linux-gnu/libnuma.a
/usr/share
/usr/share/man
/usr/share/man/man3
/usr/share/man/man3/numa.3.gz
/usr/share/doc
/usr/share/doc/libnuma-dev
/usr/share/doc/libnuma-dev/copyright
/usr/include
/usr/include/numaif.h
/usr/include/numa.h
/usr/include/numacompat1.h
/usr/lib/aarch64-linux-gnu/libnuma.so
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA support has been added in the 4.7 kernel.
I guess we are talking about a build-time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?

How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".

Some example in linux kernel build system:
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
Post by Hemant Agrawal
2. I could not locate it by default in Linaro toolchains.
3. Since this is not a common across all platform. This option should
not be added to the common_base or common configs. It can be added to
any architecture configuration, which needs it.
So is it thunderx the only arm64 to enable this feature by default?
I thought the dependency was the libnuma library support itself.
Thanks,
Sergio
Post by Hemant Agrawal
Regards,
Hemant
Post by Thomas Monjalon
Post by Ilya Maximets
We're still don't have dynamic build time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma for successful linkage.
This means that the only option to not have libnuma as dependency
is to return back configuration option
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.
3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
from the first version of the patch and *enable* it by default.
In this case anyone who doesn't want to have
libnuma as dependency
will be able to disable the config option manually.
Thomas, what do you think? Bruce? Sergio?
It should be enabled on x86 and ppc, and disabled in other
default configurations (ARM for now).
Agree.
Post by Thomas Monjalon
Post by Ilya Maximets
P.S. We're always able to implement syscall wrappers by
hands without any
external dependencies, but I don't think it's a good decision.
I agree to use libnuma instead of re-inventing the wheel.
Let's just make it optional at build time and fallback on one node
if disabled.
That is the simple way out.
Sergio
Thomas Monjalon
2017-06-21 08:49:14 UTC
Permalink
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
If someone really wants to build DPDK without NUMA for x86, he should
disable it in the build config file.
Jerin Jacob
2017-06-21 09:27:45 UTC
Permalink
-----Original Message-----
Date: Wed, 21 Jun 2017 10:49:14 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".

➜ 83xx [ctest] $ cat main.c
#include <numaif.h>
#include <stdio.h>
#include <errno.h>

int main()
{
/* Check if kernel supports NUMA. */
if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
printf("NUMA is not supported.\n");
} else {
printf("NUMA is supported.\n");
}

}
➜ 83xx [ctest] $ gcc -Wall main.c -lnuma
# On non numa system
➜ 83xx [ctest] $ ./a.out
NUMA is not supported

# On numa machine
➜ GB-2S [~] $ ./a.out
NUMA is supported.
If someone really wants to build DPDK without NUMA for x86, he should
disable it in the build config file.
Thomas Monjalon
2017-06-21 09:58:12 UTC
Permalink
-----Original Message-----
Date: Wed, 21 Jun 2017 10:49:14 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".
I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it, is to fail on the include of numaif.h.
Jerin Jacob
2017-06-21 10:29:41 UTC
Permalink
-----Original Message-----
Date: Wed, 21 Jun 2017 11:58:12 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
-----Original Message-----
Date: Wed, 21 Jun 2017 10:49:14 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".
I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it, is to fail on the include of numaif.h.
libnuma is not really _architecture_ dependent.
 
Ilya Maximets' patch disables NUMA support in the common arm64 config. I
think it is not correct; we should not disable it in any arch's generic config.

IMO, It should be enabled by default in common config and then we can
detect the presence of numaif.h, if not available OR a target does not need it
explicitly, proceed with disabling
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.

No strong opinion on "failing the build" vs "printing a warning" in the
absence of numaif.h
Ilya Maximets
2017-06-21 10:36:58 UTC
Permalink
-----Original Message-----
Date: Wed, 21 Jun 2017 11:58:12 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
-----Original Message-----
Date: Wed, 21 Jun 2017 10:49:14 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".
I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it, is to fail on the include of numaif.h.
libnuma is not really _architecture_ depended.
Ilya Maximets patch disables NUMA support in common arm64 config.I
think, It is not correct, We should not disable on any archs generic config.
IMO, It should be enabled by default in common config and then we can
detect the presence of numaif.h, if not available OR a target does not need it
explicitly, proceed with disabling
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
Detecting headers is impossible until DPDK has a dynamic build
configuration system like autotools, CMake or meson.
Right now we just can't do that.
No strong opinion on "failing the build" vs "printing a warning" in the
absence of numaif.h
Jerin Jacob
2017-06-21 11:22:43 UTC
Permalink
-----Original Message-----
Date: Wed, 21 Jun 2017 13:36:58 +0300
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
-----Original Message-----
Date: Wed, 21 Jun 2017 11:58:12 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
-----Original Message-----
Date: Wed, 21 Jun 2017 10:49:14 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".
I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it, is to fail on the include of numaif.h.
libnuma is not really _architecture_ depended.
Ilya Maximets patch disables NUMA support in common arm64 config.I
think, It is not correct, We should not disable on any archs generic config.
IMO, It should be enabled by default in common config and then we can
detect the presence of numaif.h, if not available OR a target does not need it
explicitly, proceed with disabling
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
Detecting of headers is impossible until dpdk doesn't have dynamic build
configuration system like autotools, CMake or meson.
Right now we just can't do that.
I agree. Unless if we do something like linux kernel does it below
http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh

Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.
No strong opinion on "failing the build" vs "printing a warning" in the
absence of numaif.h
Thomas Monjalon
2017-06-21 11:29:31 UTC
Permalink
Post by Jerin Jacob
Post by Ilya Maximets
Post by Jerin Jacob
Post by Thomas Monjalon
Post by Jerin Jacob
Post by Thomas Monjalon
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".
I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it, is to fail on the include of numaif.h.
libnuma is not really _architecture_ depended.
Ilya Maximets patch disables NUMA support in common arm64 config.I
think, It is not correct, We should not disable on any archs generic config.
IMO, It should be enabled by default in common config and then we can
detect the presence of numaif.h, if not available OR a target does not need it
explicitly, proceed with disabling
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
Detecting of headers is impossible until dpdk doesn't have dynamic build
configuration system like autotools, CMake or meson.
Right now we just can't do that.
I agree. Unless if we do something like linux kernel does it below
http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh
Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.
In this case, you can enable it in common_base and disable it only for
armv7 and dpaa2.
Hemant Agrawal
2017-06-27 09:13:55 UTC
Permalink
-----Original Message-----
Date: Wed, 21 Jun 2017 13:36:58 +0300
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
-----Original Message-----
Date: Wed, 21 Jun 2017 11:58:12 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
-----Original Message-----
Date: Wed, 21 Jun 2017 10:49:14 +0200
Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".
I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it, is to fail on the include of numaif.h.
libnuma is not really _architecture_ depended.
Ilya Maximets patch disables NUMA support in common arm64 config.I
think, It is not correct, We should not disable on any archs generic config.
IMO, It should be enabled by default in common config and then we can
detect the presence of numaif.h, if not available OR a target does not need it
explicitly, proceed with disabling
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
Detecting of headers is impossible until dpdk doesn't have dynamic build
configuration system like autotools, CMake or meson.
Right now we just can't do that.
I agree. Unless if we do something like linux kernel does it below
http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh
Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.
No, this is not acceptable. it should not be enabled in generic arm64.
It can be enabled in specific ARM platforms, which support NUMA
architecture.
We also use generic ARM code on various of our platform when running
with non-dpaa and/or virtio-net. So enabling it will break all those
platforms.
No strong opinion on "failing the build" vs "printing a warning" in the
absence of numaif.h
Thomas Monjalon
2017-06-27 09:26:28 UTC
Permalink
Post by Hemant Agrawal
Post by Jerin Jacob
Post by Ilya Maximets
Post by Jerin Jacob
Post by Thomas Monjalon
Post by Jerin Jacob
Post by Thomas Monjalon
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".
I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it, is to fail on the include of numaif.h.
libnuma is not really _architecture_ depended.
Ilya Maximets patch disables NUMA support in common arm64 config.I
think, It is not correct, We should not disable on any archs generic config.
IMO, It should be enabled by default in common config and then we can
detect the presence of numaif.h, if not available OR a target does not need it
explicitly, proceed with disabling
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
Detecting of headers is impossible until dpdk doesn't have dynamic build
configuration system like autotools, CMake or meson.
Right now we just can't do that.
I agree. Unless if we do something like linux kernel does it below
http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh
Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.
No, this is not acceptable. it should not be enabled in generic arm64.
It can be enabled in specific ARM platforms, which support NUMA
architecture.
We also use generic ARM code on various of our platform when running
with non-dpaa and/or virtio-net. So enabling it will break all those
platforms.
Which platforms?
It is your non-upstreamed code. You have to deal with it.
You should disable NUMA in the config of these platforms.
Hemant Agrawal
2017-06-27 09:48:14 UTC
Permalink
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Jerin Jacob
Post by Ilya Maximets
Post by Jerin Jacob
Post by Thomas Monjalon
Post by Jerin Jacob
Post by Thomas Monjalon
Post by Jerin Jacob
Post by Sergio Gonzalez Monroy
Post by Hemant Agrawal
1. There are many machines (arm/ppc), which do not support NUMA.
https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, but
Jerin proved that there is support for it.
http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl
Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
I guess we are talking about build time time dependency with libnuma here.
Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
libnuma if it is present in rootfs. Just that at runtime, it will return
NUMA support not available. Correct?
How hard is detect the presence of "numaif.h" if existing build system does not
support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
if build environment has "numaif.h".
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
I think we should not try to detect numaif.h, because it should be
an error on platform supporting NUMA.
I have installed libnuma on a NUMA and non NUMA machine.
Compiled and ran following code on those machine and it could detect
the numa availability. Could you add more details on the "error on
platform supporting NUMA".
I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it, is to fail on the include of numaif.h.
libnuma is not really _architecture_ depended.
Ilya Maximets patch disables NUMA support in common arm64 config.I
think, It is not correct, We should not disable on any archs generic config.
IMO, It should be enabled by default in common config and then we can
detect the presence of numaif.h, if not available OR a target does not need it
explicitly, proceed with disabling
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
Detecting of headers is impossible until dpdk doesn't have dynamic build
configuration system like autotools, CMake or meson.
Right now we just can't do that.
I agree. Unless if we do something like linux kernel does it below
http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh
Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.
No, this is not acceptable. it should not be enabled in generic arm64.
It can be enabled in specific ARM platforms, which support NUMA
architecture.
We also use generic ARM code on various of our platform when running
with non-dpaa and/or virtio-net. So enabling it will break all those
platforms.
Which platforms?
It is your non-upstreamed code. You have to deal with it.
You should disable NUMA in the config of these platforms.
See my reply in the other thread. This has nothing to do with upstreaming.

All NXP - low end non-dpaa platforms, which don't have any platform
specific code, we use "arm64-armv8a-linuxapp-gcc" as the build config.

There is no need to create special configs for these platforms.
Creating a "non-NUMA" generic config will be an over-kill.
Ilya Maximets
2017-06-21 08:04:08 UTC
Permalink
Version 6:
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.

Version 5:
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)

Version 4:
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.

Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default

config/common_base | 1 +
config/common_linuxapp | 3 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 4 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 4 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 123 insertions(+), 4 deletions(-)
--
2.7.4
Ilya Maximets
2017-06-21 08:04:09 UTC
Permalink
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp on x86, ppc and thunderx.
Enabling of this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_base | 1 +
config/common_linuxapp | 2 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 3 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 119 insertions(+), 4 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..b9efdf2 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n

#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..5eb568b 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,8 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y

+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..5c5226a 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y

+# NUMA is not supported on ARM
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n

diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 9f32766..d9667d3 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
# to address minimum DMA alignment across all arm64 implementations.
CONFIG_RTE_CACHE_LINE_SIZE=128

+# Most ARMv8 systems doesn't support NUMA
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
CONFIG_RTE_EAL_IGB_UIO=n

CONFIG_RTE_LIBRTE_FM10K_PMD=n
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index f64da4c..e486c1d 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -37,6 +37,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
CONFIG_RTE_MAX_NUMA_NODES=2
CONFIG_RTE_MAX_LCORE=96

+# ThunderX supports NUMA
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
#
# Compile PMD for octeontx sso event device
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..bd10489 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif

# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..9a0087c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,9 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif

#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +351,21 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +374,82 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+ bool numa_available = true;
+
+ /* Check if kernel supports NUMA. */
+ if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ numa_available = false;
+ }
+
+ if (orig && numa_available) {
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == RTE_MAX_NUMA_NODES) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= RTE_MAX_NUMA_NODES;
+ }
+ } else {
+ node_id = j;
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -478,6 +560,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
return i;
}

@@ -562,6 +648,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1091,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1113,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1157,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..cfc743a 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Thomas Monjalon
2017-06-21 08:51:58 UTC
Permalink
Post by Ilya Maximets
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
We can stop inserting LIBRTE in our config options.
CONFIG_RTE_EAL_ is long enough :)
Bruce Richardson
2017-06-21 08:58:45 UTC
Permalink
Post by Thomas Monjalon
Post by Ilya Maximets
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
We can stop inserting LIBRTE in our config options.
CONFIG_RTE_EAL_ is long enough :)
Consistency. While I agree it's unneeded, we should have it in all or none,
and unless we change a bunch of existing ones, I think it means we have
it in all.
Ilya Maximets
2017-06-21 09:25:51 UTC
Permalink
Post by Bruce Richardson
Post by Thomas Monjalon
Post by Ilya Maximets
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
We can stop inserting LIBRTE in our config options.
CONFIG_RTE_EAL_ is long enough :)
Consistency. While I agree it's unneeded should have it in all or none,
and unless we change a bunch of existing ones, I think it means we have
it in all.
Hmm. There are few options named CONFIG_RTE_EAL_* and CONFIG_RTE_LIBRTE_EAL_*.
Also there is one strange CONFIG_RTE_LIBEAL_(USE_HPET).

Maybe we can prepare the patch to unify all that options later?
Or should I rename CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES to
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES in this series?
Bruce Richardson
2017-06-21 09:34:05 UTC
Permalink
Post by Ilya Maximets
Post by Bruce Richardson
Post by Thomas Monjalon
Post by Ilya Maximets
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
We can stop inserting LIBRTE in our config options.
CONFIG_RTE_EAL_ is long enough :)
Consistency. While I agree it's unneeded should have it in all or none,
and unless we change a bunch of existing ones, I think it means we have
it in all.
Hmm. There are few options named CONFIG_RTE_EAL_* and CONFIG_RTE_LIBRTE_EAL_*.
Also there is one strange CONFIG_RTE_LIBEAL_(USE_HPET).
Maybe we can prepare the patch to unify all that options later?
Or should I rename CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES to
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES in this series?
Sure, if it's already inconsistent for EAL, then use the shorter name.
We can fix up the others later. These options may move or disappear if
we look to move to a different build system e.g. [1], so that may be a good
opportunity to adjust some names.

/Bruce

[1] http://dpdk.org/dev/patchwork/patch/25104/
Thomas Monjalon
2017-06-21 09:28:03 UTC
Permalink
Post by Bruce Richardson
Post by Thomas Monjalon
Post by Ilya Maximets
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
We can stop inserting LIBRTE in our config options.
CONFIG_RTE_EAL_ is long enough :)
Consistency. While I agree it's unneeded should have it in all or none,
and unless we change a bunch of existing ones, I think it means we have
it in all.
It is already not consistent.
It could be cleaned in next release.
For now, we have to decide which one we prefer.
I prefer CONFIG_RTE_EAL_ and CONFIG_RTE_PMD_ for drivers.
Ilya Maximets
2017-06-21 08:04:10 UTC
Permalink
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 1 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 1 +
4 files changed, 4 insertions(+)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 5eb568b..d2658a2 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -36,6 +36,7 @@ CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y

CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y

CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 5c5226a..cef6789 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y

# NUMA is not supported on ARM
CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index d9667d3..069e4ed 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_CACHE_LINE_SIZE=128

# Most ARMv8 systems doesn't support NUMA
CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

CONFIG_RTE_EAL_IGB_UIO=n

diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index e486c1d..e54845c 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -39,6 +39,7 @@ CONFIG_RTE_MAX_LCORE=96

# ThunderX supports NUMA
CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y

#
# Compile PMD for octeontx sso event device
--
2.7.4
Ilya Maximets
2017-06-21 10:08:29 UTC
Permalink
Version 7:
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.

Version 5:
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)

Version 4:
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.

Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default

config/common_base | 1 +
config/common_linuxapp | 3 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 4 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 4 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 123 insertions(+), 4 deletions(-)
--
2.7.4
Ilya Maximets
2017-06-21 10:08:30 UTC
Permalink
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp on x86, ppc and thunderx.
Enabling of this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_base | 1 +
config/common_linuxapp | 2 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 3 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 119 insertions(+), 4 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n

#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..050526f 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,8 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y

+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y

+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n

diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 9f32766..2c67cdc 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
# to address minimum DMA alignment across all arm64 implementations.
CONFIG_RTE_CACHE_LINE_SIZE=128

+# Most ARMv8 systems doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
CONFIG_RTE_EAL_IGB_UIO=n

CONFIG_RTE_LIBRTE_FM10K_PMD=n
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index f64da4c..3e79fa8 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -37,6 +37,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
CONFIG_RTE_MAX_NUMA_NODES=2
CONFIG_RTE_MAX_LCORE=96

+# ThunderX supports NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
#
# Compile PMD for octeontx sso event device
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif

# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..ceadca7 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,9 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif

#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +351,21 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_BITS)
+#endif
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +374,82 @@ static int huge_wrap_sigsetjmp(void)
* map contiguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+ bool numa_available = true;
+
+ /* Check if kernel supports NUMA. */
+ if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ numa_available = false;
+ }
+
+ if (orig && numa_available) {
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == RTE_MAX_NUMA_NODES) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= RTE_MAX_NUMA_NODES;
+ }
+ } else {
+ node_id = j;
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -478,6 +560,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
return i;
}

@@ -562,6 +648,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1091,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1113,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1157,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Ilya Maximets
2017-06-21 10:08:31 UTC
Permalink
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 1 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 1 +
4 files changed, 4 insertions(+)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 050526f..2e44434 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -43,6 +43,7 @@ CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y

# NUMA is not supported on ARM
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 2c67cdc..d190afb 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_CACHE_LINE_SIZE=128

# Most ARMv8 systems doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

CONFIG_RTE_EAL_IGB_UIO=n

diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index 3e79fa8..7b07b7d 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -39,6 +39,7 @@ CONFIG_RTE_MAX_LCORE=96

# ThunderX supports NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y

#
# Compile PMD for octeontx sso event device
--
2.7.4
Hemant Agrawal
2017-06-27 09:20:34 UTC
Permalink
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 1 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 1 +
4 files changed, 4 insertions(+)
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 050526f..2e44434 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -43,6 +43,7 @@ CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
# NUMA is not supported on ARM
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 2c67cdc..d190afb 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
# Most ARMv8 systems doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
CONFIG_RTE_EAL_IGB_UIO=n
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index 3e79fa8..7b07b7d 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -39,6 +39,7 @@ CONFIG_RTE_MAX_LCORE=96
# ThunderX supports NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
#
# Compile PMD for octeontx sso event device
This particular version of patch is:
Acked-by: Hemant Agrawal <***@nxp.com>
Ilya Maximets
2017-06-26 10:44:08 UTC
Permalink
So, what do you think about this version?
Is it ready for merge or some additional changes needed?

Best regards, Ilya Maximets.
Post by Ilya Maximets
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.
* rebased (fuzz in Makefile)
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default
config/common_base | 1 +
config/common_linuxapp | 3 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 4 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 4 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 123 insertions(+), 4 deletions(-)
Jerin Jacob
2017-06-26 14:07:07 UTC
Permalink
-----Original Message-----
Date: Mon, 26 Jun 2017 13:44:08 +0300
Subject: Re: [PATCH v7 0/2] Balanced allocation of hugepages
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
So, what do you think about this version?
Is it ready for merge or some additional changes needed?
Looks like following comment is not addressed.
http://dpdk.org/ml/archives/dev/2017-June/068398.html
Best regards, Ilya Maximets.
Post by Ilya Maximets
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.
* rebased (fuzz in Makefile)
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default
config/common_base | 1 +
config/common_linuxapp | 3 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 4 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 4 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 123 insertions(+), 4 deletions(-)
Sergio Gonzalez Monroy
2017-06-26 15:33:47 UTC
Permalink
Post by Ilya Maximets
So, what do you think about this version?
Is it ready for merge or some additional changes needed?
I was just having another look at it and was wondering if we should
re-set the old policy instead of DEFAULT?

Also noticed that we probably should increase essential_memory by
hugepage_sz in case of SIGBUS? I think there is an issue if we have more
than one size.

Thanks,
Sergio
Post by Ilya Maximets
Best regards, Ilya Maximets.
Post by Ilya Maximets
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.
* rebased (fuzz in Makefile)
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default
config/common_base | 1 +
config/common_linuxapp | 3 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 4 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 4 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 123 insertions(+), 4 deletions(-)
Ilya Maximets
2017-06-27 08:42:36 UTC
Permalink
Post by Ilya Maximets
So, what do you think about this version?
Is it ready for merge or some additional changes needed?
I was just having another look at it and was wondering if we should re-set the old policy instead of DEFAULT?
Yes. I tried to do that previously, but it requires some manipulations
get maximum nodemask size supported by kernel. So, I've implemented
this behaviour with help of libnuma which makes a lot of checks while
library initialisation (constructor). I'll send v8 with that soon.
Also noticed that we probably should increase essential_memory by hugepage_sz in
case of SIGBUS? I think there is an issue if we have more than one size.
Good catch. Also fixed in v8. Additionally I found that we need to restore
old mempolicy in case of any error. So I replaced all the 'return i' to
the out to proper termination point.
Thanks,
Sergio
Post by Ilya Maximets
Best regards, Ilya Maximets.
Post by Ilya Maximets
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.
* rebased (fuzz in Makefile)
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default
config/common_base | 1 +
config/common_linuxapp | 3 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 4 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 4 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 123 insertions(+), 4 deletions(-)
Ilya Maximets
2017-06-27 08:46:24 UTC
Permalink
Version 8:
* helper functions from libnuma used to set mempolicy and
work with cpu mask.
* Function now restores previous mempolicy instead of MPOL_DEFAULT.
* Fixed essential_memory on SIGBUS.
* Fixed restoring of mempolicy in case of errors (goto out).
* Enabled by default for all linuxapp except armv7 and dpaa2.

Version 7:
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.

Version 5:
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)

Version 4:
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.

Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default

config/common_base | 1 +
config/common_linuxapp | 2 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 4 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 117 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 126 insertions(+), 8 deletions(-)
--
2.7.4
Ilya Maximets
2017-06-27 08:46:25 UTC
Permalink
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 117 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 123 insertions(+), 8 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n

#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y

+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y

+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n

diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64

CONFIG_RTE_PKTMBUF_HEADROOM=256

+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
#
# Compile Support Libraries for DPAA2
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif

# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..6d2b199 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif

#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,77 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy,
+ oldmask->maskp, oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +493,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}

/* map the segment, and populate page tables,
@@ -433,7 +504,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}

if (orig) {
@@ -458,7 +529,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ essential_memory[node_id] = essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -469,7 +543,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}

close(fd);
@@ -478,6 +552,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}

@@ -562,6 +652,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1095,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1117,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1161,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Ilya Maximets
2017-06-27 08:46:26 UTC
Permalink
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87..74c7d64 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y

# NUMA is not supported on ARM
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index f78449d..b061fb0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256

# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

#
# Compile Support Libraries for DPAA2
--
2.7.4
Hemant Agrawal
2017-06-27 09:18:27 UTC
Permalink
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87..74c7d64 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
# NUMA is not supported on ARM
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index f78449d..b061fb0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating an unnecessary
dependency.
Thomas Monjalon
2017-06-27 09:21:50 UTC
Permalink
Post by Hemant Agrawal
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
[...]
Post by Hemant Agrawal
Post by Ilya Maximets
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating a unnecessary
dependency.
What do you mean? Which ARM64 platform is it breaking?
We can specifically disable it on more platforms.
Hemant Agrawal
2017-06-27 09:41:07 UTC
Permalink
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
[...]
Post by Hemant Agrawal
Post by Ilya Maximets
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating a unnecessary
dependency.
What do you mean? Which ARM64 platform is it breaking?
We can specifically disable it on more platforms.
Unlike x86, ARM only represent a core architecture.
Different platforms can integrate these cores differently in their SoCs.
The stock ARM v8 cores do not provide support for NUMA to my knowledge.
Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
architecture. However that is not a common phenomenon.
NUMA config should not be default for generic ARM config. It should be
enabled only for architecture supporting it.

So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
include NXP. e.g. We use this config on several of our low end systems
(non-dpaa). Also, we use it when running in VM with virtio interfaces on
all of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
Thomas Monjalon
2017-06-27 09:59:11 UTC
Permalink
Post by Hemant Agrawal
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
[...]
Post by Hemant Agrawal
Post by Ilya Maximets
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating a unnecessary
dependency.
What do you mean? Which ARM64 platform is it breaking?
We can specifically disable it on more platforms.
Unlike x86, ARM only represent a core architecture.
Different platforms can integrate these cores differently in their SoCs.
The stock ARM v8 cores do not provide support for NUMA in my knowledge.
Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
architecture. However that is not a common phenomena.
NUMA config should not be default for generic ARM config. It should be
enabled only for architecture supporting it.
So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
include NXP. e.g. We use this config on several of our low end systems
(non-dpaa). Also, we use it when running in VM with virtio interfaces on
all of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
We need more opinions from ARM and Cavium.

The general idea in DPDK config is to enable as much feature as we can.
It conflicts with the general availability of NUMA on ARMv8.
Jerin Jacob
2017-06-27 09:59:46 UTC
Permalink
-----Original Message-----
Date: Tue, 27 Jun 2017 15:11:07 +0530
Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
[...]
Post by Hemant Agrawal
Post by Ilya Maximets
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating a unnecessary
dependency.
What do you mean? Which ARM64 platform is it breaking?
We can specifically disable it on more platforms.
Unlike x86, ARM only represent a core architecture.
Different platforms can integrate these cores differently in their SoCs.
The stock ARM v8 cores do not provide support for NUMA in my knowledge.
A72 is just _an_ implementation of armv8. Not ARMv8 specification
itself. By specification it is NUMA capable and there are NUMA
implementation too.
Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
architecture. However that is not a common phenomena.
NUMA config should not be default for generic ARM config. It should be
enabled only for architecture supporting it.
It is just a build-time dependency. Right? If you link the libnuma package,
it will work on non-NUMA as well. Right? An ARM64 libnuma package is already
available for major distributions.

My point is, I don't want to make arm64 generic config an exceptional case,
If DPDK common config creates libnuma dependency then there is no reason
for arm64 not have it. It is same for x86 and powerpc, non numa systems
too. Right?
So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
include NXP. e.g. We use this config on several of our low end systems
(non-dpaa). Also, we use it when running in VM with virtio interfaces on all
of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
On the same note, arm64-armv8a-linuxapp-gcc used by other vendors for Server machines
with NUMA and if want to keep creating new targets there is no end to it.

How hard is to install libnuma on VM? There is already package for it.
Hemant Agrawal
2017-06-27 12:17:44 UTC
Permalink
-----Original Message-----
Date: Tue, 27 Jun 2017 15:11:07 +0530
Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
[...]
Post by Hemant Agrawal
Post by Ilya Maximets
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating a unnecessary
dependency.
What do you mean? Which ARM64 platform is it breaking?
We can specifically disable it on more platforms.
Unlike x86, ARM only represent a core architecture.
Different platforms can integrate these cores differently in their SoCs.
The stock ARM v8 cores do not provide support for NUMA in my knowledge.
A72 is just _an_ implementation of armv8. Not ARMv8 specification
itself. By specification it is NUMA capable and there are NUMA
implementation too.
Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
architecture. However that is not a common phenomena.
NUMA config should not be default for generic ARM config. It should be
enabled only for architecture supporting it.
It just an build time dependency. Right? If you feed the libnuma package,
it will NON NUMA as well. Right? ARM64 libnuma package is already
available for major distributions.
yes, libnuma will work for non-NUMA.
My point is, I don't want to make arm64 generic config an exceptional case,
If DPDK common config creates libnuma dependency then there is no reason
for arm64 not have it. It is same for x86 and powerpc, non numa systems
too. Right?
x86 and powerpc configs are single vendor based.
Common should be common and generic.

Why to create a unnecessary dependency, when we know that the support is
not uniform? It adds difficulties e.g. For the ARM cross compilation,
will also have to cross compile libnuma-dev. Makefile will need a path
for specifying the lib and include paths for libnuma and numa.h.
So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
include NXP. e.g. We use this config on several of our low end systems
(non-dpaa). Also, we use it when running in VM with virtio interfaces on all
of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
On the same note, arm64-armv8a-linuxapp-gcc used by other vendors for Server machines
with NUMA and if want to keep creating new targets there is no end to it.
How hard is to install libnuma on VM? There is already package for it.
Jerin Jacob
2017-06-27 12:45:57 UTC
Permalink
-----Original Message-----
Date: Tue, 27 Jun 2017 17:47:44 +0530
Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
-----Original Message-----
Date: Tue, 27 Jun 2017 15:11:07 +0530
Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
[...]
Post by Hemant Agrawal
Post by Ilya Maximets
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating a unnecessary
dependency.
What do you mean? Which ARM64 platform is it breaking?
We can specifically disable it on more platforms.
Unlike x86, ARM only represent a core architecture.
Different platforms can integrate these cores differently in their SoCs.
The stock ARM v8 cores do not provide support for NUMA in my knowledge.
A72 is just _an_ implementation of armv8. Not ARMv8 specification
itself. By specification it is NUMA capable and there are NUMA
implementation too.
Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
architecture. However that is not a common phenomena.
NUMA config should not be default for generic ARM config. It should be
enabled only for architecture supporting it.
It just an build time dependency. Right? If you feed the libnuma package,
it will NON NUMA as well. Right? ARM64 libnuma package is already
available for major distributions.
yes, libnuma will work for non-NUMA.
My point is, I don't want to make arm64 generic config an exceptional case,
If DPDK common config creates libnuma dependency then there is no reason
for arm64 not have it. It is same for x86 and powerpc, non numa systems
too. Right?
x86 and powerpc configs are single vendor based.
Common should be common and generic.
Yes. What I understand by common is that it should work on functionality on _all_ the
armv8 targets. If you don't include NUMA then it will have functionality issue
with NUMA targets.

The ARM64 Linux kernel took the similar approach. The default config has all
options and NUMA is _enabled_ even it is not supported on A72.

http://elixir.free-electrons.com/linux/latest/source/arch/arm64/configs/defconfig#L77
Why to create a unnecessary dependency, when we know that the support is not
uniform? It adds difficulties e.g. For the ARM cross compilation, will also
have to cross compile libnuma-dev. Makefile will need a path for specifying
the lib and include paths for libnuma and numa.h.
Yes. I agree. Cross compilation needs additional step. On the other
hand, If we don't include NUMA in common config, We need to add new targets on
all new SoCs(like thunderx2). IMO, In order to reduce the config, I think,
this is the better way.(and it is not hard to disable NUMA for cross
compilation mode if not interested)
So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
include NXP. e.g. We use this config on several of our low end systems
(non-dpaa). Also, we use it when running in VM with virtio interfaces on all
of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
On the same note, arm64-armv8a-linuxapp-gcc used by other vendors for Server machines
with NUMA and if want to keep creating new targets there is no end to it.
How hard is to install libnuma on VM? There is already package for it.
Hemant Agrawal
2017-06-27 13:00:40 UTC
Permalink
-----Original Message-----
Date: Tue, 27 Jun 2017 17:47:44 +0530
Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
-----Original Message-----
Date: Tue, 27 Jun 2017 15:11:07 +0530
Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
Thunderbird/45.8.0
Post by Thomas Monjalon
Post by Hemant Agrawal
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
[...]
Post by Hemant Agrawal
Post by Ilya Maximets
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating a unnecessary
dependency.
What do you mean? Which ARM64 platform is it breaking?
We can specifically disable it on more platforms.
Unlike x86, ARM only represent a core architecture.
Different platforms can integrate these cores differently in their SoCs.
The stock ARM v8 cores do not provide support for NUMA in my knowledge.
A72 is just _an_ implementation of armv8. Not ARMv8 specification
itself. By specification it is NUMA capable and there are NUMA
implementation too.
Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
architecture. However that is not a common phenomena.
NUMA config should not be default for generic ARM config. It should be
enabled only for architecture supporting it.
It just an build time dependency. Right? If you feed the libnuma package,
it will NON NUMA as well. Right? ARM64 libnuma package is already
available for major distributions.
yes, libnuma will work for non-NUMA.
My point is, I don't want to make arm64 generic config an exceptional case,
If DPDK common config creates libnuma dependency then there is no reason
for arm64 not have it. It is same for x86 and powerpc, non numa systems
too. Right?
x86 and powerpc configs are single vendor based.
Common should be common and generic.
Yes. What I understand by common is that it should work on functionality on _all_ the
armv8 targets. If you don't include NUMA then it will have functionality issue
with NUMA targets.
The ARM64 Linux kernel took the similar approach. The default config has all
options and NUMA is _enabled_ even it is not supported on A72.
http://elixir.free-electrons.com/linux/latest/source/arch/arm64/configs/defconfig#L77
Ok! Not able to think of any other issue for now.
Why to create a unnecessary dependency, when we know that the support is not
uniform? It adds difficulties e.g. For the ARM cross compilation, will also
have to cross compile libnuma-dev. Makefile will need a path for specifying
the lib and include paths for libnuma and numa.h.
Yes. I agree. Cross compilation needs additional step. On the other
hand, If we don't include NUMA in common config, We need to add new targets on
all new SoCs(like thunderx2). IMO, In order to reduce the config, I think,
this is the better way.(and it is not hard to disable NUMA for cross
compilation mode if not interested)
So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
include NXP. e.g. We use this config on several of our low end systems
(non-dpaa). Also, we use it when running in VM with virtio interfaces on all
of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
On the same note, arm64-armv8a-linuxapp-gcc used by other vendors for Server machines
with NUMA and if want to keep creating new targets there is no end to it.
How hard is to install libnuma on VM? There is already package for it.
Thomas Monjalon
2017-06-27 09:19:44 UTC
Permalink
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
I forgot to ask you to update devtools/test-build.sh.
DPDK_DEP_NUMA can be removed.
Ilya Maximets
2017-06-27 10:26:13 UTC
Permalink
Post by Thomas Monjalon
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)
I forgot to ask you to update devtools/test-build.sh.
DPDK_DEP_NUMA can be removed.
Ok. Fixed in v9.
Ilya Maximets
2017-06-27 10:24:38 UTC
Permalink
Version 9:
* Removed DPDK_DEP_NUMA from test-build.sh . Not needed
anymore.
* Fixed out-of-bounds write to essential_memory in the case
where socket-mem is not specified and SIGBUS occurred.

Version 8:
* helper functions from libnuma used to set mempolicy and
work with cpu mask.
* Function now restores previous mempolicy instead of MPOL_DEFAULT.
* Fixed essential_memory on SIGBUS.
* Fixed restoring of mempolicy in case of errors (goto out).
* Enabled by default for all linuxapp except armv7 and dpaa2.

Version 7:
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.

Version 5:
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)

Version 4:
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.

Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default

config/common_base | 1 +
config/common_linuxapp | 2 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 4 +
devtools/test-build.sh | 4 -
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
8 files changed, 129 insertions(+), 12 deletions(-)
--
2.7.4
Ilya Maximets
2017-06-27 10:24:39 UTC
Permalink
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 126 insertions(+), 8 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n

#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y

+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y

+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n

diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64

CONFIG_RTE_PKTMBUF_HEADROOM=256

+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
#
# Compile Support Libraries for DPAA2
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif

# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..647d89c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif

#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ essential_prev = 0;
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}

/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}

if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}

close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}

@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Sergio Gonzalez Monroy
2017-06-28 10:30:31 UTC
Permalink
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.
In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 126 insertions(+), 8 deletions(-)
Good stuff Ilya!

Hemant, Jerin, could you also ack the patch if you are happy with it?
Thanks.

Acked-by: Sergio Gonzalez Monroy <***@intel.com>
Hemant Agrawal
2017-06-29 05:32:22 UTC
Permalink
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.
In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 126 insertions(+), 8 deletions(-)
diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
CONFIG_RTE_PKTMBUF_HEADROOM=256
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
DPAA2 does not support NUMA so,
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
Post by Ilya Maximets
#
# Compile Support Libraries for DPAA2
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..647d89c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ essential_prev = 0;
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}
/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}
if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}
close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}
@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
huge_register_sigbus();
+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);
/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
Ilya Maximets
2017-06-29 05:48:28 UTC
Permalink
Post by Hemant Agrawal
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.
In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 126 insertions(+), 8 deletions(-)
diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
CONFIG_RTE_PKTMBUF_HEADROOM=256
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
DPAA2 does not support NUMA so,
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
Oh, sorry. Just typo.
Thanks for catching this.


Sergio, I'll send v10 with only this change and will keep your
acked-by because the change is trivial.
Post by Hemant Agrawal
Post by Ilya Maximets
#
# Compile Support Libraries for DPAA2
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..647d89c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ essential_prev = 0;
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}
/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}
if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}
close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}
@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
huge_register_sigbus();
+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);
/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
Ilya Maximets
2017-06-29 06:08:35 UTC
Permalink
Post by Ilya Maximets
Post by Hemant Agrawal
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.
In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 126 insertions(+), 8 deletions(-)
diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
CONFIG_RTE_PKTMBUF_HEADROOM=256
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
DPAA2 does not support NUMA so,
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
Oh, sorry. Just typo.
Thanks for catching this.
Fixed. Hemant, please, check the new version (v10).
Post by Ilya Maximets
Sergio, I'll send v10 with only this change and will keep your
acked-by because the change is trivial.
Post by Hemant Agrawal
Post by Ilya Maximets
#
# Compile Support Libraries for DPAA2
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..647d89c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ essential_prev = 0;
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}
/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}
if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}
close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}
@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
huge_register_sigbus();
+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);
/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
Ilya Maximets
2017-06-27 10:24:40 UTC
Permalink
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

DPDK_DEP_NUMA not needed anymore.

Signed-off-by: Ilya Maximets <***@samsung.com>
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
devtools/test-build.sh | 4 ----
4 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87..74c7d64 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y

# NUMA is not supported on ARM
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index f78449d..b061fb0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256

# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

#
# Compile Support Libraries for DPAA2
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 61bdce7..0dbc04a 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -41,7 +41,6 @@ default_path=$PATH
# - DPDK_DEP_ISAL_CRYPTO (y/[n])
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MOFED (y/[n])
-# - DPDK_DEP_NUMA (y/[n])
# - DPDK_DEP_PCAP (y/[n])
# - DPDK_DEP_SSL (y/[n])
# - DPDK_DEP_SZE (y/[n])
@@ -124,7 +123,6 @@ reset_env ()
unset DPDK_DEP_ISAL_CRYPTO
unset DPDK_DEP_LDFLAGS
unset DPDK_DEP_MOFED
- unset DPDK_DEP_NUMA
unset DPDK_DEP_PCAP
unset DPDK_DEP_SSL
unset DPDK_DEP_SZE
@@ -163,8 +161,6 @@ config () # <directory> <target> <options>
sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )

# Automatic configuration
- test "$DPDK_DEP_NUMA" != y || \
- sed -ri 's,(NUMA=)n,\1y,' $1/.config
sed -ri 's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
sed -ri 's,(BYPASS=)n,\1y,' $1/.config
test "$DPDK_DEP_ARCHIVE" != y || \
--
2.7.4
Hemant Agrawal
2017-06-29 05:31:18 UTC
Permalink
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
DPDK_DEP_NUMA not needed anymore.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
devtools/test-build.sh | 4 ----
4 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87..74c7d64 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
# NUMA is not supported on ARM
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index f78449d..b061fb0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
#
# Compile Support Libraries for DPAA2
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 61bdce7..0dbc04a 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -41,7 +41,6 @@ default_path=$PATH
# - DPDK_DEP_ISAL_CRYPTO (y/[n])
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MOFED (y/[n])
-# - DPDK_DEP_NUMA (y/[n])
# - DPDK_DEP_PCAP (y/[n])
# - DPDK_DEP_SSL (y/[n])
# - DPDK_DEP_SZE (y/[n])
@@ -124,7 +123,6 @@ reset_env ()
unset DPDK_DEP_ISAL_CRYPTO
unset DPDK_DEP_LDFLAGS
unset DPDK_DEP_MOFED
- unset DPDK_DEP_NUMA
unset DPDK_DEP_PCAP
unset DPDK_DEP_SSL
unset DPDK_DEP_SZE
@@ -163,8 +161,6 @@ config () # <directory> <target> <options>
sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )
# Automatic configuration
- test "$DPDK_DEP_NUMA" != y || \
- sed -ri 's,(NUMA=)n,\1y,' $1/.config
sed -ri 's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
sed -ri 's,(BYPASS=)n,\1y,' $1/.config
test "$DPDK_DEP_ARCHIVE" != y || \
Acked-by: Hemant Agrawal <***@nxp.com>
Ilya Maximets
2017-06-29 05:59:18 UTC
Permalink
Version 10:
* Fixed typo in DPAA2 config.

Version 9:
* Removed DPDK_DEP_NUMA from test-build.sh . Not needed
anymore.
* Fixed out of bound write to essential_memory in case
where socket-mem not specified and SIGBUS occurred.

Version 8:
* helper functions from libnuma used to set mempolicy and
work with cpu mask.
* Function now restores previous mempolicy instead of MPOL_DEFAULT.
* Fixed essential_memory on SIGBUS.
* Fixed restoring of mempolicy in case of errors (goto out).
* Enabled by default for all linuxapp except armv7 and dpaa2.

Version 7:
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.

Version 5:
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)

Version 4:
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.

Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default

config/common_base | 1 +
config/common_linuxapp | 2 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 4 +
devtools/test-build.sh | 4 -
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
8 files changed, 129 insertions(+), 12 deletions(-)
--
2.7.4
Ilya Maximets
2017-06-29 05:59:19 UTC
Permalink
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
Acked-by: Sergio Gonzalez Monroy <***@intel.com>
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 126 insertions(+), 8 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n

#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y

+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y

+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n

diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64

CONFIG_RTE_PKTMBUF_HEADROOM=256

+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
#
# Compile Support Libraries for DPAA2
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif

# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..647d89c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif

#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ essential_prev = 0;
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}

/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}

if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}

close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}

@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
Hemant Agrawal
2017-06-29 07:03:18 UTC
Permalink
Post by Ilya Maximets
Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.
Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.
In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
7 files changed, 126 insertions(+), 8 deletions(-)
diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
CONFIG_RTE_PKTMBUF_HEADROOM=256
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
#
# Compile Support Libraries for DPAA2
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..647d89c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ essential_prev = 0;
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}
/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}
if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}
close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}
@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
huge_register_sigbus();
+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);
/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
Acked-by: Hemant Agrawal <***@nxp.com>
Ilya Maximets
2017-06-29 05:59:20 UTC
Permalink
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

DPDK_DEP_NUMA not needed anymore.

Signed-off-by: Ilya Maximets <***@samsung.com>
Acked-by: Hemant Agrawal <***@nxp.com>
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
devtools/test-build.sh | 4 ----
4 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87..74c7d64 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y

# NUMA is not supported on ARM
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index f78449d..b061fb0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256

# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

#
# Compile Support Libraries for DPAA2
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 61bdce7..0dbc04a 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -41,7 +41,6 @@ default_path=$PATH
# - DPDK_DEP_ISAL_CRYPTO (y/[n])
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MOFED (y/[n])
-# - DPDK_DEP_NUMA (y/[n])
# - DPDK_DEP_PCAP (y/[n])
# - DPDK_DEP_SSL (y/[n])
# - DPDK_DEP_SZE (y/[n])
@@ -124,7 +123,6 @@ reset_env ()
unset DPDK_DEP_ISAL_CRYPTO
unset DPDK_DEP_LDFLAGS
unset DPDK_DEP_MOFED
- unset DPDK_DEP_NUMA
unset DPDK_DEP_PCAP
unset DPDK_DEP_SSL
unset DPDK_DEP_SZE
@@ -163,8 +161,6 @@ config () # <directory> <target> <options>
sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )

# Automatic configuration
- test "$DPDK_DEP_NUMA" != y || \
- sed -ri 's,(NUMA=)n,\1y,' $1/.config
sed -ri 's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
sed -ri 's,(BYPASS=)n,\1y,' $1/.config
test "$DPDK_DEP_ARCHIVE" != y || \
--
2.7.4
Thomas Monjalon
2017-06-30 15:50:58 UTC
Permalink
Post by Ilya Maximets
It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.
DPDK_DEP_NUMA not needed anymore.
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
devtools/test-build.sh | 4 ----
4 files changed, 3 insertions(+), 4 deletions(-)
After a second thought, we still need DPDK_DEP_NUMA in
devtools/test-build.sh.
We just need to inverse the logic so the default is enabled.

I am going to send a v11.
Jerin Jacob
2017-06-29 06:29:48 UTC
Permalink
-----Original Message-----
Date: Thu, 29 Jun 2017 08:59:18 +0300
Subject: [PATCH v10 0/2] Balanced allocation of hugepages
X-Mailer: git-send-email 2.7.4
* Fixed typo in DPAA2 config.
* Removed DPDK_DEP_NUMA from test-build.sh . Not needed
anymore.
* Fixed out of bound write to essential_memory in case
where socket-mem not specified and SIGBUS occurred.
* helper functions from libnuma used to set mempolicy and
work with cpu mask.
* Function now restores previous mempolicy instead of MPOL_DEFAULT.
* Fixed essential_memory on SIGBUS.
* Fixed restoring of mempolicy in case of errors (goto out).
* Enabled by default for all linuxapp except armv7 and dpaa2.
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.
* rebased (fuzz in Makefile)
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default
Series-Acked-by: Jerin Jacob <***@caviumnetworks.com>
Tested on an arm64 NUMA machine.
Tested-by: Jerin Jacob <***@caviumnetworks.com>
Ilya Maximets
2017-06-30 08:36:50 UTC
Permalink
So, are we ready for merging this now?
Thomas?

Best regards, Ilya Maximets.
-----Original Message-----
Date: Thu, 29 Jun 2017 08:59:18 +0300
Subject: [PATCH v10 0/2] Balanced allocation of hugepages
X-Mailer: git-send-email 2.7.4
* Fixed typo in DPAA2 config.
* Removed DPDK_DEP_NUMA from test-build.sh . Not needed
anymore.
* Fixed out of bound write to essential_memory in case
where socket-mem not specified and SIGBUS occurred.
* helper functions from libnuma used to set mempolicy and
work with cpu mask.
* Function now restores previous mempolicy instead of MPOL_DEFAULT.
* Fixed essential_memory on SIGBUS.
* Fixed restoring of mempolicy in case of errors (goto out).
* Enabled by default for all linuxapp except armv7 and dpaa2.
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.
* rebased (fuzz in Makefile)
mem: balanced allocation of hugepages
config: enable vhost numa awareness by default
Tested on an arm64 NUMA machine.
Thomas Monjalon
2017-06-30 16:12:22 UTC
Permalink
Version 11:
* Fixed test-build.sh for missing libnuma dependency

Version 10:
* Fixed typo in DPAA2 config.

Version 9:
* Removed DPDK_DEP_NUMA from test-build.sh . Not needed
anymore.
* Fixed out of bound write to essential_memory in case
where socket-mem not specified and SIGBUS occurred.

Version 8:
* helper functions from libnuma used to set mempolicy and
work with cpu mask.
* Function now restores previous mempolicy instead of MPOL_DEFAULT.
* Fixed essential_memory on SIGBUS.
* Fixed restoring of mempolicy in case of errors (goto out).
* Enabled by default for all linuxapp except armv7 and dpaa2.

Version 7:
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.

Version 5:
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)

Version 4:
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.

Version 3:
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.

Version 2:
* rebased (fuzz in Makefile)

Ilya Maximets (2):
mem: balanced allocation of hugepages
config: enable vhost NUMA awareness by default

config/common_base | 1 +
config/common_linuxapp | 2 +
config/defconfig_arm-armv7a-linuxapp-gcc | 4 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 4 +
devtools/test-build.sh | 6 +-
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
8 files changed, 132 insertions(+), 11 deletions(-)
--
2.13.1
Thomas Monjalon
2017-06-30 16:12:23 UTC
Permalink
From: Ilya Maximets <***@samsung.com>

Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
# 90 x 1GB hugepages available in a system

cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

This happens because all allocated pages are
on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

1) Allocate essential hugepages:
1.1) Allocate as many hugepages from numa N to
only fit requested memory for this numa.
1.2) repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <***@samsung.com>
Acked-by: Sergio Gonzalez Monroy <***@intel.com>
Acked-by: Hemant Agrawal <***@nxp.com>
Acked-by: Jerin Jacob <***@caviumnetworks.com>
Tested-by: Jerin Jacob <***@caviumnetworks.com>
---
config/common_base | 1 +
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 3 +
devtools/test-build.sh | 6 +-
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 120 ++++++++++++++++++++++++++++--
mk/rte.app.mk | 3 +
8 files changed, 129 insertions(+), 11 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd17d..660588a3d 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n

#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b01..64bef87af 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y

+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb67..e06b1d441 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y

+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n

diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab607..d17201b1e 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64

CONFIG_RTE_PKTMBUF_HEADROOM=256

+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
#
# Compile Support Libraries for DPAA2
#
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 61bdce7cc..079c8b8f8 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -41,7 +41,7 @@ default_path=$PATH
# - DPDK_DEP_ISAL_CRYPTO (y/[n])
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MOFED (y/[n])
-# - DPDK_DEP_NUMA (y/[n])
+# - DPDK_DEP_NUMA ([y]/n)
# - DPDK_DEP_PCAP (y/[n])
# - DPDK_DEP_SSL (y/[n])
# - DPDK_DEP_SZE (y/[n])
@@ -163,8 +163,8 @@ config () # <directory> <target> <options>
sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )

# Automatic configuration
- test "$DPDK_DEP_NUMA" != y || \
- sed -ri 's,(NUMA=)n,\1y,' $1/.config
+ test "$DPDK_DEP_NUMA" != n || \
+ sed -ri 's,(NUMA.*=)y,\1n,' $1/.config
sed -ri 's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
sed -ri 's,(BYPASS=)n,\1y,' $1/.config
test "$DPDK_DEP_ARCHIVE" != y || \
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd088..8651e2783 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif

# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb5d..647d89c58 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif

#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif

for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;

+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ essential_prev = 0;
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}

/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}

if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}

close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}

+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}

@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)

huge_register_sigbus();

+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)

/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);

/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b382..4fe22d1fe 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.13.1
Thomas Monjalon
2017-06-30 16:12:24 UTC
Permalink
From: Ilya Maximets <***@samsung.com>

It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

Signed-off-by: Ilya Maximets <***@samsung.com>
Acked-by: Hemant Agrawal <***@nxp.com>
Acked-by: Jerin Jacob <***@caviumnetworks.com>
Tested-by: Jerin Jacob <***@caviumnetworks.com>
---
config/common_linuxapp | 1 +
config/defconfig_arm-armv7a-linuxapp-gcc | 1 +
config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
3 files changed, 3 insertions(+)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87af..74c7d64ec 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d441..00bc2ab90 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y

# NUMA is not supported on ARM
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index d17201b1e..4452c2311 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256

# Doesn't support NUMA
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n

#
# Compile Support Libraries for DPAA2
--
2.13.1
Thomas Monjalon
2017-07-01 10:59:20 UTC
Permalink
Post by Thomas Monjalon
* Fixed test-build.sh for missing libnuma dependency
* Fixed typo in DPAA2 config.
* Removed DPDK_DEP_NUMA from test-build.sh . Not needed
anymore.
* Fixed out of bound write to essential_memory in case
where socket-mem not specified and SIGBUS occurred.
* helper functions from libnuma used to set mempolicy and
work with cpu mask.
* Function now restores previous mempolicy instead of MPOL_DEFAULT.
* Fixed essential_memory on SIGBUS.
* Fixed restoring of mempolicy in case of errors (goto out).
* Enabled by default for all linuxapp except armv7 and dpaa2.
* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
returned. Enabled by default for x86, ppc and thunderx.
* Fixed shared build. (Automated build test will fail
anyway because libnuma-devel not installed on build servers)
* Fixed work on systems without NUMA by adding check for NUMA
support in kernel.
* Implemented hybrid schema for allocation.
* Fixed not needed mempolicy change while remapping. (orig = 0)
* Added patch to enable VHOST_NUMA by default.
* rebased (fuzz in Makefile)
mem: balanced allocation of hugepages
config: enable vhost NUMA awareness by default
Applied this version, thanks for getting an agreement after long discussions :)
Continue reading on narkive:
Loading...