Discussion:
[dpdk-dev] examples/power: allow use of more than 64 cores
(too old to reply)
David Hunt
2018-11-22 17:02:16 UTC
Permalink
vm_power_manager has up to now been limited to 64-cores because of the
extensive use of uint64 type for bitmasks. This patch set converts those
uint64s to character arrays, and increases the limit of cores from 64 to
256, allowing users to raise this further by changing the #define.

First of all we convert all the relevant uint64_t's to char arrays. Then
we remove the unneeded mask functions that were limited to 64 cores. We also
extend the guest functionality and finally raise the number of supported
cores to something more sensible, i.e. 256.

[1/4] examples/power: change 64-bit masks to arrays
[2/4] examples/power: remove mask functions
[3/4] examples/power: allow vms to use lcores over 63
[4/4] examples/power: increase MAX_CPUS to 256
David Hunt
2018-11-22 17:02:17 UTC
Permalink
vm_power_manager currently makes use of uint64_t masks to keep track of
cores in use, limiting use of the app to only being able to manage the
first 64 cores in a multi-core system. Many modern systems have core
counts greater than 64, so this limitation needs to be removed.

This patch converts the relevant 64-bit masks to character arrays.

Signed-off-by: David Hunt <***@intel.com>
---
examples/vm_power_manager/channel_manager.c | 111 +++++++++++---------
examples/vm_power_manager/channel_manager.h | 2 +-
examples/vm_power_manager/vm_power_cli.c | 6 +-
3 files changed, 68 insertions(+), 51 deletions(-)

diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 4fac099df..5af4996db 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -29,14 +29,11 @@
#include "channel_manager.h"
#include "channel_commands.h"
#include "channel_monitor.h"
+#include "power_manager.h"


#define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1

-#define ITERATIVE_BITMASK_CHECK_64(mask_u64b, i) \
- for (i = 0; mask_u64b; mask_u64b &= ~(1ULL << i++)) \
- if ((mask_u64b >> i) & 1) \
-
/* Global pointer to libvirt connection */
static virConnectPtr global_vir_conn_ptr;

@@ -54,7 +51,7 @@ struct virtual_machine_info {
char name[CHANNEL_MGR_MAX_NAME_LEN];
rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
- uint64_t channel_mask;
+ char channel_mask[POWER_MGR_MAX_CPUS];
uint8_t num_channels;
enum vm_status status;
virDomainPtr domainPtr;
@@ -135,12 +132,14 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
}

int
-set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
+set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
{
unsigned i = 0;
int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
struct virtual_machine_info *vm_info;
- uint64_t mask = core_mask;
+ char mask[POWER_MGR_MAX_CPUS];
+
+ memcpy(mask, core_mask, POWER_MGR_MAX_CPUS);

if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
@@ -156,8 +155,8 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)

if (!virDomainIsActive(vm_info->domainPtr)) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
- "mask(0x%"PRIx64") for VM '%s', VM is not active\n",
- vcpu, core_mask, vm_info->name);
+ " for VM '%s', VM is not active\n",
+ vcpu, vm_info->name);
return -1;
}

@@ -167,22 +166,25 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
return -1;
}
memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- VIR_USE_CPU(global_cpumaps, i);
- if (i >= global_n_host_cpus) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
- "number of CPUs(%u)\n", i, global_n_host_cpus);
- return -1;
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] == 1) {
+ VIR_USE_CPU(global_cpumaps, i);
+ if (i >= global_n_host_cpus) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+ "number of CPUs(%u)\n",
+ i, global_n_host_cpus);
+ return -1;
+ }
}
}
if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
global_maplen, flags) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
- "mask(0x%"PRIx64") for VM '%s'\n", vcpu, core_mask,
+ " for VM '%s'\n", vcpu,
vm_info->name);
return -1;
}
- rte_atomic64_set(&vm_info->pcpu_mask[vcpu], core_mask);
+ memcpy(&vm_info->pcpu_mask[vcpu], core_mask, POWER_MGR_MAX_CPUS);
return 0;

}
@@ -190,7 +192,11 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
int
set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
{
- uint64_t mask = 1ULL << core_num;
+ char mask[POWER_MGR_MAX_CPUS];
+
+ memset(mask, 0, POWER_MGR_MAX_CPUS);
+
+ mask[core_num] = 1;

return set_pcpus_mask(vm_name, vcpu, mask);
}
@@ -211,7 +217,7 @@ static inline int
channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
{
rte_spinlock_lock(&(vm_info->config_spinlock));
- if (vm_info->channel_mask & (1ULL << channel_num)) {
+ if (vm_info->channel_mask[channel_num]) {
rte_spinlock_unlock(&(vm_info->config_spinlock));
return 1;
}
@@ -343,7 +349,7 @@ setup_channel_info(struct virtual_machine_info **vm_info_dptr,
}
rte_spinlock_lock(&(vm_info->config_spinlock));
vm_info->num_channels++;
- vm_info->channel_mask |= 1ULL << channel_num;
+ vm_info->channel_mask[channel_num] = 1;
vm_info->channels[channel_num] = chan_info;
chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
rte_spinlock_unlock(&(vm_info->config_spinlock));
@@ -590,7 +596,7 @@ remove_channel(struct channel_info **chan_info_dptr)
vm_info = (struct virtual_machine_info *)chan_info->priv_info;

rte_spinlock_lock(&(vm_info->config_spinlock));
- vm_info->channel_mask &= ~(1ULL << chan_info->channel_num);
+ vm_info->channel_mask[chan_info->channel_num] = 0;
vm_info->num_channels--;
rte_spinlock_unlock(&(vm_info->config_spinlock));

@@ -603,7 +609,7 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
{
struct virtual_machine_info *vm_info;
unsigned i;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
int num_channels_changed = 0;

if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
@@ -619,10 +625,12 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
}

rte_spinlock_lock(&(vm_info->config_spinlock));
- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- vm_info->channels[i]->status = status;
- num_channels_changed++;
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] == 1) {
+ vm_info->channels[i]->status = status;
+ num_channels_changed++;
+ }
}
rte_spinlock_unlock(&(vm_info->config_spinlock));
return num_channels_changed;
@@ -665,7 +673,7 @@ get_all_vm(int *num_vm, int *num_vcpu)

virNodeInfo node_info;
virDomainPtr *domptr;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
int i, ii, numVcpus[MAX_VCPUS], cpu, n_vcpus;
unsigned int jj;
const char *vm_name;
@@ -714,15 +722,16 @@ get_all_vm(int *num_vm, int *num_vcpu)

/* Save pcpu in use by libvirt VMs */
for (ii = 0; ii < n_vcpus; ii++) {
- mask = 0;
+ memset(mask, 0, POWER_MGR_MAX_CPUS);
for (jj = 0; jj < global_n_host_cpus; jj++) {
if (VIR_CPU_USABLE(global_cpumaps,
global_maplen, ii, jj) > 0) {
- mask |= 1ULL << jj;
+ mask[jj] = 1;
}
}
- ITERATIVE_BITMASK_CHECK_64(mask, cpu) {
- lvm_info[i].pcpus[ii] = cpu;
+ for (cpu = 0; cpu < POWER_MGR_MAX_CPUS; cpu++) {
+ if (mask[cpu])
+ lvm_info[i].pcpus[ii] = cpu;
}
}
}
@@ -733,7 +742,7 @@ get_info_vm(const char *vm_name, struct vm_info *info)
{
struct virtual_machine_info *vm_info;
unsigned i, channel_num = 0;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];

vm_info = find_domain_by_name(vm_name);
if (vm_info == NULL) {
@@ -746,14 +755,19 @@ get_info_vm(const char *vm_name, struct vm_info *info)

rte_spinlock_lock(&(vm_info->config_spinlock));

- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- info->channels[channel_num].channel_num = i;
- memcpy(info->channels[channel_num].channel_path,
- vm_info->channels[i]->channel_path, UNIX_PATH_MAX);
- info->channels[channel_num].status = vm_info->channels[i]->status;
- info->channels[channel_num].fd = vm_info->channels[i]->fd;
- channel_num++;
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] == 1) {
+ info->channels[channel_num].channel_num = i;
+ memcpy(info->channels[channel_num].channel_path,
+ vm_info->channels[i]->channel_path,
+ UNIX_PATH_MAX);
+ info->channels[channel_num].status =
+ vm_info->channels[i]->status;
+ info->channels[channel_num].fd =
+ vm_info->channels[i]->fd;
+ channel_num++;
+ }
}

info->num_channels = channel_num;
@@ -822,7 +836,7 @@ add_vm(const char *vm_name)
}
strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
new_domain->name[sizeof(new_domain->name) - 1] = '\0';
- new_domain->channel_mask = 0;
+ memset(new_domain->channel_mask, 0, POWER_MGR_MAX_CPUS);
new_domain->num_channels = 0;

if (!virDomainIsActive(dom_ptr))
@@ -940,18 +954,21 @@ void
channel_manager_exit(void)
{
unsigned i;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
struct virtual_machine_info *vm_info;

LIST_FOREACH(vm_info, &vm_list_head, vms_info) {

rte_spinlock_lock(&(vm_info->config_spinlock));

- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- remove_channel_from_monitor(vm_info->channels[i]);
- close(vm_info->channels[i]->fd);
- rte_free(vm_info->channels[i]);
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] == 1) {
+ remove_channel_from_monitor(
+ vm_info->channels[i]);
+ close(vm_info->channels[i]->fd);
+ rte_free(vm_info->channels[i]);
+ }
}
rte_spinlock_unlock(&(vm_info->config_spinlock));

diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index d948b304c..c2520ab6f 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -149,7 +149,7 @@ uint64_t get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu);
* - 0 on success.
* - Negative on error.
*/
-int set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask);
+int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);

/**
* Set the Physical CPU for the specified vCPU.
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index d588d38aa..101e67c9c 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -128,7 +128,7 @@ struct cmd_set_pcpu_mask_result {
cmdline_fixed_string_t set_pcpu_mask;
cmdline_fixed_string_t vm_name;
uint8_t vcpu;
- uint64_t core_mask;
+ char core_mask[POWER_MGR_MAX_CPUS];
};

static void
@@ -139,10 +139,10 @@ cmd_set_pcpu_mask_parsed(void *parsed_result, struct cmdline *cl,

if (set_pcpus_mask(res->vm_name, res->vcpu, res->core_mask) == 0)
cmdline_printf(cl, "Pinned vCPU(%"PRId8") to pCPU core "
- "mask(0x%"PRIx64")\n", res->vcpu, res->core_mask);
+ "\n", res->vcpu);
else
cmdline_printf(cl, "Unable to pin vCPU(%"PRId8") to pCPU core "
- "mask(0x%"PRIx64")\n", res->vcpu, res->core_mask);
+ "\n", res->vcpu);
}

cmdline_parse_token_string_t cmd_set_pcpu_mask =
--
2.17.1
Burakov, Anatoly
2018-12-10 12:18:40 UTC
Permalink
Post by David Hunt
vm_power_manager currently makes use of uint64_t masks to keep track of
cores in use, limiting use of the app to only being able to manage the
first 64 cores in a multi-core system. Many modern systems have core
counts greater than 64, so this limitation needs to be removed.
This patch converts the relevant 64-bit masks to character arrays.
Hi Dave,

Overall looks OK, but some comments below.
Post by David Hunt
---
examples/vm_power_manager/channel_manager.c | 111 +++++++++++---------
examples/vm_power_manager/channel_manager.h | 2 +-
examples/vm_power_manager/vm_power_cli.c | 6 +-
3 files changed, 68 insertions(+), 51 deletions(-)
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 4fac099df..5af4996db 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -29,14 +29,11 @@
#include "channel_manager.h"
#include "channel_commands.h"
#include "channel_monitor.h"
+#include "power_manager.h"
#define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1
-#define ITERATIVE_BITMASK_CHECK_64(mask_u64b, i) \
- for (i = 0; mask_u64b; mask_u64b &= ~(1ULL << i++)) \
- if ((mask_u64b >> i) & 1) \
-
/* Global pointer to libvirt connection */
static virConnectPtr global_vir_conn_ptr;
@@ -54,7 +51,7 @@ struct virtual_machine_info {
char name[CHANNEL_MGR_MAX_NAME_LEN];
rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
- uint64_t channel_mask;
+ char channel_mask[POWER_MGR_MAX_CPUS];
uint8_t num_channels;
enum vm_status status;
virDomainPtr domainPtr;
@@ -135,12 +132,14 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
}
int
-set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
+set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
{
unsigned i = 0;
int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
struct virtual_machine_info *vm_info;
- uint64_t mask = core_mask;
+ char mask[POWER_MGR_MAX_CPUS];
+
+ memcpy(mask, core_mask, POWER_MGR_MAX_CPUS);
if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
@@ -156,8 +155,8 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
if (!virDomainIsActive(vm_info->domainPtr)) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
- "mask(0x%"PRIx64") for VM '%s', VM is not active\n",
- vcpu, core_mask, vm_info->name);
+ " for VM '%s', VM is not active\n",
+ vcpu, vm_info->name);
return -1;
}
@@ -167,22 +166,25 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
return -1;
}
memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- VIR_USE_CPU(global_cpumaps, i);
- if (i >= global_n_host_cpus) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
- "number of CPUs(%u)\n", i, global_n_host_cpus);
- return -1;
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] == 1) {
Here and in other places - early exit would've avoided unneeded extra
indents, e.g.

if (mask[i] != 1)
continue;
<do stuff>
Post by David Hunt
+ VIR_USE_CPU(global_cpumaps, i);
+ if (i >= global_n_host_cpus) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+ "number of CPUs(%u)\n",
+ i, global_n_host_cpus);
+ return -1;
+ }
}
}
if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
global_maplen, flags) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
- "mask(0x%"PRIx64") for VM '%s'\n", vcpu, core_mask,
+ " for VM '%s'\n", vcpu,
vm_info->name);
return -1;
}
- rte_atomic64_set(&vm_info->pcpu_mask[vcpu], core_mask);
+ memcpy(&vm_info->pcpu_mask[vcpu], core_mask, POWER_MGR_MAX_CPUS);
I'm probably missing something here, but at face value, replacing an
atomic set operation with a simple memcpy is not equivalent. Is there
any locking that's missing from here? Or was it that atomics were not
necessary in the first place?
Post by David Hunt
return 0;
}
@@ -190,7 +192,11 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
int
set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
{
- uint64_t mask = 1ULL << core_num;
+ char mask[POWER_MGR_MAX_CPUS];
+
+ memset(mask, 0, POWER_MGR_MAX_CPUS);
+
+ mask[core_num] = 1;
return set_pcpus_mask(vm_name, vcpu, mask);
This was a thin wrapper to get around dealing with masks. Now that
you're dealing with array indexes, this function seems redundant (and
creates an extra mask/memset in the process).
Post by David Hunt
}
@@ -211,7 +217,7 @@ static inline int
channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
{
rte_spinlock_lock(&(vm_info->config_spinlock));
- if (vm_info->channel_mask & (1ULL << channel_num)) {
+ if (vm_info->channel_mask[channel_num]) {
You seem to be using (val) and (val == 1) interchangeably. This worked
for masks, but will not work for char arrays. Comparisons for values
should be explicit in this and other similar cases.
Post by David Hunt
rte_spinlock_unlock(&(vm_info->config_spinlock));
return 1;
}
@@ -343,7 +349,7 @@ setup_channel_info(struct virtual_machine_info **vm_info_dptr,
}
rte_spinlock_lock(&(vm_info->config_spinlock));
vm_info->num_channels++;
- vm_info->channel_mask |= 1ULL << channel_num;
+ vm_info->channel_mask[channel_num] = 1;
vm_info->channels[channel_num] = chan_info;
chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
rte_spinlock_unlock(&(vm_info->config_spinlock));
@@ -590,7 +596,7 @@ remove_channel(struct channel_info **chan_info_dptr)
vm_info = (struct virtual_machine_info *)chan_info->priv_info;
rte_spinlock_lock(&(vm_info->config_spinlock));
- vm_info->channel_mask &= ~(1ULL << chan_info->channel_num);
+ vm_info->channel_mask[chan_info->channel_num] = 0;
vm_info->num_channels--;
rte_spinlock_unlock(&(vm_info->config_spinlock));
@@ -603,7 +609,7 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
{
struct virtual_machine_info *vm_info;
unsigned i;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
int num_channels_changed = 0;
if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
@@ -619,10 +625,12 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
}
rte_spinlock_lock(&(vm_info->config_spinlock));
- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- vm_info->channels[i]->status = status;
- num_channels_changed++;
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] == 1) {
+ vm_info->channels[i]->status = status;
+ num_channels_changed++;
+ }
Same as above re: indent - perhaps, early exit (well, early continue...)
would make things easier to read.
Post by David Hunt
}
rte_spinlock_unlock(&(vm_info->config_spinlock));
return num_channels_changed;
@@ -665,7 +673,7 @@ get_all_vm(int *num_vm, int *num_vcpu)
virNodeInfo node_info;
virDomainPtr *domptr;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
int i, ii, numVcpus[MAX_VCPUS], cpu, n_vcpus;
unsigned int jj;
const char *vm_name;
@@ -714,15 +722,16 @@ get_all_vm(int *num_vm, int *num_vcpu)
/* Save pcpu in use by libvirt VMs */
for (ii = 0; ii < n_vcpus; ii++) {
- mask = 0;
+ memset(mask, 0, POWER_MGR_MAX_CPUS);
for (jj = 0; jj < global_n_host_cpus; jj++) {
if (VIR_CPU_USABLE(global_cpumaps,
global_maplen, ii, jj) > 0) {
- mask |= 1ULL << jj;
+ mask[jj] = 1;
}
}
- ITERATIVE_BITMASK_CHECK_64(mask, cpu) {
- lvm_info[i].pcpus[ii] = cpu;
+ for (cpu = 0; cpu < POWER_MGR_MAX_CPUS; cpu++) {
+ if (mask[cpu])
+ lvm_info[i].pcpus[ii] = cpu;
Same, should be mask[cpu] == 1.

Also, are two loops necessary?
Post by David Hunt
}
}
}
@@ -733,7 +742,7 @@ get_info_vm(const char *vm_name, struct vm_info *info)
{
struct virtual_machine_info *vm_info;
unsigned i, channel_num = 0;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
vm_info = find_domain_by_name(vm_name);
if (vm_info == NULL) {
@@ -746,14 +755,19 @@ get_info_vm(const char *vm_name, struct vm_info *info)
rte_spinlock_lock(&(vm_info->config_spinlock));
- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- info->channels[channel_num].channel_num = i;
- memcpy(info->channels[channel_num].channel_path,
- vm_info->channels[i]->channel_path, UNIX_PATH_MAX);
- info->channels[channel_num].status = vm_info->channels[i]->status;
- info->channels[channel_num].fd = vm_info->channels[i]->fd;
- channel_num++;
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] == 1) {
Same - early exit would make things more readable.
Post by David Hunt
+ info->channels[channel_num].channel_num = i;
+ memcpy(info->channels[channel_num].channel_path,
+ vm_info->channels[i]->channel_path,
+ UNIX_PATH_MAX);
+ info->channels[channel_num].status =
+ vm_info->channels[i]->status;
+ info->channels[channel_num].fd =
+ vm_info->channels[i]->fd;
+ channel_num++;
+ }
}
info->num_channels = channel_num;
@@ -822,7 +836,7 @@ add_vm(const char *vm_name)
}
strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
new_domain->name[sizeof(new_domain->name) - 1] = '\0';
- new_domain->channel_mask = 0;
+ memset(new_domain->channel_mask, 0, POWER_MGR_MAX_CPUS);
new_domain->num_channels = 0;
if (!virDomainIsActive(dom_ptr))
@@ -940,18 +954,21 @@ void
channel_manager_exit(void)
{
unsigned i;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
struct virtual_machine_info *vm_info;
LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
rte_spinlock_lock(&(vm_info->config_spinlock));
- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- remove_channel_from_monitor(vm_info->channels[i]);
- close(vm_info->channels[i]->fd);
- rte_free(vm_info->channels[i]);
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] == 1) {
Same - early exit.
Post by David Hunt
+ remove_channel_from_monitor(
+ vm_info->channels[i]);
+ close(vm_info->channels[i]->fd);
+ rte_free(vm_info->channels[i]);
+ }
}
rte_spinlock_unlock(&(vm_info->config_spinlock));
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index d948b304c..c2520ab6f 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -149,7 +149,7 @@ uint64_t get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu);
* - 0 on success.
* - Negative on error.
*/
-int set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask);
+int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);
/**
* Set the Physical CPU for the specified vCPU.
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index d588d38aa..101e67c9c 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -128,7 +128,7 @@ struct cmd_set_pcpu_mask_result {
cmdline_fixed_string_t set_pcpu_mask;
cmdline_fixed_string_t vm_name;
uint8_t vcpu;
- uint64_t core_mask;
+ char core_mask[POWER_MGR_MAX_CPUS];
};
static void
@@ -139,10 +139,10 @@ cmd_set_pcpu_mask_parsed(void *parsed_result, struct cmdline *cl,
if (set_pcpus_mask(res->vm_name, res->vcpu, res->core_mask) == 0)
cmdline_printf(cl, "Pinned vCPU(%"PRId8") to pCPU core "
- "mask(0x%"PRIx64")\n", res->vcpu, res->core_mask);
+ "\n", res->vcpu);
else
cmdline_printf(cl, "Unable to pin vCPU(%"PRId8") to pCPU core "
- "mask(0x%"PRIx64")\n", res->vcpu, res->core_mask);
+ "\n", res->vcpu);
}
cmdline_parse_token_string_t cmd_set_pcpu_mask =
--
Thanks,
Anatoly
David Hunt
2018-11-22 17:02:18 UTC
Permalink
Since we're moving to allowing greater than 64 cores, the mask functions
that use uint64_t to perform operations on a masked set of cores are no
longer feasible, so remove them.

Signed-off-by: David Hunt <***@intel.com>
---
examples/vm_power_manager/channel_monitor.c | 24 ----
examples/vm_power_manager/power_manager.c | 68 ---------
examples/vm_power_manager/vm_power_cli.c | 149 --------------------
3 files changed, 241 deletions(-)

diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5da531542..4189bbf1b 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -681,30 +681,6 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
default:
break;
}
- } else {
- switch (pkt->unit) {
- case(CPU_POWER_SCALE_MIN):
- power_manager_scale_mask_min(core_mask);
- break;
- case(CPU_POWER_SCALE_MAX):
- power_manager_scale_mask_max(core_mask);
- break;
- case(CPU_POWER_SCALE_DOWN):
- power_manager_scale_mask_down(core_mask);
- break;
- case(CPU_POWER_SCALE_UP):
- power_manager_scale_mask_up(core_mask);
- break;
- case(CPU_POWER_ENABLE_TURBO):
- power_manager_enable_turbo_mask(core_mask);
- break;
- case(CPU_POWER_DISABLE_TURBO):
- power_manager_disable_turbo_mask(core_mask);
- break;
- default:
- break;
- }
-
}
}

diff --git a/examples/vm_power_manager/power_manager.c b/examples/vm_power_manager/power_manager.c
index f9e8c0abd..21dc3a727 100644
--- a/examples/vm_power_manager/power_manager.c
+++ b/examples/vm_power_manager/power_manager.c
@@ -33,20 +33,6 @@
rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl); \
} while (0)

-#define POWER_SCALE_MASK(DIRECTION, core_mask, ret) do { \
- int i; \
- for (i = 0; core_mask; core_mask &= ~(1 << i++)) { \
- if ((core_mask >> i) & 1) { \
- if (!(ci.cd[i].global_enabled_cpus)) \
- continue; \
- rte_spinlock_lock(&global_core_freq_info[i].power_sl); \
- if (rte_power_freq_##DIRECTION(i) != 1) \
- ret = -1; \
- rte_spinlock_unlock(&global_core_freq_info[i].power_sl); \
- } \
- } \
-} while (0)
-
struct freq_info {
rte_spinlock_t power_sl;
uint32_t freqs[RTE_MAX_LCORE_FREQS];
@@ -199,60 +185,6 @@ power_manager_exit(void)
return ret;
}

-int
-power_manager_scale_mask_up(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(up, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_scale_mask_down(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(down, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_scale_mask_min(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(min, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_scale_mask_max(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(max, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_enable_turbo_mask(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(enable_turbo, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_disable_turbo_mask(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(disable_turbo, core_mask, ret);
- return ret;
-}
-
int
power_manager_scale_core_up(unsigned core_num)
{
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index 101e67c9c..34546809a 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -124,54 +124,7 @@ cmdline_parse_inst_t cmd_show_vm_set = {
};

/* *** vCPU to pCPU mapping operations *** */
-struct cmd_set_pcpu_mask_result {
- cmdline_fixed_string_t set_pcpu_mask;
- cmdline_fixed_string_t vm_name;
- uint8_t vcpu;
- char core_mask[POWER_MGR_MAX_CPUS];
-};

-static void
-cmd_set_pcpu_mask_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_set_pcpu_mask_result *res = parsed_result;
-
- if (set_pcpus_mask(res->vm_name, res->vcpu, res->core_mask) == 0)
- cmdline_printf(cl, "Pinned vCPU(%"PRId8") to pCPU core "
- "\n", res->vcpu);
- else
- cmdline_printf(cl, "Unable to pin vCPU(%"PRId8") to pCPU core "
- "\n", res->vcpu);
-}
-
-cmdline_parse_token_string_t cmd_set_pcpu_mask =
- TOKEN_STRING_INITIALIZER(struct cmd_set_pcpu_mask_result,
- set_pcpu_mask, "set_pcpu_mask");
-cmdline_parse_token_string_t cmd_set_pcpu_mask_vm_name =
- TOKEN_STRING_INITIALIZER(struct cmd_set_pcpu_mask_result,
- vm_name, NULL);
-cmdline_parse_token_num_t set_pcpu_mask_vcpu =
- TOKEN_NUM_INITIALIZER(struct cmd_set_pcpu_mask_result,
- vcpu, UINT8);
-cmdline_parse_token_num_t set_pcpu_mask_core_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_set_pcpu_mask_result,
- core_mask, UINT64);
-
-
-cmdline_parse_inst_t cmd_set_pcpu_mask_set = {
- .f = cmd_set_pcpu_mask_parsed,
- .data = NULL,
- .help_str = "set_pcpu_mask <vm_name> <vcpu> <pcpu>, Set the binding "
- "of Virtual CPU on VM to the Physical CPU mask.",
- .tokens = {
- (void *)&cmd_set_pcpu_mask,
- (void *)&cmd_set_pcpu_mask_vm_name,
- (void *)&set_pcpu_mask_vcpu,
- (void *)&set_pcpu_mask_core_mask,
- NULL,
- },
-};

struct cmd_set_pcpu_result {
cmdline_fixed_string_t set_pcpu;
@@ -428,105 +381,6 @@ cmdline_parse_inst_t cmd_channels_status_op_set = {
};

/* *** CPU Frequency operations *** */
-struct cmd_show_cpu_freq_mask_result {
- cmdline_fixed_string_t show_cpu_freq_mask;
- uint64_t core_mask;
-};
-
-static void
-cmd_show_cpu_freq_mask_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_show_cpu_freq_mask_result *res = parsed_result;
- unsigned i;
- uint64_t mask = res->core_mask;
- uint32_t freq;
-
- for (i = 0; mask; mask &= ~(1ULL << i++)) {
- if ((mask >> i) & 1) {
- freq = power_manager_get_current_frequency(i);
- if (freq > 0)
- cmdline_printf(cl, "Core %u: %"PRId32"\n", i, freq);
- }
- }
-}
-
-cmdline_parse_token_string_t cmd_show_cpu_freq_mask =
- TOKEN_STRING_INITIALIZER(struct cmd_show_cpu_freq_mask_result,
- show_cpu_freq_mask, "show_cpu_freq_mask");
-cmdline_parse_token_num_t cmd_show_cpu_freq_mask_core_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_show_cpu_freq_mask_result,
- core_mask, UINT64);
-
-cmdline_parse_inst_t cmd_show_cpu_freq_mask_set = {
- .f = cmd_show_cpu_freq_mask_parsed,
- .data = NULL,
- .help_str = "show_cpu_freq_mask <mask>, Get the current frequency for each "
- "core specified in the mask",
- .tokens = {
- (void *)&cmd_show_cpu_freq_mask,
- (void *)&cmd_show_cpu_freq_mask_core_mask,
- NULL,
- },
-};
-
-struct cmd_set_cpu_freq_mask_result {
- cmdline_fixed_string_t set_cpu_freq_mask;
- uint64_t core_mask;
- cmdline_fixed_string_t cmd;
-};
-
-static void
-cmd_set_cpu_freq_mask_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_set_cpu_freq_mask_result *res = parsed_result;
- int ret = -1;
-
- if (!strcmp(res->cmd , "up"))
- ret = power_manager_scale_mask_up(res->core_mask);
- else if (!strcmp(res->cmd , "down"))
- ret = power_manager_scale_mask_down(res->core_mask);
- else if (!strcmp(res->cmd , "min"))
- ret = power_manager_scale_mask_min(res->core_mask);
- else if (!strcmp(res->cmd , "max"))
- ret = power_manager_scale_mask_max(res->core_mask);
- else if (!strcmp(res->cmd, "enable_turbo"))
- ret = power_manager_enable_turbo_mask(res->core_mask);
- else if (!strcmp(res->cmd, "disable_turbo"))
- ret = power_manager_disable_turbo_mask(res->core_mask);
- if (ret < 0) {
- cmdline_printf(cl, "Error scaling core_mask(0x%"PRIx64") '%s' , not "
- "all cores specified have been scaled\n",
- res->core_mask, res->cmd);
- };
-}
-
-cmdline_parse_token_string_t cmd_set_cpu_freq_mask =
- TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_mask_result,
- set_cpu_freq_mask, "set_cpu_freq_mask");
-cmdline_parse_token_num_t cmd_set_cpu_freq_mask_core_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_mask_result,
- core_mask, UINT64);
-cmdline_parse_token_string_t cmd_set_cpu_freq_mask_result =
- TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_mask_result,
- cmd, "up#down#min#max#enable_turbo#disable_turbo");
-
-cmdline_parse_inst_t cmd_set_cpu_freq_mask_set = {
- .f = cmd_set_cpu_freq_mask_parsed,
- .data = NULL,
- .help_str = "set_cpu_freq <core_mask> <up|down|min|max|enable_turbo|disable_turbo>, adjust the current "
- "frequency for the cores specified in <core_mask>",
- .tokens = {
- (void *)&cmd_set_cpu_freq_mask,
- (void *)&cmd_set_cpu_freq_mask_core_mask,
- (void *)&cmd_set_cpu_freq_mask_result,
- NULL,
- },
-};
-
-
-
struct cmd_show_cpu_freq_result {
cmdline_fixed_string_t show_cpu_freq;
uint8_t core_num;
@@ -627,11 +481,8 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_channels_op_set,
(cmdline_parse_inst_t *)&cmd_channels_status_op_set,
(cmdline_parse_inst_t *)&cmd_show_vm_set,
- (cmdline_parse_inst_t *)&cmd_show_cpu_freq_mask_set,
- (cmdline_parse_inst_t *)&cmd_set_cpu_freq_mask_set,
(cmdline_parse_inst_t *)&cmd_show_cpu_freq_set,
(cmdline_parse_inst_t *)&cmd_set_cpu_freq_set,
- (cmdline_parse_inst_t *)&cmd_set_pcpu_mask_set,
(cmdline_parse_inst_t *)&cmd_set_pcpu_set,
NULL,
};
--
2.17.1
Burakov, Anatoly
2018-12-10 12:30:44 UTC
Permalink
Post by David Hunt
since we're moving to allowing greater than 64 cores, the mask functions
that use uint64_t to perform functions on a masked set of cores are no
longer feasible, so removing them.
Perhaps "needed" is a better word, rather than "feasible" :)

Please correct me if i'm wrong, but as of this patch, some of the
functionality is left in half-working state, and this patch should
really be merged with patch 3?
--
Thanks,
Anatoly
David Hunt
2018-11-22 17:02:19 UTC
Permalink
Extending the functionality to allow vms to power manage cores beyond 63.

Signed-off-by: David Hunt <***@intel.com>
---
examples/vm_power_manager/channel_manager.c | 59 ++++++++-------------
examples/vm_power_manager/channel_manager.h | 30 ++---------
examples/vm_power_manager/channel_monitor.c | 56 +++++++------------
examples/vm_power_manager/vm_power_cli.c | 4 +-
4 files changed, 48 insertions(+), 101 deletions(-)

diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 5af4996db..3d493c179 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -49,7 +49,7 @@ static bool global_hypervisor_available;
*/
struct virtual_machine_info {
char name[CHANNEL_MGR_MAX_NAME_LEN];
- rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
+ uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
char channel_mask[POWER_MGR_MAX_CPUS];
uint8_t num_channels;
@@ -79,7 +79,7 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
virVcpuInfoPtr cpuinfo;
unsigned i, j;
int n_vcpus;
- uint64_t mask;
+ uint16_t pcpu;

memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);

@@ -120,26 +120,23 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
vm_info->info.nrVirtCpu = n_vcpus;
}
for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
- mask = 0;
+ pcpu = 0;
for (j = 0; j < global_n_host_cpus; j++) {
if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
- mask |= 1ULL << j;
+ pcpu = j;
}
}
- rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
+ vm_info->pcpu_map[i] = pcpu;
}
return 0;
}

int
-set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
+set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
{
unsigned i = 0;
int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
struct virtual_machine_info *vm_info;
- char mask[POWER_MGR_MAX_CPUS];
-
- memcpy(mask, core_mask, POWER_MGR_MAX_CPUS);

if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
@@ -166,17 +163,16 @@ set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
return -1;
}
memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
- for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
- if (mask[i] == 1) {
- VIR_USE_CPU(global_cpumaps, i);
- if (i >= global_n_host_cpus) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
- "number of CPUs(%u)\n",
- i, global_n_host_cpus);
- return -1;
- }
- }
+
+ VIR_USE_CPU(global_cpumaps, i);
+
+ if (pcpu >= global_n_host_cpus) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+ "number of CPUs(%u)\n",
+ i, global_n_host_cpus);
+ return -1;
}
+
if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
global_maplen, flags) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
@@ -184,31 +180,20 @@ set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
vm_info->name);
return -1;
}
- memcpy(&vm_info->pcpu_mask[vcpu], core_mask, POWER_MGR_MAX_CPUS);
- return 0;
-
-}
-
-int
-set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
-{
- char mask[POWER_MGR_MAX_CPUS];

- memset(mask, 0, POWER_MGR_MAX_CPUS);
+ vm_info->pcpu_map[vcpu] = pcpu;

- mask[core_num] = 1;
-
- return set_pcpus_mask(vm_name, vcpu, mask);
+ return 0;
}

-uint64_t
-get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
+uint16_t
+get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
{
struct virtual_machine_info *vm_info =
(struct virtual_machine_info *)chan_info->priv_info;

if (global_hypervisor_available && (vm_info != NULL))
- return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+ return vm_info->pcpu_map[vcpu];
else
return 0;
}
@@ -776,7 +761,7 @@ get_info_vm(const char *vm_name, struct vm_info *info)

memcpy(info->name, vm_info->name, sizeof(vm_info->name));
for (i = 0; i < info->num_vcpus; i++) {
- info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
+ info->pcpu_map[i] = vm_info->pcpu_map[i];
}
return 0;
}
@@ -827,7 +812,7 @@ add_vm(const char *vm_name)
}

for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
- rte_atomic64_init(&new_domain->pcpu_mask[i]);
+ new_domain->pcpu_map[i] = 0;
}
if (update_pcpus_mask(new_domain) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index c2520ab6f..119b0d5cc 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -40,7 +40,6 @@ struct sockaddr_un _sockaddr_un;
#define MAX_CLIENTS 64
#define MAX_VCPUS 20

-
struct libvirt_vm_info {
const char *vm_name;
unsigned int pcpus[MAX_VCPUS];
@@ -82,7 +81,7 @@ struct channel_info {
struct vm_info {
char name[CHANNEL_MGR_MAX_NAME_LEN]; /**< VM name */
enum vm_status status; /**< libvirt status */
- uint64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS]; /**< pCPU mask for each vCPU */
+ uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS]; /**< pCPU map to vCPU */
unsigned num_vcpus; /**< number of vCPUS */
struct channel_info channels[CHANNEL_MGR_MAX_CHANNELS]; /**< Array of channel_info */
unsigned num_channels; /**< Number of channels */
@@ -115,8 +114,7 @@ int channel_manager_init(const char *path);
void channel_manager_exit(void);

/**
- * Get the Physical CPU mask for VM lcore channel(vcpu), result is assigned to
- * core_mask.
+ * Get the Physical CPU for VM lcore channel(vcpu).
* It is not thread-safe.
*
* @param chan_info
@@ -130,26 +128,7 @@ void channel_manager_exit(void);
* - 0 on error.
* - >0 on success.
*/
-uint64_t get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu);
-
-/**
- * Set the Physical CPU mask for the specified vCPU.
- * It is not thread-safe.
- *
- * @param name
- * Virtual Machine name to lookup
- *
- * @param vcpu
- * The virtual CPU to set.
- *
- * @param core_mask
- * The core mask of the physical CPU(s) to bind the vCPU
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);
+uint16_t get_pcpu(struct channel_info *chan_info, unsigned int vcpu);

/**
* Set the Physical CPU for the specified vCPU.
@@ -168,7 +147,8 @@ int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);
* - 0 on success.
* - Negative on error.
*/
-int set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num);
+int set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu);
+
/**
* Add a VM as specified by name to the Channel Manager. The name must
* correspond to a valid libvirt domain name.
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 4189bbf1b..85622e7cb 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -356,7 +356,6 @@ get_pcpu_to_control(struct policy *pol)
/* Convert vcpu to pcpu. */
struct vm_info info;
int pcpu, count;
- uint64_t mask_u64b;
struct core_info *ci;

ci = get_core_info();
@@ -377,13 +376,8 @@ get_pcpu_to_control(struct policy *pol)
*/
get_info_vm(pol->pkt.vm_name, &info);
for (count = 0; count < pol->pkt.num_vcpu; count++) {
- mask_u64b =
- info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
- for (pcpu = 0; mask_u64b;
- mask_u64b &= ~(1ULL << pcpu++)) {
- if ((mask_u64b >> pcpu) & 1)
- pcpu_monitor(pol, ci, pcpu, count);
- }
+ pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
+ pcpu_monitor(pol, ci, pcpu, count);
}
} else {
/*
@@ -636,8 +630,6 @@ apply_policy(struct policy *pol)
static int
process_request(struct channel_packet *pkt, struct channel_info *chan_info)
{
- uint64_t core_mask;
-
if (chan_info == NULL)
return -1;

@@ -646,41 +638,31 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
return -1;

if (pkt->command == CPU_POWER) {
- core_mask = get_pcpus_mask(chan_info, pkt->resource_id);
- if (core_mask == 0) {
- /*
- * Core mask will be 0 in the case where
- * hypervisor is not available so we're working in
- * the host, so use the core as the mask.
- */
- core_mask = 1ULL << pkt->resource_id;
- }
- if (__builtin_popcountll(core_mask) == 1) {
+ unsigned int core_num;

- unsigned core_num = __builtin_ffsll(core_mask) - 1;
+ core_num = get_pcpu(chan_info, pkt->resource_id);

- switch (pkt->unit) {
- case(CPU_POWER_SCALE_MIN):
- power_manager_scale_core_min(core_num);
+ switch (pkt->unit) {
+ case(CPU_POWER_SCALE_MIN):
+ power_manager_scale_core_min(core_num);
break;
- case(CPU_POWER_SCALE_MAX):
- power_manager_scale_core_max(core_num);
+ case(CPU_POWER_SCALE_MAX):
+ power_manager_scale_core_max(core_num);
break;
- case(CPU_POWER_SCALE_DOWN):
- power_manager_scale_core_down(core_num);
+ case(CPU_POWER_SCALE_DOWN):
+ power_manager_scale_core_down(core_num);
break;
- case(CPU_POWER_SCALE_UP):
- power_manager_scale_core_up(core_num);
+ case(CPU_POWER_SCALE_UP):
+ power_manager_scale_core_up(core_num);
break;
- case(CPU_POWER_ENABLE_TURBO):
- power_manager_enable_turbo_core(core_num);
+ case(CPU_POWER_ENABLE_TURBO):
+ power_manager_enable_turbo_core(core_num);
break;
- case(CPU_POWER_DISABLE_TURBO):
- power_manager_disable_turbo_core(core_num);
+ case(CPU_POWER_DISABLE_TURBO):
+ power_manager_disable_turbo_core(core_num);
+ break;
+ default:
break;
- default:
- break;
- }
}
}

diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index 34546809a..41e89ff20 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -95,8 +95,8 @@ cmd_show_vm_parsed(void *parsed_result, struct cmdline *cl,
}
cmdline_printf(cl, "Virtual CPU(s): %u\n", info.num_vcpus);
for (i = 0; i < info.num_vcpus; i++) {
- cmdline_printf(cl, " [%u]: Physical CPU Mask 0x%"PRIx64"\n", i,
- info.pcpu_mask[i]);
+ cmdline_printf(cl, " [%u]: Physical CPU %d\n", i,
+ info.pcpu_map[i]);
}
}
--
2.17.1
Burakov, Anatoly
2018-12-10 13:06:11 UTC
Permalink
Post by David Hunt
Extending the functionality to allow vms to power manage cores beyond 63.
---
examples/vm_power_manager/channel_manager.c | 59 ++++++++-------------
examples/vm_power_manager/channel_manager.h | 30 ++---------
examples/vm_power_manager/channel_monitor.c | 56 +++++++------------
examples/vm_power_manager/vm_power_cli.c | 4 +-
4 files changed, 48 insertions(+), 101 deletions(-)
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 5af4996db..3d493c179 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -49,7 +49,7 @@ static bool global_hypervisor_available;
*/
struct virtual_machine_info {
char name[CHANNEL_MGR_MAX_NAME_LEN];
- rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
+ uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
char channel_mask[POWER_MGR_MAX_CPUS];
uint8_t num_channels;
@@ -79,7 +79,7 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
virVcpuInfoPtr cpuinfo;
unsigned i, j;
int n_vcpus;
- uint64_t mask;
+ uint16_t pcpu;
memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
@@ -120,26 +120,23 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
vm_info->info.nrVirtCpu = n_vcpus;
}
for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
- mask = 0;
+ pcpu = 0;
for (j = 0; j < global_n_host_cpus; j++) {
if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
- mask |= 1ULL << j;
+ pcpu = j;
Not sure what this does.

Initial code goes through all CPU's, marks all that are usable into mask
(i.e. code implies there can be several), and then sets up this mask.

Now, you're going through all CPU's, store *one* arbitrary CPU index,
and set the map up with that single index.

That doesn't look like an equivalent replacement.
Post by David Hunt
}
}
- rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
+ vm_info->pcpu_map[i] = pcpu;
}
return 0;
}
int
-set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
+set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
See patch 1 comments - it would've made this change easier to parse if
set_pcpu was removed there, instead of here.
Post by David Hunt
{
unsigned i = 0;
int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
struct virtual_machine_info *vm_info;
- char mask[POWER_MGR_MAX_CPUS];
-
- memcpy(mask, core_mask, POWER_MGR_MAX_CPUS);
if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
@@ -166,17 +163,16 @@ set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
return -1;
}
memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
- for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
- if (mask[i] == 1) {
- VIR_USE_CPU(global_cpumaps, i);
- if (i >= global_n_host_cpus) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
- "number of CPUs(%u)\n",
- i, global_n_host_cpus);
- return -1;
- }
- }
+
+ VIR_USE_CPU(global_cpumaps, i);
+
+ if (pcpu >= global_n_host_cpus) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+ "number of CPUs(%u)\n",
+ i, global_n_host_cpus);
+ return -1;
}
Some comments on what the above code does would have been nice. Why the
check was removed?
Post by David Hunt
+
--
Thanks,
Anatoly
David Hunt
2018-11-22 17:02:20 UTC
Permalink
Increase the number of addressable cores from 64 to 256. Also remove the
warning that increasing this number beyond 64 will cause problems (because
of the previous use of uint64_t masks). Now this number can be increased
significantly without causing problems.

Signed-off-by: David Hunt <***@intel.com>
---
examples/vm_power_manager/channel_manager.h | 8 ++------
examples/vm_power_manager/power_manager.h | 2 +-
2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index 119b0d5cc..d2a398216 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -14,17 +14,13 @@ extern "C" {
#include <rte_atomic.h>

/* Maximum number of CPUs */
-#define CHANNEL_CMDS_MAX_CPUS 64
-#if CHANNEL_CMDS_MAX_CPUS > 64
-#error Maximum number of cores is 64, overflow is guaranteed to \
- cause problems with VM Power Management
-#endif
+#define CHANNEL_CMDS_MAX_CPUS 256

/* Maximum name length including '\0' terminator */
#define CHANNEL_MGR_MAX_NAME_LEN 64

/* Maximum number of channels to each Virtual Machine */
-#define CHANNEL_MGR_MAX_CHANNELS 64
+#define CHANNEL_MGR_MAX_CHANNELS 256

/* Hypervisor Path for libvirt(qemu/KVM) */
#define CHANNEL_MGR_DEFAULT_HV_PATH "qemu:///system"
diff --git a/examples/vm_power_manager/power_manager.h b/examples/vm_power_manager/power_manager.h
index 605b3c8f6..c3673844c 100644
--- a/examples/vm_power_manager/power_manager.h
+++ b/examples/vm_power_manager/power_manager.h
@@ -33,7 +33,7 @@ core_info_init(void);
#define RTE_LOGTYPE_POWER_MANAGER RTE_LOGTYPE_USER1

/* Maximum number of CPUS to manage */
-#define POWER_MGR_MAX_CPUS 64
+#define POWER_MGR_MAX_CPUS 256
/**
* Initialize power management.
* Initializes resources and verifies the number of CPUs on the system.
--
2.17.1
Burakov, Anatoly
2018-12-10 12:31:16 UTC
Permalink
Post by David Hunt
Increase the number of addressable cores from 64 to 256. Also remove the
warning that increasing this number beyond 64 will cause problems (because
of the previous use of uint64_t masks). Now this number can be increased
significantly without causing problems.
---
Reviewed-by: Anatoly Burakov <***@intel.com>
--
Thanks,
Anatoly
Loading...