diff --git a/NEWS b/NEWS
index 864a73abc944fd3e23071254d6ffe325fc405802..d1d4d4b77cc7af23162560685c3fd54b7901d5ce 100644
--- a/NEWS
+++ b/NEWS
@@ -269,6 +269,7 @@ documents those changes that are of interest to users and administrators.
  -- Cray: Avoid running application/step Node Health Check on the external
     job step.
  -- Optimization enhancements for partition based job preemption.
+ -- Address some build warnings from GCC 7.1.
 
 * Changes in Slurm 17.02.7
 ==========================
diff --git a/src/common/env.c b/src/common/env.c
index 0620a079e41b76af9acba19d75a04ad2470579e1..c28947d30f3fda7d64b29f52b19b66320f74d190 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -324,7 +324,7 @@ char *getenvp(char **env, const char *name)
 	size_t len = strlen(name);
 	char **ep;
 
-	if ((env == NULL) || (env[0] == '\0'))
+	if ((env == NULL) || (env[0] == NULL))
 		return (NULL);
 
 	ep = _find_name_in_env (env, name);
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 42c5a9401be385c68dd1872c7def4af8b10e2726..4eb38998363482ce1bd0a216723f3ca77a5d7cc3 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -1279,7 +1279,7 @@ static int _accounting_mark_all_nodes_down(char *reason)
 
 	node_ptr = node_record_table_ptr;
 	for (i = 0; i < node_record_count; i++, node_ptr++) {
-		if (node_ptr->name == '\0')
+		if (!node_ptr->name)
 			continue;
 		if ((rc = clusteracct_storage_g_node_down(
 			    acct_db_conn,
@@ -2219,7 +2219,7 @@ extern void set_cluster_tres(bool assoc_mgr_locked)
 	node_ptr = node_record_table_ptr;
 	for (i = 0; i < node_record_count; i++, node_ptr++) {
 		uint64_t cpu_count = 0, mem_count = 0;
-		if (node_ptr->name == '\0')
+		if (!node_ptr->name)
 			continue;
 
 		if (slurmctld_conf.fast_schedule) {
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index ac63be65e8bcd34a3fe3745faff31b48d99d06f5..c01df66dd3d05866c9ef93c3d42e39c0be54c630 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -3613,7 +3613,7 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 		return ESLURM_DISABLED;
 
 	if ((job_desc_msg->req_nodes == NULL) ||
-	    (job_desc_msg->req_nodes == '\0')) {
+	    (job_desc_msg->req_nodes[0] == '\0')) {
 		/* assume all nodes available to job for testing */
 		avail_bitmap = bit_alloc(node_record_count);
 		bit_nset(avail_bitmap, 0, (node_record_count - 1));
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index d837992a085355c95fd4dc7fa388cc6605134eb0..266e5b8e508d57b50341c5d4a20101ad209a266a 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -3783,7 +3783,7 @@ extern int send_nodes_to_accounting(time_t event_time)
 			reason = node_ptr->reason;
 		else
 			reason = "First Registration";
-		if (node_ptr->name == '\0' ||
+		if (!node_ptr->name ||
 		    (!IS_NODE_DRAIN(node_ptr) && !IS_NODE_FAIL(node_ptr) &&
 		     !IS_NODE_DOWN(node_ptr))) {
 			/* At this point, the node appears to be up,
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index 02aacd6bcddae234e870754ac4cd05d4dc826e2a..4c9d8912b696ebcf194e4566bbf8c993b09896e2 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -1643,7 +1643,7 @@ extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 
 	if (part_desc->deny_accounts != NULL) {
 		xfree(part_ptr->deny_accounts);
-		if (part_desc->deny_accounts == '\0')
+		if (part_desc->deny_accounts[0] == '\0')
 			xfree(part_desc->deny_accounts);
 		part_ptr->deny_accounts = part_desc->deny_accounts;
 		part_desc->deny_accounts = NULL;
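
For reference, the warning class these hunks address: GCC 7.1 flags a comparison
between a pointer and the zero character constant '\0' (-Wpointer-compare),
because such code almost always meant either a NULL-pointer test or an
empty-string test. A minimal standalone sketch (not Slurm code; the function
and strings are invented for illustration) of the problematic spelling and the
two intended forms:

	/*
	 * Sketch only. Writing  if (name == '\0')  compares the pointer
	 * itself against 0 and draws the GCC 7.1 -Wpointer-compare warning;
	 * the explicit forms below say what was actually meant.
	 */
	#include <stdio.h>

	static void check(const char *name)
	{
		if (!name)			/* pointer is NULL */
			puts("no name");
		else if (name[0] == '\0')	/* string exists but is empty */
			puts("empty name");
		else
			printf("name: %s\n", name);
	}

	int main(void)
	{
		check(NULL);
		check("");
		check("node001");
		return 0;
	}

The hunks above pick whichever form matches the original intent: "!ptr" where
the pointer itself may be NULL (node names), and "ptr[0] == '\0'" where an
empty string is the meaningful case (req_nodes, deny_accounts).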