diff --git a/NEWS b/NEWS index e31a417a52bfd9ff1bfebe64d500d933a842f6a4..43c3da5aab67162b4095904d3ceb0cb9342f3d72 100644 --- a/NEWS +++ b/NEWS @@ -6,6 +6,11 @@ documents those changes that are of interest to users and admins. -- Remove configuration parameter ShedulerAuth (defunct). -- Add NextJobId to "scontrol show config" output. +* Changes in SLURM 1.2.0-pre2 +============================= + -- Added function slurm_init_slurm_msg to initialize any slurm_msg_t; + no other initialization of the structure is needed. + * Changes in SLURM 1.2.0-pre2 ============================= -- Fixed task dist to work with hostfile and warn about asking for more tasks diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in index 441ed07743837081df2203466949aa950ee62837..40e23ac2dde1cf6be02e79795d0a966153bbe87e 100644 --- a/slurm/slurm.h.in +++ b/slurm/slurm.h.in @@ -776,7 +776,6 @@ typedef struct partition_info update_part_msg_t; * slurm_spawn_kill functions */ typedef struct slurm_step_ctx_struct *slurm_step_ctx; - /*****************************************************************************\ * RESOURCE ALLOCATION FUNCTIONS \*****************************************************************************/ diff --git a/src/api/allocate.c b/src/api/allocate.c index 7a103a9dec8f680a7aa934f13cff1301b362ac8a..b181d82bb6aa655e2b2e2ca49694e42a234a0c37 100644 --- a/src/api/allocate.c +++ b/src/api/allocate.c @@ -98,6 +98,10 @@ slurm_allocate_resources (job_desc_msg_t *req, slurm_msg_t resp_msg; bool host_set = false; char host[64]; + + slurm_init_slurm_msg(&req_msg, NULL); + slurm_init_slurm_msg(&resp_msg, NULL); + /* * set Node and session id for this request */ @@ -112,13 +116,7 @@ slurm_allocate_resources (job_desc_msg_t *req, req_msg.msg_type = REQUEST_RESOURCE_ALLOCATION; req_msg.data = req; - forward_init(&req_msg.forward, NULL); - forward_init(&resp_msg.forward, NULL); - req_msg.ret_list = NULL; - resp_msg.ret_list = NULL; - req_msg.forward_struct_init = 0; - resp_msg.forward_struct_init = 0; - + rc = slurm_send_recv_controller_msg(&req_msg, &resp_msg); /* @@ -184,6 +182,9 @@ slurm_allocate_resources_blocking (const job_desc_msg_t *user_req, if (timeout == 0) timeout = (time_t)-1; + slurm_init_slurm_msg(&req_msg, NULL); + slurm_init_slurm_msg(&resp_msg, NULL); + /* make a copy of the user's job description struct so that we * can make changes before contacting the controller */ req = (job_desc_msg_t *)xmalloc(sizeof(job_desc_msg_t)); @@ -219,10 +220,6 @@ slurm_allocate_resources_blocking (const job_desc_msg_t *user_req, req_msg.msg_type = REQUEST_RESOURCE_ALLOCATION; req_msg.data = req; - forward_init(&req_msg.forward, NULL); - forward_init(&resp_msg.forward, NULL); - req_msg.ret_list = NULL; - resp_msg.ret_list = NULL; rc = slurm_send_recv_controller_msg(&req_msg, &resp_msg); diff --git a/src/api/init_msg.c b/src/api/init_msg.c index fc1e247b18df4fae485a281708f5c3fd3543f9d8..92f9149efb92a358736e54c73b77103f53098dba 100644 --- a/src/api/init_msg.c +++ b/src/api/init_msg.c @@ -46,6 +46,7 @@ #include <slurm/slurm.h> #include "src/common/slurm_protocol_api.h" +#include "src/common/forward.h" /* * slurm_init_job_desc_msg - initialize job descriptor with @@ -132,4 +133,3 @@ void slurm_init_part_desc_msg (update_part_msg_t * update_part_msg) update_part_msg->state_up = (uint16_t) NO_VAL; } - diff --git a/src/api/job_info.c b/src/api/job_info.c index 344fa53c7f942c95e0ef5da6852140ad2f5a4b9d..cc1fb74d329e4bc7d5f46acf8132624d7e7384e3 100644 --- a/src/api/job_info.c +++ b/src/api/job_info.c @@ -409,7
+409,9 @@ slurm_pid2jobid (pid_t job_pid, uint32_t *jobid) List ret_list; memset(&req_msg, 0, sizeof(slurm_msg_t)); + slurm_init_slurm_msg(&req_msg, NULL); memset(&resp_msg, 0, sizeof(slurm_msg_t)); + slurm_init_slurm_msg(&resp_msg, NULL); /* * Set request message address to slurmd on localhost @@ -420,10 +422,6 @@ slurm_pid2jobid (pid_t job_pid, uint32_t *jobid) req.job_pid = job_pid; req_msg.msg_type = REQUEST_JOB_ID; req_msg.data = &req; - forward_init(&req_msg.forward, NULL); - req_msg.ret_list = NULL; - req_msg.orig_addr.sin_addr.s_addr = 0; - req_msg.forward_struct_init = 0; ret_list = slurm_send_recv_node_msg(&req_msg, &resp_msg, 0); diff --git a/src/api/reconfigure.c b/src/api/reconfigure.c index dddfec06ce55e08bdff2ad3b6c6de80fe1298fc6..5af6c78072e48cbf83ad716529afd3ca24f28296 100644 --- a/src/api/reconfigure.c +++ b/src/api/reconfigure.c @@ -136,9 +136,8 @@ _send_message_controller (enum controller_id dest, slurm_msg_t *req) List ret_list = NULL; /*always only going to 1 node */ - forward_init(&req->forward, NULL); - req->ret_list = NULL; - req->forward_struct_init = 0; + slurm_init_slurm_msg(req, NULL); + slurm_init_slurm_msg(&resp_msg, NULL); if ((fd = slurm_open_controller_conn_spec(dest)) < 0) slurm_seterrno_ret(SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR); diff --git a/src/api/slurm_pmi.c b/src/api/slurm_pmi.c index 8ec309f6cb73fc66797688f7b2e12ac85c13c381..b27ecf94b2da334f6cd8cec7cd7d9d7a34ebf90c 100644 --- a/src/api/slurm_pmi.c +++ b/src/api/slurm_pmi.c @@ -84,12 +84,10 @@ int slurm_send_kvs_comm_set(struct kvs_comm_set *kvs_set_ptr, if ((rc = _get_addr()) != SLURM_SUCCESS) return rc; + slurm_init_slurm_msg(&msg_send, NULL); msg_send.address = srun_addr; msg_send.msg_type = PMI_KVS_PUT_REQ; msg_send.data = (void *) kvs_set_ptr; - forward_init(&msg_send.forward, NULL); - msg_send.ret_list = NULL; - msg_send.forward_struct_init = 0; /* Send the RPC to the local srun communcation manager. * Since the srun can be sent thousands of messages at @@ -155,15 +153,11 @@ int slurm_get_kvs_comm_set(struct kvs_comm_set **kvs_set_ptr, data.size = pmi_size; data.port = port; data.hostname = hostname; + slurm_init_slurm_msg(&msg_send, NULL); msg_send.address = srun_addr; msg_send.msg_type = PMI_KVS_GET_REQ; msg_send.data = &data; - /* Send the RPC to the srun communcation manager */ - forward_init(&msg_send.forward, NULL); - msg_send.ret_list = NULL; - msg_send.forward_struct_init = 0; - /* Send the RPC to the local srun communcation manager. 
* Since the srun can be sent thousands of messages at * the same time and refuse some connections, retry as diff --git a/src/api/step_launch.c b/src/api/step_launch.c index c71d7b9587b00f262e62196e4a4b45e41aef6543..a7d9e193786108602d15f5005ebdca2a5647ab8c 100644 --- a/src/api/step_launch.c +++ b/src/api/step_launch.c @@ -515,10 +515,7 @@ static int _message_socket_accept(eio_obj_t *obj, List objs) fflush(stdout); msg = xmalloc(sizeof(slurm_msg_t)); - forward_init(&msg->forward, NULL); - msg->ret_list = NULL; - msg->conn_fd = fd; - msg->forward_struct_init = 0; + slurm_init_slurm_msg(msg, NULL); /* multiple jobs (easily induced via no_alloc) and highly * parallel jobs using PMI sometimes result in slow message diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c index 2f7178ac0b2d7492527c90d794fbedc06b4526a4..0022ce81c317928cb9d314d1510a60a52a9d5747 100644 --- a/src/common/slurm_protocol_api.c +++ b/src/common/slurm_protocol_api.c @@ -746,8 +746,7 @@ List slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout) /* ListIterator itr; */ List ret_list = list_create(destroy_ret_types); - msg->forward_struct = NULL; - msg->forward_struct_init = 0; + slurm_init_slurm_msg(msg, NULL); xassert(fd >= 0); @@ -1460,10 +1459,8 @@ int slurm_send_recv_controller_msg(slurm_msg_t *req, slurm_msg_t *resp) bool backup_controller_flag; uint16_t slurmctld_timeout; - forward_init(&req->forward, NULL); - req->ret_list = NULL; - req->orig_addr.sin_addr.s_addr = 0; - req->forward_struct_init = 0; + slurm_init_slurm_msg(req, NULL); + if ((fd = slurm_open_controller_conn()) < 0) { rc = -1; goto cleanup; @@ -1793,10 +1790,7 @@ int slurm_send_recv_rc_msg_only_one(slurm_msg_t *req, int *rc, int timeout) ret_types_t *ret_type = NULL; int ret_c = 0; - forward_init(&req->forward, NULL); - req->ret_list = NULL; - req->orig_addr.sin_addr.s_addr = 0; - /* no need to init forward_struct_init here */ + slurm_init_slurm_msg(req, NULL); if ((fd = slurm_open_msg_conn(&req->address)) < 0) { return -1; @@ -1834,10 +1828,7 @@ int slurm_send_recv_controller_rc_msg(slurm_msg_t *req, int *rc) ret_types_t *ret_type = NULL; int ret_val = 0; - forward_init(&req->forward, NULL); - req->ret_list = NULL; - req->orig_addr.sin_addr.s_addr = 0; - /* no need to init forward_struct_init here */ + slurm_init_slurm_msg(req, NULL); if ((fd = slurm_open_controller_conn()) < 0) return -1; diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c index b107d2abe0cd3fc2c990a0759a93bf6bbf91bb3f..67bb29d6cc4db94abf4008322614282d3208012e 100644 --- a/src/common/slurm_protocol_defs.c +++ b/src/common/slurm_protocol_defs.c @@ -57,6 +57,7 @@ #include "src/common/switch.h" #include "src/common/xmalloc.h" #include "src/common/job_options.h" +#include "src/common/forward.h" static void _free_all_job_info (job_info_msg_t *msg); static void _slurm_free_job_info_members (job_info_t * job); @@ -70,6 +71,29 @@ static void _slurm_free_partition_info_members (partition_info_t * part); static void _free_all_step_info (job_step_info_response_msg_t *msg); static void _slurm_free_job_step_info_members (job_step_info_t * msg); +/* + * slurm_init_slurm_msg - initialize slurm message + * OUT msg - user defined slurm message + * IN in_msg - NULL if fresh initialization, or already initialized message to + * initialize from. 
(Usually for response messages sent in + * reply to the request message) */ +void slurm_init_slurm_msg (slurm_msg_t * msg, slurm_msg_t * in_msg) +{ + if(in_msg) { + msg->forward = in_msg->forward; + msg->ret_list = in_msg->ret_list; + msg->forward_struct_init = in_msg->forward_struct_init; + msg->forward_struct = in_msg->forward_struct; + } else { + forward_init(&msg->forward, NULL); + msg->ret_list = NULL; + msg->forward_struct_init = 0; + msg->forward_struct = NULL; + } + msg->orig_addr.sin_addr.s_addr = 0; + return; +} void slurm_free_last_update_msg(last_update_msg_t * msg) { @@ -999,7 +1023,7 @@ void inline slurm_free_node_select_msg( } -extern int slurm_free_msg_data(uint32_t type, void *data) +extern int slurm_free_msg_data(slurm_msg_type_t type, void *data) { switch(type) { case REQUEST_BUILD_INFO: @@ -1135,7 +1159,7 @@ extern int slurm_free_msg_data(uint32_t type, void *data) return SLURM_SUCCESS; } -extern uint32_t slurm_get_return_code(uint32_t type, void *data) +extern uint32_t slurm_get_return_code(slurm_msg_type_t type, void *data) { uint32_t rc = 0; diff --git a/src/common/slurm_protocol_defs.h b/src/common/slurm_protocol_defs.h index 2c02b410cb503addab9f51003eacab668c12be4f..6385ff7bfa1cd592c8ce3d356e51e5f26d9871b8 100644 --- a/src/common/slurm_protocol_defs.h +++ b/src/common/slurm_protocol_defs.h @@ -278,13 +278,14 @@ typedef struct ret_data_info { char *node_name; slurm_addr addr; uint32_t nodeid; - void *data; /* what is this? - CJM */ + void *data; /* used to hold the return message data (i.e. + return_code_msg_t) */ } ret_data_info_t; typedef struct ret_types { - uint32_t msg_rc; + uint32_t msg_rc; /* message return code */ uint32_t err; - uint32_t type; + slurm_msg_type_t type; /* message type */ List ret_data_list; /* list of ret_data_info_t pointers */ } ret_types_t; @@ -632,6 +633,18 @@ typedef struct slurm_node_registration_status_msg { } slurm_node_registration_status_msg_t; typedef struct slurm_ctl_conf slurm_ctl_conf_info_msg_t; +/*****************************************************************************\ + * SLURM MESSAGE INITIALIZATION +\*****************************************************************************/ + +/* + * slurm_init_slurm_msg - initialize slurm message + * OUT msg - user defined slurm message + * IN in_msg - NULL if fresh initialization, or already initialized message to + * initialize from.
(Usually for response messages sent in + * reply to the request message) */ +extern void slurm_init_slurm_msg (slurm_msg_t * msg, slurm_msg_t * in_msg); /* free message functions */ void inline slurm_free_last_update_msg(last_update_msg_t * msg); @@ -717,8 +730,8 @@ void inline slurm_free_step_complete_msg(step_complete_msg_t *msg); void inline slurm_free_stat_jobacct_msg(stat_jobacct_msg_t *msg); void inline slurm_free_node_select_msg( node_info_select_request_msg_t *msg); -extern int slurm_free_msg_data(uint32_t type, void *data); -extern uint32_t slurm_get_return_code(uint32_t type, void *data); +extern int slurm_free_msg_data(slurm_msg_type_t type, void *data); +extern uint32_t slurm_get_return_code(slurm_msg_type_t type, void *data); extern char *job_reason_string(enum job_wait_reason inx); extern char *job_state_string(enum job_states inx); diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c index 03bc95cd3cc753814a08556d140c57d25bf1845b..f6d03587fea9d7372a6e6a407cd3f041862cc59b 100644 --- a/src/common/slurm_protocol_pack.c +++ b/src/common/slurm_protocol_pack.c @@ -681,7 +681,7 @@ pack_msg(slurm_msg_t const *msg, Buf buffer) case PMI_KVS_PUT_RESP: break; /* no data in message */ default: - debug("No pack method for msg type %i", msg->msg_type); + debug("No pack method for msg type %u", msg->msg_type); return EINVAL; break; @@ -991,7 +991,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer) case PMI_KVS_PUT_RESP: break; /* no data */ default: - debug("No unpack method for msg type %i", msg->msg_type); + debug("No unpack method for msg type %u", msg->msg_type); return EINVAL; break; } @@ -3279,7 +3279,7 @@ _pack_ret_list(List ret_list, while((ret_type = list_next(itr)) != NULL) { pack32((uint32_t)ret_type->msg_rc, buffer); pack32((uint32_t)ret_type->err, buffer); - pack32((uint32_t)ret_type->type, buffer); + pack16((uint16_t)ret_type->type, buffer); msg.msg_type = ret_type->type; @@ -3314,7 +3314,7 @@ _unpack_ret_list(List *ret_list, list_push(*ret_list, ret_type); safe_unpack32((uint32_t *)&ret_type->msg_rc, buffer); safe_unpack32((uint32_t *)&ret_type->err, buffer); - safe_unpack32((uint32_t *)&ret_type->type, buffer); + safe_unpack16((uint16_t *)&ret_type->type, buffer); msg.msg_type = ret_type->type; diff --git a/src/salloc/msg.c b/src/salloc/msg.c index c6fddc5f97fde6c838fac596ce6a91ec7205d849..278e2c8e58daa44a89da1a0662f6ad2dd0477eae 100644 --- a/src/salloc/msg.c +++ b/src/salloc/msg.c @@ -179,11 +179,9 @@ static int _message_socket_accept(eio_obj_t *obj, List objs) fflush(stdout); msg = xmalloc(sizeof(slurm_msg_t)); - forward_init(&msg->forward, NULL); - msg->ret_list = NULL; + slurm_init_slurm_msg(msg, NULL); msg->conn_fd = fd; - msg->forward_struct_init = 0; - + timeout = slurm_get_msg_timeout(); again: ret_list = slurm_receive_msg(fd, msg, timeout); diff --git a/src/salloc/opt.c b/src/salloc/opt.c index ea278aac40b0b3788d06e8a4ed3258c150ba1715..302877bb9ef71fdf22e8f3548478bd8cfed83825 100644 --- a/src/salloc/opt.c +++ b/src/salloc/opt.c @@ -139,7 +139,6 @@ static char *_print_mail_type(const uint16_t type); static int _parse_signal(const char *signal_name); static long _to_bytes(const char *arg); static void _usage(void); -static bool _valid_node_list(char **node_list_pptr); static bool _verify_node_count(const char *arg, int *min, int *max); static int _verify_geometry(const char *arg, uint16_t *geometry); static int _verify_conn_type(const char *arg); diff --git a/src/sattach/sattach.c b/src/sattach/sattach.c index
77059bbdf59c758d8cfd4fec226ce86e540e1e01..8d891c990c4c7ca0b683da5166c9c4bb6d85169c 100644 --- a/src/sattach/sattach.c +++ b/src/sattach/sattach.c @@ -300,8 +300,10 @@ static int _attach_to_tasks(uint32_t jobid, debug("Entering _attach_to_tasks"); /* Lets make sure that the slurm_msg_t are zeroed out at the start */ memset(&msg, 0, sizeof(slurm_msg_t)); + slurm_init_slurm_msg(&msg, NULL); memset(&dummy_resp_msg, 0, sizeof(slurm_msg_t)); - + slurm_init_slurm_msg(&dummy_resp_msg, NULL); + timeout = slurm_get_msg_timeout(); reattach_msg.job_id = jobid; @@ -315,8 +317,7 @@ static int _attach_to_tasks(uint32_t jobid, msg.msg_type = REQUEST_REATTACH_TASKS; msg.data = &reattach_msg; msg.srun_node_id = 0; - msg.orig_addr.sin_addr.s_addr = 0; - forward_init(&msg.forward, NULL); + msg.forward.cnt = layout->node_cnt - 1; msg.forward.node_id = _create_range_array(1, layout->node_cnt-1); info("msg.forward.cnt = %d", msg.forward.cnt); @@ -325,9 +326,6 @@ static int _attach_to_tasks(uint32_t jobid, info("msg.forward.name = %s", msg.forward.name); msg.forward.addr = layout->node_addr + 1; msg.forward.timeout = timeout * 1000; /* sec to msec */ - msg.forward_struct = NULL; - msg.forward_struct_init = 0; - msg.ret_list = NULL; memcpy(&msg.address, layout->node_addr + 0, sizeof(slurm_addr)); ret_list = slurm_send_recv_node_msg(&msg, &dummy_resp_msg, timeout); @@ -481,10 +479,7 @@ static int _message_socket_accept(eio_obj_t *obj, List objs) fflush(stdout); msg = xmalloc(sizeof(slurm_msg_t)); - forward_init(&msg->forward, NULL); - msg->ret_list = NULL; - msg->conn_fd = fd; - msg->forward_struct_init = 0; + slurm_init_slurm_msg(msg, NULL); /* multiple jobs (easily induced via no_alloc) and highly * parallel jobs using PMI sometimes result in slow message diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c index 0d534614a4ada773b4e2a3cd2c0c3468d3c50f88..e8b431d4b3a97b9a1f846de3a8a72181d50d72e0 100644 --- a/src/slurmctld/agent.c +++ b/src/slurmctld/agent.c @@ -817,12 +817,9 @@ static void *_thread_per_group_rpc(void *args) msg.address = thread_ptr->slurm_addr; msg.msg_type = msg_type; msg.data = task_ptr->msg_args_ptr; + slurm_init_slurm_msg(&msg, NULL); forward_init(&msg.forward, &thread_ptr->forward); - msg.ret_list = NULL; - msg.orig_addr.sin_addr.s_addr = 0; - msg.srun_node_id = 0; - msg.forward_struct_init = 0; - + //info("%s forwarding to %d",thread_ptr->node_name, msg.forward.cnt); thread_ptr->end_time = thread_ptr->start_time + COMMAND_TIMEOUT; if (task_ptr->get_reply) { diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c index 10e4041a7b9ff243ebb1a7ba28699526815e670d..7c2d3cd93c2aa17a7357aa38da3a1a1c523ded28 100644 --- a/src/slurmctld/job_mgr.c +++ b/src/slurmctld/job_mgr.c @@ -4237,9 +4237,7 @@ extern int job_suspend(suspend_msg_t *sus_ptr, uid_t uid, rc_msg.return_code = rc; resp_msg.msg_type = RESPONSE_SLURM_RC; resp_msg.data = &rc_msg; - forward_init(&resp_msg.forward, NULL); - resp_msg.ret_list = NULL; - resp_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&resp_msg, NULL); slurm_send_node_msg(conn_fd, &resp_msg); } return rc; @@ -4331,9 +4329,7 @@ extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd conn_fd) rc_msg.return_code = rc; resp_msg.msg_type = RESPONSE_SLURM_RC; resp_msg.data = &rc_msg; - forward_init(&resp_msg.forward, NULL); - resp_msg.ret_list = NULL; - resp_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&resp_msg, NULL); slurm_send_node_msg(conn_fd, &resp_msg); return rc; } diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c index 
d536aa55aa553594fc30af8ebee91ee593d41312..41ac90ea8f6ec05d9ccc05eed44e80200f378a57 100644 --- a/src/slurmctld/proc_req.c +++ b/src/slurmctld/proc_req.c @@ -525,9 +525,7 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg) response_msg.msg_type = RESPONSE_RESOURCE_ALLOCATION; response_msg.data = &alloc_msg; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&response_msg, NULL); if (slurm_send_node_msg(msg->conn_fd, &response_msg) < 0) _kill_job_on_msg_fail(job_ptr->job_id); @@ -576,10 +574,8 @@ static void _slurm_rpc_dump_conf(slurm_msg_t * msg) response_msg.address = msg->address; response_msg.msg_type = RESPONSE_BUILD_INFO; response_msg.data = &config_tbl; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; - + slurm_init_slurm_msg(&response_msg, NULL); + /* send message */ slurm_send_node_msg(msg->conn_fd, &response_msg); free_slurm_conf(&config_tbl); @@ -621,10 +617,8 @@ static void _slurm_rpc_dump_jobs(slurm_msg_t * msg) response_msg.msg_type = RESPONSE_JOB_INFO; response_msg.data = dump; response_msg.data_size = dump_size; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; - + slurm_init_slurm_msg(&response_msg, NULL); + /* send message */ slurm_send_node_msg(msg->conn_fd, &response_msg); xfree(dump); @@ -657,9 +651,7 @@ static void _slurm_rpc_end_time(slurm_msg_t * msg) response_msg.address = msg->address; response_msg.msg_type = SRUN_TIMEOUT; response_msg.data = &timeout_msg; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&response_msg, NULL); slurm_send_node_msg(msg->conn_fd, &response_msg); } debug2("_slurm_rpc_end_time jobid=%u %s", @@ -700,9 +692,7 @@ static void _slurm_rpc_dump_nodes(slurm_msg_t * msg) response_msg.msg_type = RESPONSE_NODE_INFO; response_msg.data = dump; response_msg.data_size = dump_size; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&response_msg, NULL); /* send message */ slurm_send_node_msg(msg->conn_fd, &response_msg); @@ -743,10 +733,8 @@ static void _slurm_rpc_dump_partitions(slurm_msg_t * msg) response_msg.msg_type = RESPONSE_PARTITION_INFO; response_msg.data = dump; response_msg.data_size = dump_size; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; - + slurm_init_slurm_msg(&response_msg, NULL); + /* send message */ slurm_send_node_msg(msg->conn_fd, &response_msg); xfree(dump); @@ -1072,10 +1060,8 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg) resp.address = msg->address; resp.msg_type = RESPONSE_JOB_STEP_CREATE; resp.data = &job_step_resp; - forward_init(&resp.forward, NULL); - resp.ret_list = NULL; - resp.forward_struct_init = 0; - + slurm_init_slurm_msg(&resp, NULL); + slurm_send_node_msg(msg->conn_fd, &resp); slurm_step_layout_destroy(job_step_resp.step_layout); slurm_cred_destroy(slurm_cred); @@ -1138,9 +1124,7 @@ static void _slurm_rpc_job_step_get_info(slurm_msg_t * msg) response_msg.msg_type = RESPONSE_JOB_STEP_INFO; response_msg.data = resp_buffer; response_msg.data_size = resp_buffer_size; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&response_msg, NULL); 
slurm_send_node_msg(msg->conn_fd, &response_msg); xfree(resp_buffer); } @@ -1317,10 +1301,8 @@ static void _slurm_rpc_job_alloc_info(slurm_msg_t * msg) response_msg.msg_type = RESPONSE_JOB_ALLOCATION_INFO; response_msg.data = &job_info_resp_msg; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; - + slurm_init_slurm_msg(&response_msg, NULL); + slurm_send_node_msg(msg->conn_fd, &response_msg); select_g_free_jobinfo(&job_info_resp_msg.select_jobinfo); xfree(job_info_resp_msg.cpu_count_reps); @@ -1390,10 +1372,8 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg) response_msg.msg_type = RESPONSE_JOB_ALLOCATION_INFO_LITE; response_msg.data = &job_info_resp_msg; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; - + slurm_init_slurm_msg(&response_msg, NULL); + slurm_send_node_msg(msg->conn_fd, &response_msg); select_g_free_jobinfo(&job_info_resp_msg.select_jobinfo); xfree(job_info_resp_msg.cpu_count_reps); @@ -1694,9 +1674,7 @@ static void _slurm_rpc_step_layout(slurm_msg_t *msg) response_msg.msg_type = RESPONSE_STEP_LAYOUT; response_msg.data = step_layout; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&response_msg, NULL); slurm_send_node_msg(msg->conn_fd, &response_msg); slurm_step_layout_destroy(step_layout); @@ -1761,10 +1739,8 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg) submit_msg.error_code = error_code; response_msg.msg_type = RESPONSE_SUBMIT_BATCH_JOB; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; - + slurm_init_slurm_msg(&response_msg, NULL); + response_msg.data = &submit_msg; slurm_send_node_msg(msg->conn_fd, &response_msg); @@ -1797,9 +1773,7 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg) submit_msg.error_code = error_code; response_msg.msg_type = RESPONSE_SUBMIT_BATCH_JOB; response_msg.data = &submit_msg; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&response_msg, NULL); slurm_send_node_msg(msg->conn_fd, &response_msg); schedule(); /* has own locks */ schedule_job_save(); /* has own locks */ @@ -2066,9 +2040,7 @@ static void _slurm_rpc_job_ready(slurm_msg_t * msg) response_msg.address = msg->address; response_msg.msg_type = RESPONSE_JOB_READY; response_msg.data = &rc_msg; - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&response_msg, NULL); slurm_send_node_msg(msg->conn_fd, &response_msg); } } @@ -2098,9 +2070,7 @@ static void _slurm_rpc_node_select_info(slurm_msg_t * msg) response_msg.msg_type = RESPONSE_NODE_SELECT_INFO; response_msg.data = get_buf_data(buffer); response_msg.data_size = get_buf_offset(buffer); - forward_init(&response_msg.forward, NULL); - response_msg.ret_list = NULL; - response_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&response_msg, NULL); /* send message */ slurm_send_node_msg(msg->conn_fd, &response_msg); diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c index ebcbe38facc4e0345efa5126209cc79969c85c5b..68a52168197bbd0e051a7fe5cc1e8e90fe9c3082 100644 --- a/src/slurmctld/step_mgr.c +++ b/src/slurmctld/step_mgr.c @@ -1003,9 +1003,7 @@ extern int job_step_checkpoint(checkpoint_msg_t *ckpt_ptr, checkpoint_resp_msg_t 
resp_data; slurm_msg_t resp_msg; - forward_init(&resp_msg.forward, NULL); - resp_msg.ret_list = NULL; - resp_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&resp_msg, NULL); /* find the job */ job_ptr = find_job_record (ckpt_ptr->job_id); @@ -1098,9 +1096,7 @@ extern int job_step_checkpoint_comp(checkpoint_comp_msg_t *ckpt_ptr, slurm_msg_t resp_msg; return_code_msg_t rc_msg; - forward_init(&resp_msg.forward, NULL); - resp_msg.ret_list = NULL; - resp_msg.forward_struct_init = 0; + slurm_init_slurm_msg(&resp_msg, NULL); /* find the job */ job_ptr = find_job_record (ckpt_ptr->job_id); diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c index bb044a2077b286ff76e3504e548980a5afcbd76d..f1162a75ebbc4a24fe1d4f23a331c869d408ae0b 100644 --- a/src/slurmd/slurmd/req.c +++ b/src/slurmd/slurmd/req.c @@ -943,6 +943,7 @@ _abort_job(uint32_t job_id) { complete_batch_script_msg_t resp; slurm_msg_t resp_msg; + slurm_init_slurm_msg(&resp_msg, NULL); resp.job_id = job_id; resp.job_rc = 1; @@ -950,8 +951,6 @@ _abort_job(uint32_t job_id) resp.node_name = NULL; /* unused */ resp_msg.msg_type = REQUEST_COMPLETE_BATCH_SCRIPT; resp_msg.data = &resp; - forward_init(&resp_msg.forward, NULL); - resp_msg.ret_list = NULL; return slurm_send_only_controller_msg(&resp_msg); } @@ -1195,6 +1194,7 @@ _rpc_stat_jobacct(slurm_msg_t *msg) } } resp = xmalloc(sizeof(stat_jobacct_msg_t)); + slurm_init_slurm_msg(&resp_msg, msg); resp->job_id = req->job_id; resp->step_id = req->step_id; resp->return_code = SLURM_SUCCESS; @@ -1216,11 +1216,7 @@ _rpc_stat_jobacct(slurm_msg_t *msg) resp_msg.msg_type = MESSAGE_STAT_JOBACCT; resp_msg.data = resp; - resp_msg.forward = msg->forward; - resp_msg.ret_list = msg->ret_list; - resp_msg.forward_struct_init = msg->forward_struct_init; - resp_msg.forward_struct = msg->forward_struct; - + slurm_send_node_msg(msg->conn_fd, &resp_msg); slurm_free_stat_jobacct_msg(resp); return SLURM_SUCCESS; @@ -1282,6 +1278,7 @@ static void _rpc_pid2jid(slurm_msg_t *msg) continue; if (stepd_pid_in_container(fd, req->job_pid) || req->job_pid == stepd_daemon_pid(fd)) { + slurm_init_slurm_msg(&resp_msg, msg); resp.job_id = stepd->jobid; resp.return_code = SLURM_SUCCESS; found = true; @@ -1299,11 +1296,7 @@ static void _rpc_pid2jid(slurm_msg_t *msg) resp_msg.address = msg->address; resp_msg.msg_type = RESPONSE_JOB_ID; resp_msg.data = &resp; - resp_msg.forward = msg->forward; - resp_msg.ret_list = msg->ret_list; - resp_msg.forward_struct_init = msg->forward_struct_init; - resp_msg.forward_struct = msg->forward_struct; - + slurm_send_node_msg(msg->conn_fd, &resp_msg); } else { debug3("_rpc_pid2jid: pid(%u) not found", req->job_pid); @@ -1428,6 +1421,7 @@ _rpc_reattach_tasks(slurm_msg_t *msg) uint32_t nodeid = (uint32_t)NO_VAL; memset(&resp_msg, 0, sizeof(slurm_msg_t)); + slurm_init_slurm_msg(&resp_msg, msg); fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->job_step_id); if (fd == -1) { @@ -1502,10 +1496,6 @@ done: debug2("update step addrs rc = %d", rc); resp_msg.data = resp; resp_msg.msg_type = RESPONSE_REATTACH_TASKS; - resp_msg.forward = msg->forward; - resp_msg.forward_struct = msg->forward_struct; - resp_msg.forward_struct_init = msg->forward_struct_init; - resp_msg.ret_list = msg->ret_list; resp->node_name = xstrdup(conf->node_name); resp->srun_node_id = nodeid; resp->return_code = rc; @@ -1761,6 +1751,8 @@ _epilog_complete(uint32_t jobid, int rc) slurm_msg_t msg; epilog_complete_msg_t req; + slurm_init_slurm_msg(&msg, NULL); + _wait_state_completed(jobid, 5); req.job_id = jobid; @@ 
-1773,8 +1765,6 @@ _epilog_complete(uint32_t jobid, int rc) msg.msg_type = MESSAGE_EPILOG_COMPLETE; msg.data = &req; - forward_init(&msg.forward, NULL); - msg.ret_list = NULL; if (slurm_send_only_controller_msg(&msg) < 0) { error("Unable to send epilog complete message: %m"); diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c index d7265fae01ccd2802b98f1e0b1f6c0f566bcbc44..4f48e1dc1b34df857d5806573be450c31850df32 100644 --- a/src/slurmd/slurmd/slurmd.c +++ b/src/slurmd/slurmd/slurmd.c @@ -360,14 +360,12 @@ _service_connection(void *arg) conn_t *con = (conn_t *) arg; List ret_list = NULL; slurm_msg_t *msg = xmalloc(sizeof(slurm_msg_t)); - - + debug3("in the service_connection"); + slurm_init_slurm_msg(msg, NULL); msg->conn_fd = con->fd; /* this could change if being forwarded to */ memcpy(&msg->orig_addr, con->cli_addr, sizeof(slurm_addr)); - forward_init(&msg->forward, NULL); - msg->ret_list = NULL; ret_list = slurm_receive_msg(con->fd, msg, 0); if(!ret_list || errno != SLURM_SUCCESS) { @@ -406,12 +404,8 @@ send_registration_msg(uint32_t status, bool startup) slurm_node_registration_status_msg_t *msg = xmalloc (sizeof (slurm_node_registration_status_msg_t)); - forward_init(&req.forward, NULL); - req.ret_list = NULL; - req.forward_struct_init = 0; - forward_init(&req.forward, NULL); - resp.ret_list = NULL; - resp.forward_struct_init = 0; + slurm_init_slurm_msg(&req, NULL); + slurm_init_slurm_msg(&resp, NULL); msg->startup = (uint16_t) startup; _fill_registration_msg(msg); diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c index ada8907aa43035396de1ce579181e4e1bf3f7d45..50febfc2c2a9e02601f188811303fcbbb7f667a6 100644 --- a/src/slurmd/slurmstepd/mgr.c +++ b/src/slurmd/slurmstepd/mgr.c @@ -444,9 +444,8 @@ _send_exit_msg(slurmd_job_t *job, uint32_t *tid, int n, int status) resp.data = &msg; resp.msg_type = MESSAGE_TASK_EXIT; resp.srun_node_id = job->nodeid; - resp.forward.cnt = 0; - resp.ret_list = NULL; - resp.orig_addr.sin_addr.s_addr = 0; + slurm_init_slurm_msg(&resp, NULL); + /* * XXX Hack for TCP timeouts on exit of large, synchronized @@ -1337,13 +1336,10 @@ _send_launch_failure (launch_tasks_request_msg_t *msg, slurm_addr *cli, int rc) slurm_set_addr(&resp_msg.address, msg->resp_port[msg->srun_node_id % msg->num_resp_port], NULL); + slurm_init_slurm_msg(&resp_msg, NULL); resp_msg.data = &resp; resp_msg.msg_type = RESPONSE_LAUNCH_TASKS; - forward_init(&resp_msg.forward, NULL); - resp_msg.ret_list = NULL; - resp_msg.orig_addr.sin_addr.s_addr = 0; - resp_msg.forward_struct_init = 0; - + resp.node_name = conf->node_name; resp.srun_node_id = msg->srun_node_id; resp.return_code = rc ? 
rc : -1; @@ -1367,13 +1363,10 @@ _send_launch_resp(slurmd_job_t *job, int rc) debug("Sending launch resp rc=%d", rc); + slurm_init_slurm_msg(&resp_msg, NULL); resp_msg.address = srun->resp_addr; resp_msg.data = &resp; resp_msg.msg_type = RESPONSE_LAUNCH_TASKS; - forward_init(&resp_msg.forward, NULL); - resp_msg.ret_list = NULL; - resp_msg.orig_addr.sin_addr.s_addr = 0; - resp_msg.forward_struct_init = 0; resp.node_name = conf->node_name; resp.srun_node_id = job->nodeid; @@ -1405,13 +1398,11 @@ _complete_batch_script(slurmd_job_t *job, int err, int status) req.job_rc = status; req.slurm_rc = err; + slurm_init_slurm_msg(&req_msg, NULL); req.node_name = conf->node_name; req_msg.msg_type= REQUEST_COMPLETE_BATCH_SCRIPT; req_msg.data = &req; - forward_init(&req_msg.forward, NULL); - req_msg.ret_list = NULL; - req_msg.forward_struct_init = 0; - + info("sending REQUEST_COMPLETE_BATCH_SCRIPT"); /* Note: these log messages don't go to slurmd.log from here */ diff --git a/src/srun/allocate.c b/src/srun/allocate.c index 5508ca0ec3df1b9b12ce2cd7c65a26f8fd89a974..eb500b5a2294a97c7e664977d9e2b52a0207f34d 100644 --- a/src/srun/allocate.c +++ b/src/srun/allocate.c @@ -274,11 +274,9 @@ _accept_msg_connection(slurm_fd slurmctld_fd, debug2("got message connection from %s:%d", host, port); msg = xmalloc(sizeof(slurm_msg_t)); - forward_init(&msg->forward, NULL); - msg->ret_list = NULL; + slurm_init_slurm_msg(msg, NULL); msg->conn_fd = fd; - msg->forward_struct_init = 0; - + again: ret_list = slurm_receive_msg(fd, msg, 0); diff --git a/src/srun/launch.c b/src/srun/launch.c index d77562ae6c9876a781a606fe270e8b7d248bfe3a..f888a600f4a1a8f96d062b49768b81a1c8451657 100644 --- a/src/srun/launch.c +++ b/src/srun/launch.c @@ -217,13 +217,12 @@ launch(void *arg) free(host); m = &msg_array_ptr[job->thr_count]; + slurm_init_slurm_msg(m, NULL); m->srun_node_id = (uint32_t)i; m->msg_type = REQUEST_LAUNCH_TASKS; m->data = &r; - m->ret_list = NULL; - m->orig_addr.sin_addr.s_addr = 0; - m->buffer = buffer; + m->buffer = buffer; memcpy(&m->address, &job->step_layout->node_addr[i], diff --git a/src/srun/msg.c b/src/srun/msg.c index eab2b4b1f262b875355c7cbfba59eb73e5580a9a..087e79f70097159e3184b597406caaff91c64340 100644 --- a/src/srun/msg.c +++ b/src/srun/msg.c @@ -913,11 +913,9 @@ _accept_msg_connection(srun_job_t *job, int fdnum) uc[0], uc[1], uc[2], uc[3], ntohs(port)); msg = xmalloc(sizeof(slurm_msg_t)); - forward_init(&msg->forward, NULL); - msg->ret_list = NULL; + slurm_init_slurm_msg(msg, NULL); msg->conn_fd = fd; - msg->forward_struct_init = 0; - + /* multiple jobs (easily induced via no_alloc) and highly * parallel jobs using PMI sometimes result in slow message * responses and timeouts. Raise the default timeout for srun. 
*/ diff --git a/src/srun/reattach.c b/src/srun/reattach.c index 4dde93a07f3e4828327292f0ac92976929ece0ce..cba3b0c41caeae2af73ccf3610e206a413e2701a 100644 --- a/src/srun/reattach.c +++ b/src/srun/reattach.c @@ -341,10 +341,8 @@ _attach_to_job(srun_job_t *job) r->cred = job->cred; m->data = r; m->msg_type = REQUEST_REATTACH_TASKS; - forward_init(&m->forward, NULL); - m->ret_list = NULL; - msg->forward_struct_init = 0; - + slurm_init_slurm_msg(m, NULL); + memcpy(&m->address, &job->step_layout->node_addr[i], sizeof(slurm_addr)); } diff --git a/src/srun/signals.c b/src/srun/signals.c index 34b4683666ab72786cbe9404c1db6b4252f5c131..89c6884c73fd602d141b585ce38bacc7b576e9ee 100644 --- a/src/srun/signals.c +++ b/src/srun/signals.c @@ -71,13 +71,6 @@ static int srun_sigarray[] = { SIGALRM, SIGUSR1, SIGUSR2, SIGPIPE, 0 }; -typedef struct task_info { - slurm_msg_t *req_ptr; - srun_job_t *job_ptr; - int host_inx; -} task_info_t; - - /* * Static prototypes */ diff --git a/src/sview/job_info.c b/src/sview/job_info.c index 701efd3d44d98e71dd6e262760d7d76c7b9e4057..c1694e7810e841671ce86079d5c377ac202e54a6 100644 --- a/src/sview/job_info.c +++ b/src/sview/job_info.c @@ -36,6 +36,7 @@ DEF_TIMERS; enum { SORTID_POS = POS_LOC, + SORTID_ACTION, SORTID_JOBID, SORTID_ALLOC, SORTID_PARTITION, @@ -83,6 +84,8 @@ enum { static display_data_t display_data_job[] = { {G_TYPE_INT, SORTID_POS, NULL, FALSE, -1, refresh_job, create_model_job, admin_edit_job}, + {G_TYPE_STRING, SORTID_ACTION, "Action", TRUE, 0, refresh_job, + create_model_job, admin_edit_job}, {G_TYPE_INT, SORTID_JOBID, "JobID", TRUE, -1, refresh_job, create_model_job, admin_edit_job}, {G_TYPE_INT, SORTID_ALLOC, NULL, FALSE, -1, refresh_job, @@ -797,6 +800,29 @@ extern GtkListStore *create_model_job(int type) GtkTreeIter iter; switch(type) { + case SORTID_ACTION: + model = gtk_list_store_new(1, G_TYPE_STRING); + gtk_list_store_append(model, &iter); + gtk_list_store_set(model, &iter, + 0, "cancel", + -1); + gtk_list_store_append(model, &iter); + gtk_list_store_set(model, &iter, + 0, "suspend", + -1); + gtk_list_store_append(model, &iter); + gtk_list_store_set(model, &iter, + 0, "resume", + -1); + gtk_list_store_append(model, &iter); + gtk_list_store_set(model, &iter, + 0, "checkpoint", + -1); + gtk_list_store_append(model, &iter); + gtk_list_store_set(model, &iter, + 0, "requeue", + -1); + break; case SORTID_TIMELIMIT: break; case SORTID_PRIORITY: diff --git a/src/sview/sview.c b/src/sview/sview.c index c436c5774e81136085d8b31c33a0ef70e37a7d30..1e533f2685c97f8c5ab04cfe281a8d3ae1dc96f8 100644 --- a/src/sview/sview.c +++ b/src/sview/sview.c @@ -257,6 +257,65 @@ static void _change_refresh(GtkToggleAction *action, gpointer user_data) return; } +static void _create_display_popup(GtkToggleAction *action, gpointer user_data) +{ + GtkWidget *table = gtk_table_new(1, 2, FALSE); + GtkWidget *label = gtk_label_new("Interval in Seconds "); + GtkObject *adjustment = gtk_adjustment_new(global_sleep_time, + 1, 10000, + 5, 60, + 1); + GtkWidget *spin_button = + gtk_spin_button_new(GTK_ADJUSTMENT(adjustment), 1, 0); + GtkWidget *popup = gtk_dialog_new_with_buttons( + "Refresh Interval", + GTK_WINDOW (user_data), + GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT, + GTK_STOCK_OK, + GTK_RESPONSE_OK, + GTK_STOCK_CANCEL, + GTK_RESPONSE_CANCEL, + NULL); + GError *error = NULL; + int response = 0; + char *temp = NULL; + + gtk_container_set_border_width(GTK_CONTAINER(table), 10); + + gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox), + table, FALSE, FALSE, 0); + + 
gtk_table_attach_defaults(GTK_TABLE(table), label, 0, 1, 0, 1); + gtk_table_attach_defaults(GTK_TABLE(table), spin_button, 1, 2, 0, 1); + + gtk_widget_show_all(popup); + response = gtk_dialog_run (GTK_DIALOG(popup)); + + if (response == GTK_RESPONSE_OK) + { + global_sleep_time = + gtk_spin_button_get_value_as_int(GTK_SPIN_BUTTON(spin_button)); + temp = g_strdup_printf("Refresh Interval set to %d seconds.", + global_sleep_time); + gtk_statusbar_pop(GTK_STATUSBAR(main_statusbar), + STATUS_REFRESH); + response = gtk_statusbar_push(GTK_STATUSBAR(main_statusbar), + STATUS_REFRESH, + temp); + g_free(temp); + if (!g_thread_create(_refresh_thr, GINT_TO_POINTER(response), + FALSE, &error)) + { + g_printerr ("Failed to create refresh thread: %s\n", + error->message); + } + } + + gtk_widget_destroy(popup); + + return; +} + static void _tab_pos(GtkRadioAction *action, GtkRadioAction *extra, GtkNotebook *notebook) @@ -296,55 +355,86 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook) /* Our menu*/ const char *ui_description = "<ui>" - " <menubar name='MainMenu'>" - " <menu action='Options'>" - " <menuitem action='Set Refresh Interval'/>" - " <menuitem action='Refresh'/>" + " <menubar name='main'>" + " <menu action='options'>" + " <menuitem action='interval'/>" + " <menuitem action='refresh'/>" " <separator/>" - " <menuitem action='Admin Mode'/>" + " <menuitem action='admin'/>" + " <menuitem action='reconfig'/>" " <separator/>" - " <menu action='Tab Pos'>" - " <menuitem action='Top'/>" - " <menuitem action='Bottom'/>" - " <menuitem action='Left'/>" - " <menuitem action='Right'/>" + " <menu action='tab_pos'>" + " <menuitem action='tab_top'/>" + " <menuitem action='tab_bottom'/>" + " <menuitem action='tab_left'/>" + " <menuitem action='tab_right'/>" " </menu>" " <separator/>" - " <menuitem action='Exit'/>" + " <menuitem action='exit'/>" + " </menu>" + " <menu action='displays'>" + " <menuitem action='config'/>" + " <menuitem action='daemons'/>" + " <menuitem action='nodes'/>" + " <menuitem action='jobs'/>" + " <menuitem action='steps'/>" + " <menuitem action='partitions'/>" " </menu>" - " <menu action='Help'>" - " <menuitem action='About'/>" + " <menu action='help'>" + " <menuitem action='about'/>" " </menu>" " </menubar>" "</ui>"; GtkActionEntry entries[] = { - {"Options", NULL, "_Options"}, - {"Tab Pos", NULL, "_Tab Pos"}, - {"Set Refresh Interval", NULL, "Set _Refresh Interval", + {"options", NULL, "_Options"}, + {"displays", NULL, "_Static Displays"}, + {"tab_pos", NULL, "_Tab Pos"}, + {"interval", NULL, "Set _Refresh Interval", "<control>r", "Change Refresh Interval", G_CALLBACK(_change_refresh)}, - {"Refresh", NULL, "Refresh", + {"refresh", NULL, "Refresh", "F5", "Refreshes page", G_CALLBACK(refresh_main)}, - {"Exit", NULL, "E_xit", + {"reconfig", NULL, "_SLURM Reconfigure", + "<control>s", "Reconfigures System", + G_CALLBACK(slurm_reconfigure)}, + {"config", NULL, "Config _Info", + "<control>i", "Displays info from slurm.conf file", + G_CALLBACK(_create_display_popup)}, + {"daemons", NULL, "_Daemons", + "<control>d", "Displays daemons running on node", + G_CALLBACK(_create_display_popup)}, + {"nodes", NULL, "_Nodes", + "<control>n", "Displays info about all nodes", + G_CALLBACK(_create_display_popup)}, + {"jobs", NULL, "_Jobs", + "<control>j", "Displays info about all jobs", + G_CALLBACK(_create_display_popup)}, + {"steps", NULL, "St_eps", + "<control>e", "Displays info about all job steps", + G_CALLBACK(_create_display_popup)}, + {"partitions", NULL, "_Partitions",
"<control>p", "Displays info about all partitions", + G_CALLBACK(_create_display_popup)}, + {"exit", NULL, "E_xit", "<control>x", "Exits Program", G_CALLBACK(_delete)}, - {"Help", NULL, "_Help"}, - {"About", NULL, "_About"} + {"help", NULL, "_Help"}, + {"about", NULL, "_About"} }; GtkRadioActionEntry radio_entries[] = { - {"Top", NULL, "_Top", + {"tab_top", NULL, "_Top", "<control>T", "Move tabs to top", 2}, - {"Bottom", NULL, "_Bottom", + {"tab_bottom", NULL, "_Bottom", "<control>B", "Move tabs to the bottom", 3}, - {"Left", NULL, "_Left", + {"tab_left", NULL, "_Left", "<control>L", "Move tabs to the Left", 4}, - {"Right", NULL, "_Right", + {"tab_right", NULL, "_Right", "<control>R", "Move tabs to the Right", 1} }; GtkToggleActionEntry toggle_entries[] = { - {"Admin Mode", NULL, + {"admin", NULL, "_Admin Mode", "<control>a", "Allows user to change or update information", G_CALLBACK(_set_admin_mode), @@ -377,7 +467,7 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook) } /* Finally, return the actual menu bar created by the item factory. */ - return gtk_ui_manager_get_widget (ui_manager, "/MainMenu"); + return gtk_ui_manager_get_widget (ui_manager, "/main"); } void *_popup_thr_main(void *arg) {