From 497462b49789de48b10ad1eb2642d8f8477f7891 Mon Sep 17 00:00:00 2001
From: Danny Auble <da@llnl.gov>
Date: Tue, 29 Sep 2009 21:12:26 +0000
Subject: [PATCH] Fixed issue where, if max nodes wasn't specified but was
 later set by a limit, that limit was requested as the actual maximum.

---
 NEWS                             |  4 +-
 slurm/slurm.h.in                 |  1 +
 src/common/slurm_protocol_pack.c |  1 +
 src/slurmctld/job_mgr.c          | 62 +++++++++++++++---------
 src/slurmctld/job_scheduler.c    |  3 +-
 src/slurmctld/node_scheduler.c   |  3 +-
 src/slurmctld/slurmctld.h        |  2 +
 src/sview/job_info.c             | 83 +++++++++++++++++++-------------
 8 files changed, 99 insertions(+), 60 deletions(-)

diff --git a/NEWS b/NEWS
index 03ecf0b2e33..381d21cd7cf 100644
--- a/NEWS
+++ b/NEWS
@@ -33,7 +33,9 @@ documents those changes that are of interest to users and admins.
     is replaced by NumProcs (number of processors requested or actually
     allocated) and ReqNodes (number of nodes requested) is replaced by
     NumNodes (number of nodes requested or actually allocated).
-
+ -- Fixed issue where, if max nodes wasn't specified but was later set by
+    a limit, that limit was requested as the actual maximum.
+
 * Changes in SLURM 2.1.0-pre4
 =============================
  -- Move processing of node configuration information in slurm.conf and
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 15b5c28b6fc..4b67461ca8d 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -673,6 +673,7 @@ typedef struct job_info {
 	uint16_t contiguous;	/* 1 if job requires contiguous nodes */
 	uint16_t cpus_per_task;	/* number of processors required for each task */
 	char *dependency;	/* syncrhonize job execution with other jobs */
+	time_t eligible_time;	/* time job is eligible for running */
 	time_t end_time;	/* time of termination, actual or expected */
 	char *exc_nodes;	/* comma separated list of excluded nodes */
 	int *exc_node_inx;	/* excluded list index pairs into node_table:
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index ffd6daa0435..4b9d2dc6fee 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -2793,6 +2793,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 	safe_unpack32(&job->time_limit, buffer);
 
 	safe_unpack_time(&job->submit_time, buffer);
+	safe_unpack_time(&job->eligible_time, buffer);
 	safe_unpack_time(&job->start_time, buffer);
 	safe_unpack_time(&job->end_time, buffer);
 	safe_unpack_time(&job->suspend_time, buffer);
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index c52804a5c19..8bb4d6d0d52 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -173,7 +173,8 @@ static bool _top_priority(struct job_record *job_ptr);
 static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 				  struct part_record *part_ptr,
 				  acct_association_rec_t *assoc_in,
-				  acct_qos_rec_t *qos_ptr);
+				  acct_qos_rec_t *qos_ptr,
+				  bool *limit_set_max_nodes);
 static int _validate_job_create_req(job_desc_msg_t * job_desc);
 static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
 			       uid_t submit_uid);
@@ -2383,6 +2384,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	List license_list = NULL;
 	bool valid;
 	acct_qos_rec_t qos_rec, *qos_ptr;
+	bool limit_set_max_nodes = 0;
 
 #ifdef HAVE_BG
 	uint16_t geo[SYSTEM_DIMENSIONS];
@@ -2535,7 +2537,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 			return qos_error;
 
 	if ((accounting_enforce & ACCOUNTING_ENFORCE_LIMITS) &&
-	    (!_validate_acct_policy(job_desc, part_ptr, assoc_ptr, qos_ptr))) {
+	    (!_validate_acct_policy(job_desc, part_ptr,
+				    assoc_ptr, qos_ptr,
+				    &limit_set_max_nodes))) {
 		info("_job_create: exceeded association's node or time limit "
 		     "for user %u", job_desc->user_id);
 		error_code = ESLURM_ACCOUNTING_POLICY;
@@ -2614,7 +2618,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 
 #ifdef HAVE_BG
 	select_g_select_jobinfo_get(job_desc->select_jobinfo,
-			SELECT_JOBDATA_GEOMETRY, &geo);
+				    SELECT_JOBDATA_GEOMETRY, &geo);
 	if (geo[0] == (uint16_t) NO_VAL) {
 		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
 			geo[i] = 0;
@@ -2711,7 +2715,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	}
 
 	job_ptr = *job_pptr;
-
+
+	job_ptr->limit_set_max_nodes = limit_set_max_nodes;
+
 	job_ptr->assoc_id = assoc_rec.id;
 	job_ptr->assoc_ptr = (void *) assoc_ptr;
 	job_ptr->qos_ptr = (void *) qos_ptr;
@@ -4239,17 +4245,16 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer)
 	else
 		pack32(dump_job_ptr->time_limit, buffer);
 
-	if (dump_job_ptr->details)
+	if (dump_job_ptr->details) {
 		pack_time(dump_job_ptr->details->submit_time, buffer);
-	else
-		pack_time((time_t) 0, buffer);
-	if (IS_JOB_PENDING(dump_job_ptr) && dump_job_ptr->details &&
-	    dump_job_ptr->details->begin_time &&
-	    (dump_job_ptr->details->begin_time > time(NULL))) {
 		/* Earliest possible begin time */
 		pack_time(dump_job_ptr->details->begin_time, buffer);
-	} else	/* Actual or expected start time */
-		pack_time(dump_job_ptr->start_time, buffer);
+	} else {
+		pack_time((time_t) 0, buffer);
+		pack_time((time_t) 0, buffer);
+	}
+	/* Actual or expected start time */
+	pack_time(dump_job_ptr->start_time, buffer);
 	pack_time(dump_job_ptr->end_time, buffer);
 	pack_time(dump_job_ptr->suspend_time, buffer);
 	pack_time(dump_job_ptr->pre_sus_time, buffer);
@@ -6827,16 +6832,19 @@ extern void update_job_nodes_completing(void)
 static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 				  struct part_record *part_ptr,
 				  acct_association_rec_t *assoc_in,
-				  acct_qos_rec_t *qos_ptr)
+				  acct_qos_rec_t *qos_ptr,
+				  bool *limit_set_max_nodes)
 {
 	uint32_t time_limit;
 	acct_association_rec_t *assoc_ptr = assoc_in;
 	int parent = 0;
 	int timelimit_set = 0;
-	int max_nodes_set = 0;
 	char *user_name = assoc_ptr->user;
 	bool rc = true;
 
+	xassert(limit_set_max_nodes);
+	(*limit_set_max_nodes) = 0;
+
 	slurm_mutex_lock(&assoc_mgr_qos_lock);
 	if(qos_ptr) {
 		/* for validation we don't need to look at
@@ -6864,11 +6872,11 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			rc = false;
 			goto end_qos;
 		} else if (job_desc->max_nodes == 0
-			   || (max_nodes_set
+			   || (*limit_set_max_nodes
 			       && (job_desc->max_nodes
 				   > qos_ptr->grp_nodes))) {
 			job_desc->max_nodes = qos_ptr->grp_nodes;
-			max_nodes_set = 1;
+			(*limit_set_max_nodes) = 1;
 		} else if (job_desc->max_nodes > qos_ptr->grp_nodes) {
 			info("job submit for user %s(%u): "
@@ -6878,7 +6886,9 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			     job_desc->user_id,
 			     job_desc->max_nodes,
 			     qos_ptr->grp_nodes);
-			job_desc->max_nodes = qos_ptr->grp_nodes;
+			if(job_desc->max_nodes == NO_VAL)
+				(*limit_set_max_nodes) = 1;
+			job_desc->max_nodes = qos_ptr->grp_nodes;
 		}
 	}
 
@@ -6926,11 +6936,11 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			rc = false;
 			goto end_qos;
 		} else if (job_desc->max_nodes == 0
-			   || (max_nodes_set
+			   || (*limit_set_max_nodes
 			       && (job_desc->max_nodes
 				   > qos_ptr->max_nodes_pj))) {
 			job_desc->max_nodes = qos_ptr->max_nodes_pj;
-			max_nodes_set = 1;
+			(*limit_set_max_nodes) = 1;
 		} else if (job_desc->max_nodes > qos_ptr->max_nodes_pj) {
 			info("job submit for user %s(%u): "
@@ -6940,6 +6950,8 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			     job_desc->user_id,
 			     job_desc->max_nodes,
 			     qos_ptr->max_nodes_pj);
+			if(job_desc->max_nodes == NO_VAL)
+				(*limit_set_max_nodes) = 1;
 			job_desc->max_nodes = qos_ptr->max_nodes_pj;
 		}
 	}
@@ -7023,11 +7035,11 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			rc = false;
 			break;
 		} else if (job_desc->max_nodes == 0
-			   || (max_nodes_set
+			   || (*limit_set_max_nodes
 			       && (job_desc->max_nodes
 				   > assoc_ptr->grp_nodes))) {
 			job_desc->max_nodes = assoc_ptr->grp_nodes;
-			max_nodes_set = 1;
+			(*limit_set_max_nodes) = 1;
 		} else if (job_desc->max_nodes > assoc_ptr->grp_nodes) {
 			info("job submit for user %s(%u): "
@@ -7037,6 +7049,8 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			     job_desc->user_id,
 			     job_desc->max_nodes,
 			     assoc_ptr->grp_nodes);
+			if(job_desc->max_nodes == NO_VAL)
+				(*limit_set_max_nodes) = 1;
 			job_desc->max_nodes = assoc_ptr->grp_nodes;
 		}
 	}
@@ -7097,11 +7111,11 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			rc = false;
 			break;
 		} else if (job_desc->max_nodes == 0
-			   || (max_nodes_set
+			   || (*limit_set_max_nodes
 			       && (job_desc->max_nodes
 				   > assoc_ptr->max_nodes_pj))) {
 			job_desc->max_nodes = assoc_ptr->max_nodes_pj;
-			max_nodes_set = 1;
+			(*limit_set_max_nodes) = 1;
 		} else if (job_desc->max_nodes > assoc_ptr->max_nodes_pj) {
 			info("job submit for user %s(%u): "
@@ -7111,6 +7125,8 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			     job_desc->user_id,
 			     job_desc->max_nodes,
 			     assoc_ptr->max_nodes_pj);
+			if(job_desc->max_nodes == NO_VAL)
+				(*limit_set_max_nodes) = 1;
 			job_desc->max_nodes = assoc_ptr->max_nodes_pj;
 		}
 	}
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index c54b91c4b1f..6dc173bd9e6 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -1003,7 +1003,8 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 	max_nodes = MIN(job_ptr->details->max_nodes, part_ptr->max_nodes);
 	max_nodes = MIN(max_nodes, 500000);	/* prevent overflows */
 
-	if (job_ptr->details->max_nodes)
+	if (!job_ptr->limit_set_max_nodes
+	    && job_ptr->details->max_nodes)
 		req_nodes = max_nodes;
 	else
 		req_nodes = min_nodes;
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 2a8a2ca2415..b356e1b30f9 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -1015,7 +1015,8 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	max_nodes = MIN(job_ptr->details->max_nodes, part_ptr->max_nodes);
 	max_nodes = MIN(max_nodes, 500000);	/* prevent overflows */
 
-	if (job_ptr->details->max_nodes)
+	if (!job_ptr->limit_set_max_nodes
+	    && job_ptr->details->max_nodes)
 		req_nodes = max_nodes;
 	else
 		req_nodes = min_nodes;
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 59db3c1ad4c..978330d1653 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -407,6 +407,8 @@ struct job_record {
 					 * in progress */
 	char *licenses;			/* licenses required by the job */
 	List license_list;		/* structure with license info */
+	bool limit_set_max_nodes;	/* if max_nodes was set from
+					 * a limit false if user set */
 	uint16_t mail_type;		/* see MAIL_JOB_* in slurm.h */
 	char *mail_user;		/* user to get e-mail notification */
 	uint32_t magic;			/* magic cookie for data integrity */
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index d88c770b308..08bc6563550 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -70,6 +70,8 @@ enum {
 #ifdef HAVE_BG
 	SORTID_BLRTSIMAGE,
 	SORTID_NODELIST,
+	SORTID_NODELIST_EXC,
+	SORTID_NODELIST_REQ,
 	SORTID_BLOCK,
 #endif
 	SORTID_COLOR,
@@ -80,8 +82,6 @@ enum {
 	SORTID_CONTIGUOUS,
 	SORTID_DEPENDENCY,
 	SORTID_CPUS_PER_TASK,
-	SORTID_END_TIME,
-	SORTID_EXC_NODELIST,
 	SORTID_FEATURES,
 	SORTID_EXIT_CODE,
 #ifdef HAVE_BG
@@ -113,6 +113,8 @@ enum {
 	SORTID_NICE,
 #ifndef HAVE_BG
 	SORTID_NODELIST,
+	SORTID_NODELIST_EXC,
+	SORTID_NODELIST_REQ,
 #endif
 	SORTID_NODES,
 	SORTID_NTASKS_PER_CORE,
@@ -125,20 +127,21 @@ enum {
 	SORTID_RAMDISKIMAGE,
 #endif
 	SORTID_REASON,
-	SORTID_REQ_NODELIST,
 	SORTID_REQ_PROCS,
 	SORTID_RESV_NAME,
#ifdef HAVE_BG
 	SORTID_ROTATE,
 #endif
 	SORTID_SHARED,
-	SORTID_START_TIME,
 	SORTID_STATE,
 	SORTID_STATE_NUM,
-	SORTID_SUBMIT_TIME,
-	SORTID_SUSPEND_TIME,
 	SORTID_TASKS,
 	SORTID_TIME,
+	SORTID_TIME_ELIGIBLE,
+	SORTID_TIME_END,
+	SORTID_TIME_START,
+	SORTID_TIME_SUBMIT,
+	SORTID_TIME_SUSPEND,
 	SORTID_TIMELIMIT,
 	SORTID_TMP_DISK,
 	SORTID_UPDATED,
@@ -200,36 +203,39 @@ static display_data_t display_data_job[] = {
 	 create_model_job, admin_edit_job},
 	{G_TYPE_INT, SORTID_STATE_NUM, NULL, FALSE, EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_TIME, "Running Time", TRUE,
+	{G_TYPE_STRING, SORTID_TIME, "Time:Running", TRUE,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_SUBMIT_TIME, "Submit Time", FALSE,
+	{G_TYPE_STRING, SORTID_TIME_SUBMIT, "Time:Submit", FALSE,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_START_TIME, "Start Time", FALSE,
+	{G_TYPE_STRING, SORTID_TIME_ELIGIBLE, "Time:Eligible", FALSE,
 	 EDIT_TEXTBOX, refresh_job,
 	 create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_END_TIME, "End Time", FALSE,
+	{G_TYPE_STRING, SORTID_TIME_START, "Time:Start", FALSE,
+	 EDIT_TEXTBOX, refresh_job,
+	 create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_TIME_END, "Time:End", FALSE,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_SUSPEND_TIME, "Suspended Time", FALSE,
+	{G_TYPE_STRING, SORTID_TIME_SUSPEND, "Time:Suspended", FALSE,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_TIMELIMIT, "Time Limit", FALSE,
+	{G_TYPE_STRING, SORTID_TIMELIMIT, "Time:Limit", FALSE,
 	 EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_NODES, "Nodes", TRUE, EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
 #ifdef HAVE_BG
 	{G_TYPE_STRING, SORTID_NODELIST, "BP List", TRUE, EDIT_NONE,
 	 refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_REQ_NODELIST, "Requested BP List",
+	{G_TYPE_STRING, SORTID_NODELIST_EXC, "BP List Excluded",
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_EXC_NODELIST, "Excluded BP List",
+	{G_TYPE_STRING, SORTID_NODELIST_REQ, "BP List Requested",
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 #else
 	{G_TYPE_STRING, SORTID_NODELIST, "Nodelist", TRUE, EDIT_NONE,
 	 refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_REQ_NODELIST, "Requested NodeList",
+	{G_TYPE_STRING, SORTID_NODELIST_EXC, "NodeList Excluded",
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_EXC_NODELIST, "Excluded NodeList",
+	{G_TYPE_STRING, SORTID_NODELIST_REQ, "NodeList Requested",
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 #endif
 	{G_TYPE_STRING, SORTID_CONTIGUOUS, "Contiguous", FALSE, EDIT_MODEL,
@@ -280,7 +286,7 @@ static display_data_t display_data_job[] = {
 	 FALSE, EDIT_TEXTBOX,
 	 refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_NICE, "Nice", FALSE, EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_ACCOUNT, "Account Charged",
+	{G_TYPE_STRING, SORTID_ACCOUNT, "Account",
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_REASON, "Reason Waiting", FALSE,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
@@ -698,11 +704,11 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 		type = "contiguous";
 		break;
-	case SORTID_REQ_NODELIST:
+	case SORTID_NODELIST_REQ:
 		job_msg->req_nodes = xstrdup(new_text);
 		type = "requested nodelist";
 		break;
-	case SORTID_EXC_NODELIST:
+	case SORTID_NODELIST_EXC:
 		job_msg->exc_nodes = xstrdup(new_text);
 		type = "excluded nodelist";
 		break;
@@ -825,7 +831,7 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 				     (void *) new_text);
 		break;
 #endif
-	case SORTID_START_TIME:
+	case SORTID_TIME_START:
 		job_msg->begin_time = parse_time((char *)new_text, 0);
 		type = "start time";
 		break;
@@ -1061,13 +1067,19 @@ static void _layout_job_record(GtkTreeView *treeview,
 			    sizeof(tmp_char));
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
-						 SORTID_SUBMIT_TIME),
+						 SORTID_TIME_SUBMIT),
+				   tmp_char);
+	slurm_make_time_str((time_t *)&job_ptr->eligible_time, tmp_char,
+			    sizeof(tmp_char));
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_TIME_ELIGIBLE),
 				   tmp_char);
 	slurm_make_time_str((time_t *)&job_ptr->start_time, tmp_char,
 			    sizeof(tmp_char));
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
-						 SORTID_START_TIME),
+						 SORTID_TIME_START),
 				   tmp_char);
 	if ((job_ptr->time_limit == INFINITE) &&
 	    (job_ptr->end_time > time(NULL)))
@@ -1077,12 +1089,12 @@ static void _layout_job_record(GtkTreeView *treeview,
 				    sizeof(tmp_char));
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
-						 SORTID_END_TIME),
+						 SORTID_TIME_END),
 				   tmp_char);
 	secs2time_str(job_ptr->suspend_time, tmp_char, sizeof(tmp_char));
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
-						 SORTID_SUSPEND_TIME),
+						 SORTID_TIME_SUSPEND),
 				   tmp_char);
 
 	if (job_ptr->time_limit == NO_VAL)
@@ -1218,11 +1230,11 @@ static void _layout_job_record(GtkTreeView *treeview,
 				   tmp_char);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
-						 SORTID_REQ_NODELIST),
+						 SORTID_NODELIST_REQ),
 				   job_ptr->req_nodes);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
-						 SORTID_EXC_NODELIST),
+						 SORTID_NODELIST_EXC),
 				   job_ptr->exc_nodes);
 #ifdef HAVE_BG
 	add_display_treestore_line(update, treestore, &iter,
@@ -1466,20 +1478,23 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	gtk_tree_store_set(treestore, iter, SORTID_TIME, tmp_char, -1);
 	slurm_make_time_str((time_t *)&job_ptr->submit_time, tmp_char,
 			    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_SUBMIT_TIME, tmp_char, -1);
+	gtk_tree_store_set(treestore, iter, SORTID_TIME_SUBMIT, tmp_char, -1);
+	slurm_make_time_str((time_t *)&job_ptr->eligible_time, tmp_char,
+			    sizeof(tmp_char));
+	gtk_tree_store_set(treestore, iter, SORTID_TIME_ELIGIBLE, tmp_char, -1);
 	slurm_make_time_str((time_t *)&job_ptr->start_time, tmp_char,
 			    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_START_TIME, tmp_char, -1);
+	gtk_tree_store_set(treestore, iter, SORTID_TIME_START, tmp_char, -1);
 	if ((job_ptr->time_limit == INFINITE) &&
 	    (job_ptr->end_time > time(NULL)))
 		sprintf(tmp_char, "NONE");
 	else
 		slurm_make_time_str((time_t *)&job_ptr->end_time, tmp_char,
 				    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_END_TIME, tmp_char, -1);
+	gtk_tree_store_set(treestore, iter, SORTID_TIME_END, tmp_char, -1);
 	slurm_make_time_str((time_t *)&job_ptr->suspend_time, tmp_char,
 			    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_SUSPEND_TIME, tmp_char, -1);
+	gtk_tree_store_set(treestore, iter, SORTID_TIME_SUSPEND, tmp_char, -1);
 
 	if (job_ptr->time_limit == NO_VAL)
 		sprintf(tmp_char, "Partition Limit");
@@ -1623,9 +1638,9 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	gtk_tree_store_set(treestore, iter,
 			   SORTID_NODE_INX, job_ptr->node_inx, -1);
 
-	gtk_tree_store_set(treestore, iter, SORTID_REQ_NODELIST,
+	gtk_tree_store_set(treestore, iter, SORTID_NODELIST_REQ,
 			   job_ptr->req_nodes, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_EXC_NODELIST,
+	gtk_tree_store_set(treestore, iter, SORTID_NODELIST_EXC,
 			   job_ptr->exc_nodes, -1);
 
 	if(job_ptr->contiguous)
@@ -2750,7 +2765,7 @@ display_it:
 		   to the treestore we don't really care about
 		   the return value */
 		create_treestore(tree_view, display_data_job,
-				 SORTID_CNT, SORTID_SUBMIT_TIME, SORTID_COLOR);
+				 SORTID_CNT, SORTID_TIME_SUBMIT, SORTID_COLOR);
 	}
 
 	view = INFO_VIEW;
@@ -2861,7 +2876,7 @@ display_it:
 		   to the treestore we don't really care about
 		   the return value */
 		create_treestore(tree_view, popup_win->display_data,
-				 SORTID_CNT, SORTID_SUBMIT_TIME, SORTID_COLOR);
+				 SORTID_CNT, SORTID_TIME_SUBMIT, SORTID_COLOR);
 	}
 
 	setup_popup_grid_list(popup_win);
-- 
GitLab
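
Reviewer's note, not part of the patch itself: the following is a condensed, standalone C sketch of the behavior the job_mgr.c and job_scheduler.c/node_scheduler.c hunks above implement. The struct, the helper names and the NO_VAL definition here are illustrative stand-ins rather than the real slurmctld types; only the two decisions shown (remembering that a limit, not the user, filled in max_nodes, and then requesting min_nodes instead of that limit) mirror the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NO_VAL 0xfffffffe	/* stand-in for SLURM's "value not set" sentinel */

struct job {
	uint32_t min_nodes;
	uint32_t max_nodes;		/* NO_VAL when the user gave no maximum */
	bool limit_set_max_nodes;	/* new flag: max_nodes came from a limit */
};

/* Mirrors the _validate_acct_policy() change: when a QOS/association node
 * limit fills in a max_nodes value the user never specified, remember it. */
static void apply_node_limit(struct job *job, uint32_t limit_nodes)
{
	if (job->max_nodes == NO_VAL || job->max_nodes > limit_nodes) {
		if (job->max_nodes == NO_VAL)
			job->limit_set_max_nodes = true;
		job->max_nodes = limit_nodes;
	}
}

/* Mirrors the job_start_data()/select_nodes() change: only treat max_nodes
 * as the node count to request when the user actually asked for it. */
static uint32_t pick_req_nodes(const struct job *job)
{
	if (!job->limit_set_max_nodes && job->max_nodes)
		return job->max_nodes;	/* user-supplied maximum */
	return job->min_nodes;		/* maximum came from a limit */
}

int main(void)
{
	struct job job = { .min_nodes = 2, .max_nodes = NO_VAL };

	apply_node_limit(&job, 64);	/* e.g. a MaxNodes=64 limit */
	/* Before this patch the scheduler would have asked for 64 nodes here;
	 * with limit_set_max_nodes set it asks for the 2 nodes requested. */
	printf("req_nodes = %u\n", (unsigned) pick_req_nodes(&job));
	return 0;
}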