def compute_metrics(model, N, depth, coord):
    from joblib import Parallel, delayed

    # if GPU is enabled
    if USE_GPU:
        model.cuda()

    final_metric_list = []
    results = {}

    for y in range(601, 701):
        gt_path = os.path.join(GT_PATH, str(y))
        gt_files = os.listdir(os.path.join(gt_path, 'maps'))
        # sort ground-truth frames numerically by their filename prefix
        gt_files_sorted = sorted(gt_files, key=lambda x: int(x.split(".")[0]))

        predictions = inference(model, y, N, depth, coord)
        gt_prediction = zip(gt_files_sorted, predictions)

        # compute per-frame metrics in parallel
        metric_list = Parallel(n_jobs=8)(
            delayed(inner_worker)(n, pack, y)
            for n, pack in enumerate(gt_prediction))

        aucj_mean = np.mean([x[0] for x in metric_list])
        aucs_mean = np.mean([x[1] for x in metric_list])
        nss_mean = np.mean([x[2] for x in metric_list])
        cc_mean = np.mean([x[3] for x in metric_list])
        sim_mean = np.mean([x[4] for x in metric_list])

        message = ('For video number {} the metrics are:\nAUC-JUDD is {}\n'
                   'AUC-SHUFFLED is {}\nNSS is {}\nCC is {}\nSIM is {}\n'
                   '==============================').format(
                       y, aucj_mean, aucs_mean, nss_mean, cc_mean, sim_mean)
        send(message)

        final_metric_list.append(
            (aucj_mean, aucs_mean, nss_mean, cc_mean, sim_mean))

    # average each metric over all videos
    Aucj = np.mean([m[0] for m in final_metric_list])
    Aucs = np.mean([m[1] for m in final_metric_list])
    Nss = np.mean([m[2] for m in final_metric_list])
    Cc = np.mean([m[3] for m in final_metric_list])
    Sim = np.mean([m[4] for m in final_metric_list])

    results['AUC Judd'] = Aucj
    results['AUC Shuff'] = Aucs
    results['NSS'] = Nss
    results['CC'] = Cc
    results['SIM'] = Sim

    finalMessage = ('Final average of metrics is:\nAUC-JUDD is {}\n'
                    'AUC-SHUFFLED is {}\nNSS is {}\nCC is {}\n'
                    'SIM is {}').format(Aucj, Aucs, Nss, Cc, Sim)
    send(finalMessage)

    return results
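# NOTE: illustrative sketch only. `inner_worker` is defined elsewhere in the
# repo; it returns the five per-frame metrics as a tuple. The helpers below
# show one standard way to compute the three distribution-based metrics
# (AUC-Judd and AUC-shuffled are omitted for brevity); the function names
# and the binary-fixation-map input are assumptions, not the repo's actual API.
import numpy as np

def _nss(pred, fixations):
    # Normalized Scanpath Saliency: mean of the standardized saliency map
    # at fixated locations (fixations is a binary fixation map)
    p = (pred - pred.mean()) / (pred.std() + 1e-8)
    return p[fixations > 0].mean()

def _cc(pred, gt):
    # Pearson's linear correlation coefficient between the two maps
    p = (pred - pred.mean()) / (pred.std() + 1e-8)
    g = (gt - gt.mean()) / (gt.std() + 1e-8)
    return (p * g).mean()

def _sim(pred, gt):
    # Similarity: sum of elementwise minima after normalizing both maps
    # to be probability distributions
    p = pred / (pred.sum() + 1e-8)
    g = gt / (gt.sum() + 1e-8)
    return np.minimum(p, g).sum()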
# metrics = get_saliency_metrics(data_iterator, model, N=100)
#
# # log metric values
# for metric in metrics.keys():
#     log_value("Metrics/{}".format(metric),
#               metrics[metric], id_epoch)

# get epoch loss
print("--> {} epoch {}".format(mode, id_epoch))
epoch_loss = train_eval(mode, model, optimizer, dataloader)
lr = list(get_lr_optimizer(optimizer))[0]
print("-----------")
print("Done! {} epoch {} loss {} lr {}".format(
    mode, id_epoch, epoch_loss, lr))
send("{} epoch {}/{} loss {}".format(
    mode, id_epoch, n_epochs, epoch_loss))

# record loss
log_value("loss/{}".format(mode), epoch_loss, id_epoch)
log_value("lr/{}".format(mode), lr, id_epoch)
for v in model.state_dict():
    log_histogram("Layer {}".format(v), model.state_dict()[v], id_epoch)
# save_model(model, optimizer, id_epoch, path_out,
#            name_model='{:03d}'.format(id_epoch))

# store model if val loss improves
if mode == VAL:
    if best_loss > epoch_loss:
        # update loss
        best_loss = epoch_loss
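# NOTE: illustrative sketch only. The commented-out save_model above is the
# repo's own checkpoint helper; a minimal stand-in using plain PyTorch
# serialization could look like the following (the function name and the
# checkpoint path layout are assumptions):
import os
import torch

def save_checkpoint(model, optimizer, id_epoch, path_out):
    # persist model and optimizer state so training can be resumed later
    torch.save({'epoch': id_epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()},
               os.path.join(path_out, '{:03d}.pth'.format(id_epoch)))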
cc_mean = np.mean([x[3] for x in metric_list])
sim_mean = np.mean([x[4] for x in metric_list])

print("For video number {} the metrics are:".format(y))
print("AUC-JUDD is {}".format(aucj_mean))
print("AUC-SHUFFLED is {}".format(aucs_mean))
print("NSS is {}".format(nss_mean))
print("CC is {}".format(cc_mean))
print("SIM is {}".format(sim_mean))
print("Time elapsed so far: {}".format(
    datetime.datetime.now().replace(microsecond=0) - start))
print("==============================")

message = ('For video number {} the metrics are:\nAUC-JUDD is {}\n'
           'AUC-SHUFFLED is {}\nNSS is {}\nCC is {}\nSIM is {}\n'
           'Time elapsed so far: {}\n'
           '==============================').format(
               y, aucj_mean, aucs_mean, nss_mean, cc_mean, sim_mean,
               datetime.datetime.now().replace(microsecond=0) - start)
send(message)

final_metric_list.append(
    (aucj_mean, aucs_mean, nss_mean, cc_mean, sim_mean))

# average each metric over all videos
Aucj = np.mean([m[0] for m in final_metric_list])
Aucs = np.mean([m[1] for m in final_metric_list])
Nss = np.mean([m[2] for m in final_metric_list])
Cc = np.mean([m[3] for m in final_metric_list])
Sim = np.mean([m[4] for m in final_metric_list])

print("Final average of metrics is:")
print("AUC-JUDD is {}".format(Aucj))
print("AUC-SHUFFLED is {}".format(Aucs))
print("NSS is {}".format(Nss))
print("CC is {}".format(Cc))
print("SIM is {}".format(Sim))
# split parameters: freeze the first 26 (encoder) and train the rest (decoder)
decoder_parameters = []
base_params = []
for i, (a, p) in enumerate(model.named_parameters()):
    if i > 25:
        # print(i, a, p.shape)
        decoder_parameters.append(p)
    else:
        base_params.append(p)
        p.requires_grad = False

# ADAM OPTIMIZER
optimizer = Adam(decoder_parameters,
                 lr=lr,
                 weight_decay=0.000001)

trainable_parameters = sum(p.numel() for p in model.parameters()
                           if p.requires_grad)
print("Trainable parameters: ", trainable_parameters)
send("Trainable parameters: " + str(trainable_parameters))
send("Experiment: " + args.path_out)

# STOCHASTIC GRADIENT DESCENT
# optimizer = SGD(model.parameters(),
#                 lr=0.00001,
#                 momentum=0.9,
#                 weight_decay=0.00001,
#                 nesterov=True)

# set learning rate scheduler
# ReduceLROnPlateau(
#     optimizer,
#     mode (str): 'min' reduces the lr when the metric stops decreasing,
#                 'max' does the opposite,
#     factor (float): multiplicative factor by which the lr is reduced,
#     patience (int): number of epochs without improvement after which
#                     the lr is reduced,
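# NOTE: illustrative sketch only, not wired into the setup above. A working
# instantiation of the scheduler described in the comment, using the real
# torch.optim.lr_scheduler API (the hyperparameter values are assumptions):
from torch.optim.lr_scheduler import ReduceLROnPlateau

scheduler = ReduceLROnPlateau(
    optimizer,
    mode='min',      # reduce when the monitored loss stops decreasing
    factor=0.1,      # new_lr = lr * factor
    patience=10)     # epochs without improvement before reducing

# after each validation epoch, pass the monitored value:
# scheduler.step(epoch_loss)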
# # log metric values
# for metric in metrics.keys():
#     log_value("Metrics/{}".format(metric),
#               metrics[metric], id_epoch)

# get epoch loss
print("--> {} epoch {}".format(mode, id_epoch))
epoch_lossD = train_eval(mode, modelG, modelD, optimizerD, dataloader)
lrD = list(get_lr_optimizer(optimizerD))[0]
print("-----------")
print("\033[FDone! {} epoch {}\n\tDiscriminator loss {}".format(
    mode, id_epoch, epoch_lossD))
send("Done! {} epoch {}\n\tDiscriminator loss {}".format(
    mode, id_epoch, epoch_lossD))
print("\n")

# record loss
log_value("lossD/{}".format(mode), epoch_lossD, id_epoch)
log_value("lrD/{}".format(mode), lrD, id_epoch)
# for v in modelG.state_dict():
#     log_histogram("Layer {}".format(v), modelG.state_dict()[v], id_epoch)
# save_model(model, optimizer, id_epoch, path_out,
#            name_model='{:03d}'.format(id_epoch))

# store model if val loss improves
if mode == VAL:
    if best_lossD > epoch_lossD:
        # update loss
        best_lossD = epoch_lossD
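# NOTE: illustrative sketch only. train_eval is defined elsewhere in the
# repo; one common discriminator update it might perform is the standard
# adversarial BCE loss over real and generated saliency maps, shown below.
# All names and the assumption that modelD outputs probabilities are
# hypothetical, not the repo's actual code:
import torch
import torch.nn as nn

bce = nn.BCELoss()

def discriminator_step(modelG, modelD, optimizerD, frames, gt_maps):
    real_labels = torch.ones(gt_maps.size(0), 1)
    fake_labels = torch.zeros(gt_maps.size(0), 1)

    optimizerD.zero_grad()
    # the discriminator should score ground-truth maps as real...
    loss_real = bce(modelD(gt_maps), real_labels)
    # ...and generator outputs as fake (detach so G is not updated here)
    loss_fake = bce(modelD(modelG(frames).detach()), fake_labels)
    lossD = loss_real + loss_fake
    lossD.backward()
    optimizerD.step()
    return lossD.item()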