def batch(send, recv, images_batch, labels_batch, send_opt=None, recv_opt=None):
    imgsa_s, imgsb_s, imgsa_r, imgsb_r, targets, _ = get_images(images_batch, labels_batch)
    probs_s, message, logprobs_s, entropy_s = send.model(imgsa_s, imgsb_s)
    probs_r, actions, logprobs_r, entropy_r = recv.model(imgsa_r, imgsb_r, message.detach())

    error = reward(actions, targets)  # torch.abs(act - targets); the minus sign is already in the update, I think
    acc = accuracy(actions, targets)  # torch.mean(error.detach().double())
    send_loss = send.loss(error, logprobs_s, entropy_s)
    recv_loss = recv.loss(error, logprobs_r, entropy_r)

    if send_opt is not None:
        # SENDER LOSS
        send_opt.zero_grad()
        send_loss.backward()
        send_opt.step()

    if recv_opt is not None:
        # RECEIVER LOSS
        recv_opt.zero_grad()
        recv_loss.backward()
        recv_opt.step()

    return error, send_loss, recv_loss, len(imgsa_s), acc
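# A minimal sketch of the policy-gradient loss that send.loss / recv.loss
# presumably implement (REINFORCE with an entropy bonus). The function name,
# the entropy coefficient, and whether the signal enters as a reward or as an
# error (flipping the sign) are assumptions, not the author's definition.
import torch

def reinforce_loss_sketch(signal, logprobs, entropy, entropy_coeff=0.01):
    # Detach the per-sample signal so gradients flow only through the
    # log-probabilities of the sampled actions; subtract an entropy
    # bonus to keep the policy exploratory.
    return -(signal.detach() * logprobs).mean() - entropy_coeff * entropy.mean()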
    for t in tqdm(range(len(test_X_res))):
        for l in range(lgth):
            ctl = int(np.sum(Y_l[test_X_res[t], l]))
            y_l_1 = ph_1[l] * P_eh_1[l, ctl]
            y_l_0 = ph_0[l] * P_eh_0[l, ctl]
            # print("PEH: ", P_eh_1[l, ctl], P_eh_0[l, ctl])
            # print("PH: ", ph_1[l], ph_0[l])
            if y_l_1 > y_l_0:
                test_L[t, l] = 1
            else:
                test_L[t, l] = 0
    return test_L


t_X = np.load("features_genbase_train.npy").astype("float64")
tst_X = np.load("features_genbase_test.npy").astype("float64")
Y_lab = np.load("labels_genbase_train.npy")
tst_lab = np.load("labels_genbase_test.npy")
print(Y_lab.shape)

U, theta, x_data_sp = comput_u_theta(t_X)
test_sp = (tst_X - U) / np.sqrt(theta)
tst_L = ml_KNN(x_data_sp, Y_lab, test_sp, 10, 1)

print("genbase: ")
h_l = los.hamming_loss(tst_lab, tst_L)
print(h_l)
acc = los.accuracy(tst_lab, tst_L)
print(acc)
P, R, F = los.evaluate_matrix(tst_lab, tst_L)
print(P, R, F)
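# A minimal sketch of what comput_u_theta presumably returns, inferred from
# the standardization (tst_X - U) / np.sqrt(theta) above: the per-feature
# mean, the per-feature variance, and the standardized training matrix.
# The sketch's name is hypothetical.
import numpy as np

def comput_u_theta_sketch(X):
    U = np.mean(X, axis=0)     # per-feature mean
    theta = np.var(X, axis=0)  # per-feature variance
    return U, theta, (X - U) / np.sqrt(theta)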
valid_iter = threaded_iterator(valid_iter)

# Count the number of trainable parameters in the model
num_params = nn.layers.count_params(output_layer, trainable=True)
print('Number of trainable parameters: {}'.format(num_params))

# Construct loss function & accuracy
predictions = nn.layers.get_output(output_layer)
train_log_loss = categorical_crossentropy(predictions, y)
train_log_loss = train_log_loss.mean()
train_kappa_loss = quad_kappa_loss(predictions, y, y_pow=y_pow)
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p**2) for p in params)
train_hybrid_loss = train_kappa_loss + log_scale * T.clip(
    train_log_loss, log_cutoff, 10**3) + l2_reg * regularization
train_accuracy = accuracy(predictions, y)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_log_loss = categorical_crossentropy(valid_predictions, y)
valid_log_loss = valid_log_loss.mean()
valid_kappa_loss = quad_kappa_loss(valid_predictions, y)
valid_loss = valid_kappa_loss
valid_accuracy = accuracy(valid_predictions, y)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_hybrid_loss, all_params)
grads_norms = [T.sqrt(T.sum(g**2)) for g in all_grads]  # T.sqrt takes a tensor, not a list
scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=10, return_norm=False)
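# For reference, a plain-numpy version of the quadratic weighted kappa that
# quad_kappa / quad_kappa_loss presumably target: one minus the ratio of
# weighted observed to weighted expected disagreement, with weights
# (i - j)^2 / (num_class - 1)^2. The sketch's name is hypothetical.
import numpy as np

def quad_weighted_kappa_sketch(y_pred, y_true, num_class=5):
    O = np.zeros((num_class, num_class))  # observed confusion matrix
    for p, t in zip(y_pred, y_true):
        O[p, t] += 1
    i, j = np.indices((num_class, num_class))
    W = (i - j) ** 2 / float((num_class - 1) ** 2)        # quadratic weights
    E = np.outer(O.sum(axis=1), O.sum(axis=0)) / O.sum()  # expected under independence
    return 1.0 - (W * O).sum() / (W * E).sum()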
# Transform batch iterator to a threaded iterator
train_iter = threaded_iterator(train_iter)
valid_iter = threaded_iterator(valid_iter)

# Construct loss function & accuracy
predictions = nn.layers.get_output(output_layer)
train_loss = categorical_crossentropy(predictions, y)
train_loss = train_loss.mean()
#params = nn.layers.get_all_params(output_layer, regularizable=True)
#regularization = sum(T.sum(p ** 2) for p in params)
#l2_penalty = regularization * l2_reg
all_layers = nn.layers.get_all_layers(output_layer)
l2_penalty = nn.regularization.regularize_layer_params(
    all_layers, nn.regularization.l2) * l2_reg
train_loss = train_loss + l2_penalty
train_accuracy = accuracy(predictions, y)
train_kappa = quad_kappa(predictions, y)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_loss = categorical_crossentropy(valid_predictions, y)
valid_loss = valid_loss.mean()
valid_accuracy = accuracy(valid_predictions, y)
valid_kappa = quad_kappa(valid_predictions, y)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
#scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=10, return_norm=False)

# Construct update
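# A hedged sketch of how the update construction presumably continues:
# Nesterov momentum on the explicit gradient list, then compiled Theano
# functions. The learning_rate shared variable, the momentum value, and the
# input tensors x, y are assumptions about names defined elsewhere in the
# script.
import theano

updates = nn.updates.nesterov_momentum(
    all_grads, all_params, learning_rate=learning_rate, momentum=0.9)
train_fn = theano.function([x, y], [train_loss, train_accuracy], updates=updates)
valid_fn = theano.function([x, y], [valid_loss, valid_accuracy])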
train_iter = threaded_iterator(train_iter)
valid_iter = threaded_iterator(valid_iter)

# Construct loss function & accuracy
def discrete_predict(predictions):
    return T.cast(T.round(T.clip(predictions, 0, 4)), 'int32')

predictions = nn.layers.get_output(output_layer, deterministic=False)
mse_loss = squared_error(predictions, y).mean()
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p ** 2) for p in params)
train_loss = mse_loss + l2_reg * regularization

one_hot_pred = T.eye(num_class, dtype='int32')[discrete_predict(predictions)]
one_hot_target = T.eye(num_class, dtype='int32')[y]
train_accuracy = accuracy(one_hot_pred, one_hot_target)
train_kappa = quad_kappa(one_hot_pred, one_hot_target)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_loss = squared_error(valid_predictions, y).mean()
one_hot_pred_val = T.eye(num_class, dtype='int32')[discrete_predict(valid_predictions)]
valid_accuracy = accuracy(one_hot_pred_val, one_hot_target)
valid_kappa = quad_kappa(one_hot_pred_val, one_hot_target)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
#scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=5, return_norm=False)

# Construct update
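# What discrete_predict computes, illustrated in plain numpy: continuous
# regression outputs are clipped to the valid grade range [0, 4], then
# rounded to the nearest integer class before one-hot encoding.
import numpy as np

preds = np.array([-0.3, 1.6, 2.49, 4.7])
print(np.round(np.clip(preds, 0, 4)).astype(np.int32))  # -> [0 2 2 4]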
    return kappa_loss + 0.5 * T.clip(log_loss, 0.6, 10 ** 3)


predictions = nn.layers.get_output(output_layer, deterministic=True)
train_log_loss, train_reg_loss, train_multi_loss = multi_task_loss(predictions, y)
# train_loss1 = categorical_crossentropy(predictions[:, :num_class], y).mean()
# train_loss2 = categorical_crossentropy(predictions[:, num_class:], y).mean()
# train_hybrid_loss = hybrid_loss(predictions, y)
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p ** 2) for p in params)
# train_loss = train_loss1 + train_loss2 + l2_reg * regularization
train_loss = train_multi_loss + l2_reg * regularization
# train_loss = train_hybrid_loss + l2_reg * regularization

# train_accuracy = accuracy(predictions[:, num_class:], y)
# train_kappa = quad_kappa_loss(predictions[:, num_class:], y)
train_accuracy = accuracy(predictions[:, :num_class], y)
train_kappa = quad_kappa_loss(predictions[:, :num_class], y)
# train_accuracy = accuracy(predictions, y)
# train_kappa = quad_kappa_loss(predictions, y)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_log_loss, valid_reg_loss, valid_multi_loss = multi_task_loss(valid_predictions, y)
# valid_loss = categorical_crossentropy(valid_predictions[:, num_class:], y).mean()
# valid_loss = hybrid_loss(valid_predictions, y)
valid_accuracy = accuracy(valid_predictions[:, :num_class], y)
# valid_accuracy = accuracy(valid_predictions, y)
valid_kappa = quad_kappa_loss(valid_predictions[:, :num_class], y)
# valid_kappa = quad_kappa_loss(valid_predictions, y)

# Scale grads
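# A hedged guess at multi_task_loss, inferred only from the column split used
# above (a softmax classification head in the first num_class columns plus a
# second head): the regression target, the return triple, and the 0.5 weight
# are assumptions, not the author's actual definition. num_class comes from
# the surrounding script.
import theano.tensor as T
from lasagne.objectives import categorical_crossentropy, squared_error

def multi_task_loss_sketch(predictions, y):
    log_loss = categorical_crossentropy(predictions[:, :num_class], y).mean()
    reg_loss = squared_error(predictions[:, num_class],
                             T.cast(y, 'float32')).mean()
    return log_loss, reg_loss, log_loss + 0.5 * reg_loss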
predictions = nn.layers.get_output(output_layer, deterministic=True)
train_log_loss, train_reg_loss, train_multi_loss = multi_task_loss(
    predictions, y)
#train_loss1 = categorical_crossentropy(predictions[:, :num_class], y).mean()
#train_loss2 = categorical_crossentropy(predictions[:, num_class:], y).mean()
#train_hybrid_loss = hybrid_loss(predictions, y)
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p**2) for p in params)
#train_loss = train_loss1 + train_loss2 + l2_reg * regularization
train_loss = train_multi_loss + l2_reg * regularization
#train_loss = train_hybrid_loss + l2_reg * regularization

#train_accuracy = accuracy(predictions[:, num_class:], y)
#train_kappa = quad_kappa_loss(predictions[:, num_class:], y)
train_accuracy = accuracy(predictions[:, :num_class], y)
train_kappa = quad_kappa_loss(predictions[:, :num_class], y)
#train_accuracy = accuracy(predictions, y)
#train_kappa = quad_kappa_loss(predictions, y)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_log_loss, valid_reg_loss, valid_multi_loss = multi_task_loss(
    valid_predictions, y)
#valid_loss = categorical_crossentropy(valid_predictions[:, num_class:], y).mean()
#valid_loss = hybrid_loss(valid_predictions, y)
valid_accuracy = accuracy(valid_predictions[:, :num_class], y)
#valid_accuracy = accuracy(valid_predictions, y)
valid_kappa = quad_kappa_loss(valid_predictions[:, :num_class], y)
#valid_kappa = quad_kappa_loss(valid_predictions, y)

# Scale grads
    A_out.append(slice)
    for i in range(k):
        A.remove(slice[i])
    A_out.append(A)
    return A_out


label_all = np.load("labels_bird_train.npy")
x_data = np.load("features_bird_train.npy").astype("float64")
test = np.load("features_bird_test.npy").astype("float64")
label_true = np.load("labels_bird_test.npy")
print(x_data.shape)
print(test.shape)

U, theta, x_data_sp = comput_u_theta(x_data)
test_sp = (test - U) / np.sqrt(theta)
res = label_space_partition(x_data_sp, label_all, test_sp, 3)  # label space partition
# res = label_space_partition(x_data_sp, label_all, test_sp, 10)  # label power set

print("bird: ")
h_l = los.hamming_loss(label_true, res)
print(h_l)
acc = los.accuracy(label_true, res)
print(acc)
P, R, F = los.evaluate_matrix(label_true, res)
print(P, R, F)
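# What los.hamming_loss presumably reports for these binary label matrices,
# sketched in plain numpy: the fraction of label slots predicted incorrectly.
# The sketch's name is hypothetical.
import numpy as np

def hamming_loss_sketch(Y_true, Y_pred):
    return np.mean(Y_true != Y_pred)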
      format(model_name))

# Display training information
visualize.display_loss(name, save=True)

# Make video
if VIDEO and not (model_name == 'Unet2D'):
    visualize.save_png_video(net, X_test, thresh=thresh, format_im=type_im,
                             name=name + '_demo_video')
    visualize.automatic_process(name + '_demo_video', cut=10, nb_movie=2,
                                nb_seq_in_video=130)

## Compute score on test
t_pred = time.time()
y_pred = np.array(net.predict(X_test) > thresh, np.uint8).flatten()
t_pred = time.time() - t_pred

Y_test = np.array(Y_test > 0.5, dtype=np.uint8)
dice_score = losses.dice_coef(Y_test.flatten(), y_pred.flatten())
jaccard_score = losses.jaccard_coef(Y_test.flatten(), y_pred.flatten())
acc = losses.accuracy(Y_test.flatten(), y_pred.flatten())

print("Prediction done in {0} seconds for {1} frames: {2} frames per second.".
      format(t_pred, X_test.shape[0], X_test.shape[0] / t_pred))
print("Dice score: {0}. Jaccard score: {1}. Accuracy: {2}.".format(
    dice_score, jaccard_score, acc))
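# Reference form of the Dice coefficient that losses.dice_coef presumably
# computes on the flat binary masks: 2|A ∩ B| / (|A| + |B|). The epsilon and
# the sketch's name are assumptions.
import numpy as np

def dice_coef_sketch(y_true, y_pred, eps=1e-7):
    intersection = np.sum(y_true * y_pred)
    return (2.0 * intersection + eps) / (np.sum(y_true) + np.sum(y_pred) + eps)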
valid_iter = threaded_iterator(valid_iter)

# Construct loss function & accuracy
def discrete_predict(predictions):
    return T.cast(T.round(T.clip(predictions, 0, 4)), 'int32')

predictions = nn.layers.get_output(output_layer, deterministic=False)
mse_loss = squared_error(predictions, y).mean()
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p**2) for p in params)
train_loss = mse_loss + l2_reg * regularization

one_hot_pred = T.eye(num_class, dtype='int32')[discrete_predict(predictions)]
one_hot_target = T.eye(num_class, dtype='int32')[y]
train_accuracy = accuracy(one_hot_pred, one_hot_target)
train_kappa = quad_kappa(one_hot_pred, one_hot_target)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_loss = squared_error(valid_predictions, y).mean()
one_hot_pred_val = T.eye(num_class, dtype='int32')[discrete_predict(valid_predictions)]
valid_accuracy = accuracy(one_hot_pred_val, one_hot_target)
valid_kappa = quad_kappa(one_hot_pred_val, one_hot_target)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
#scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=5, return_norm=False)

# Construct update
updates = nn.updates.nesterov_momentum(all_grads,