losses[:] = 100. y_true = [] y_pred = [] test_loss = 0 accuracies = [] valid_batch_ctr = 0 for data, seg, labels in data_gen_validation: # loss, acc = val_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss, acc = val_fn(data, seg_flat, w) #, class_weights[seg_flat] test_loss += loss accuracies.append(acc) valid_batch_ctr += 1 y_true.append(convert_seg_flat_to_binary_label_indicator_array(seg_flat)) y_pred.append(get_class_probas(data)) if valid_batch_ctr > n_test_batches: break test_loss /= n_test_batches print "test loss: ", test_loss print "test acc: ", np.mean(accuracies), "\n" y_true = np.concatenate(y_true, axis=0) y_pred = np.concatenate(y_pred, axis=0) scores = roc_auc_score(y_true, y_pred, None) auc_all.append(scores) all_validation_losses.append(test_loss) all_validation_accuracies.append(np.mean(accuracies)) auc_scores = np.concatenate(auc_all, axis=0).reshape(-1, 5) printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["bg", "brain", "edema", "ce_tumor", "necrosis"], ylim_score=(0,0.75)) # learning_rate *= 0.62
losses[:] = 100. y_true = [] y_pred = [] test_loss = 0 accuracies = [] valid_batch_ctr = 0 for data, seg, labels in data_gen_validation: # loss, acc = val_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss, acc = val_fn(data, seg_flat, w) #, class_weights[seg_flat] test_loss += loss accuracies.append(acc) valid_batch_ctr += 1 y_true.append(convert_seg_flat_to_binary_label_indicator_array(seg_flat[seg_flat!=0]-1, len(class_frequencies2)-1)) y_pred.append(get_class_probas(data)[seg_flat!=0, :][:, 1:]) if valid_batch_ctr > n_test_batches: break test_loss /= n_test_batches print "test loss: ", test_loss print "test acc: ", np.mean(accuracies), "\n" y_true = np.concatenate(y_true, axis=0) y_pred = np.concatenate(y_pred, axis=0) scores = roc_auc_score(y_true, y_pred, None) del y_pred, y_true auc_all.append(scores) all_validation_losses.append(test_loss) all_validation_accuracies.append(np.mean(accuracies)) auc_scores = np.concatenate(auc_all, axis=0).reshape(-1, len(class_frequencies2)-1) printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["brain", "1", "2", "3", "4"], ylim_score=(0,0.08))
losses[:] = train_loss y_true = [] y_pred = [] test_loss = 0 accuracies = [] valid_batch_ctr = 0 for data, seg, labels in data_gen_validation: # loss, acc = val_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss, acc = val_fn(data, seg_flat, w) #, class_weights[seg_flat] test_loss += loss accuracies.append(acc) valid_batch_ctr += 1 y_true.append(convert_seg_flat_to_binary_label_indicator_array(seg_flat)) y_pred.append(get_class_probas(data)) if valid_batch_ctr > n_test_batches: break test_loss /= n_test_batches print "test loss: ", test_loss print "test acc: ", np.mean(accuracies), "\n" y_true = np.concatenate(y_true, axis=0) y_pred = np.concatenate(y_pred, axis=0) scores = roc_auc_score(y_true, y_pred, None) auc_all.append(scores) all_validation_losses.append(test_loss) all_validation_accuracies.append(np.mean(accuracies)) auc_scores = np.concatenate(auc_all, axis=0).reshape(-1, 5) printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["bg", "brain", "edema", "ce_tumor", "necrosis"], ylim_score=(0,1.5)) learning_rate *= 0.62
# NOTE(review): whitespace-mangled chunk kept byte-identical — it begins with
# an orphan 'break' whose enclosing loop is not visible here, so reformatting
# would require guessing the surrounding structure. What it appears to do:
# leave the (unseen) training loop, average train_loss over
# n_batches_per_epoch, then run a marker-classification validation pass:
# per-batch loss/accuracy from val_marker on gt[:, 1], binary-indicator
# targets, class probabilities from get_class_probas_markers, per-class ROC
# AUC (average=None), and a progress plot via printLosses.
# NOTE(review): likely off-by-one — the loop breaks only when
# valid_batch_ctr > n_validation_batches, so n_validation_batches + 1 batches
# are summed while valid_loss is divided by n_validation_batches; confirm.
break train_loss /= n_batches_per_epoch y_true = [] y_pred = [] valid_loss = 0 accuracies = [] valid_batch_ctr = 0 for data, gt in data_gen_validation: # loss, acc = val_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) acc_marker, loss = val_marker(data, gt[:, 1].astype(np.int32)) valid_loss += loss accuracies.append(acc_marker) valid_batch_ctr += 1 y_true.append(convert_seg_flat_to_binary_label_indicator_array(gt[:, 1].astype(np.int32), num_classes)) y_pred.append(get_class_probas_markers(data)[0]) if valid_batch_ctr > n_validation_batches: break valid_loss /= n_validation_batches print "test loss: ", valid_loss print "test acc: ", np.mean(accuracies), "\n" y_true = np.concatenate(y_true, axis=0) y_pred = np.concatenate(y_pred, axis=0) scores = roc_auc_score(y_true, y_pred, None) del y_pred, y_true auc_all.append(scores) all_validation_losses.append(valid_loss) all_validation_accuracies.append(np.mean(accuracies)) auc_scores = np.concatenate(auc_all, axis=0).reshape(-1, num_classes) printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["0", "1"], ylim_score=None)
# NOTE(review): whitespace-mangled chunk kept byte-identical — it is
# truncated mid printLosses(...) call, so it cannot be safely reformatted
# without guessing the missing arguments. What the visible part does: a
# validation pass averaging val_fn loss/accuracy, per-class ROC AUC
# (average=None) over all 5 classes, then the start of the plotting call.
# NOTE(review): same likely off-by-one as the sibling passes — the loop
# breaks only when valid_batch_ctr > n_test_batches, so one batch too many
# is summed while test_loss is divided by n_test_batches; confirm.
y_true = [] y_pred = [] test_loss = 0 accuracies = [] valid_batch_ctr = 0 for data, seg, labels in data_gen_validation: # loss, acc = val_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss, acc = val_fn(data, seg_flat, w) #, class_weights[seg_flat] test_loss += loss accuracies.append(acc) valid_batch_ctr += 1 y_true.append( convert_seg_flat_to_binary_label_indicator_array(seg_flat)) y_pred.append(get_class_probas(data)) if valid_batch_ctr > n_test_batches: break test_loss /= n_test_batches print "test loss: ", test_loss print "test acc: ", np.mean(accuracies), "\n" y_true = np.concatenate(y_true, axis=0) y_pred = np.concatenate(y_pred, axis=0) scores = roc_auc_score(y_true, y_pred, None) auc_all.append(scores) all_validation_losses.append(test_loss) all_validation_accuracies.append(np.mean(accuracies)) auc_scores = np.concatenate(auc_all, axis=0).reshape(-1, 5) printLosses(all_training_losses, all_training_accuracies,
print "training loss average on epoch: ", train_loss y_true = [] y_pred = [] test_loss = 0 accuracies = [] valid_batch_ctr = 0 for data, seg, labels in data_gen_validation: # loss, acc = val_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_weights[seg_flat] loss, acc = val_fn(data, seg_flat, w) #, class_weights[seg_flat] test_loss += loss accuracies.append(acc) valid_batch_ctr += 1 y_true.append(convert_seg_flat_to_binary_label_indicator_array(seg_flat[seg_flat!=0]-1, num_classes-1)) y_pred.append(get_class_probas(data)[seg_flat!=0, :][:, 1:]) if valid_batch_ctr > n_test_batches: break test_loss /= n_test_batches print "test loss: ", test_loss print "test acc: ", np.mean(accuracies), "\n" y_true = np.concatenate(y_true, axis=0) y_pred = np.concatenate(y_pred, axis=0) scores = roc_auc_score(y_true, y_pred, None) del y_pred, y_true auc_all.append(scores) all_validation_losses.append(test_loss) all_validation_accuracies.append(np.mean(accuracies)) auc_scores = np.concatenate(auc_all, axis=0).reshape((-1, num_classes-1)) printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["brain", "1", "2", "3", "4"], ylim_score=(0,0.08))