data_gen_train = Multithreaded_Generator(data_gen_train, 12, 100) data_gen_train._start() print "epoch: ", epoch train_loss = 0 train_acc_tmp = 0 train_loss_tmp = 0 batch_ctr = 0 for data, seg, idx in data_gen_train: if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0: print "number of batches: ", batch_ctr, "/", n_batches_per_epoch print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch) all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) train_loss_tmp = 0 train_acc_tmp = 0 printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["bg", "brain", "edema", "ce_tumor", "necrosis"], ylim_score=(0,0.75)) # loss, acc = train_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss_vec, acc = train_fn(data, seg_flat, w) #class_weights[seg_flat] loss = loss_vec.mean() loss_per_sample = loss_vec.reshape(BATCH_SIZE, -1).mean(axis=1) losses = update_losses(losses, idx, loss_per_sample) train_loss += loss train_loss_tmp += loss train_acc_tmp += acc batch_ctr += 1 if batch_ctr > n_batches_per_epoch: break data_gen_train._finish()
print "epoch: ", epoch train_loss = 0 train_acc_tmp = 0 train_loss_tmp = 0 batch_ctr = 0 for data, seg, idx in data_gen_train: if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0: print "number of batches: ", batch_ctr, "/", n_batches_per_epoch print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch) all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) train_loss_tmp = 0 train_acc_tmp = 0 if len(auc_all) > 0: auc_scores = np.concatenate(auc_all, axis=0).reshape(-1, len(class_frequencies2)-1) printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["brain", "1", "2", "3", "4"], ylim_score=(0,0.08)) # loss, acc = train_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss_vec, acc = train_fn(data, seg_flat, w) #class_weights[seg_flat] loss = loss_vec.mean() loss_per_sample = loss_vec.reshape(BATCH_SIZE, -1).mean(axis=1) losses = update_losses(losses, idx, loss_per_sample) train_loss += loss train_loss_tmp += loss train_acc_tmp += acc batch_ctr += 1 if batch_ctr > n_batches_per_epoch: break data_gen_train._finish()
data_gen_train = Multithreaded_Generator(data_gen_train, 12, 100) data_gen_train._start() print "epoch: ", epoch train_loss = 0 train_acc_tmp = 0 train_loss_tmp = 0 batch_ctr = 0 for data, seg, idx in data_gen_train: if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0: print "number of batches: ", batch_ctr, "/", n_batches_per_epoch print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch) all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) train_loss_tmp = 0 train_acc_tmp = 0 printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["bg", "brain", "edema", "ce_tumor", "necrosis"], ylim_score=(0,1.5)) # loss, acc = train_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss_vec, acc = train_fn(data, seg_flat, w) #class_weights[seg_flat] loss = loss_vec.mean() loss_per_sample = loss_vec.reshape(BATCH_SIZE, -1).mean(axis=1) losses = update_losses(losses, idx, loss_per_sample) train_loss += loss train_loss_tmp += loss train_acc_tmp += acc batch_ctr += 1 if batch_ctr > n_batches_per_epoch: break data_gen_train._finish()
print "epoch: ", epoch train_loss = 0 train_acc_tmp = 0 train_loss_tmp = 0 batch_ctr = 0 for data, gt in data_gen_train: if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0: print "number of batches: ", batch_ctr, "/", n_batches_per_epoch print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch) all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) train_loss_tmp = 0 train_acc_tmp = 0 if len(auc_all) > 0: auc_scores = np.concatenate(auc_all, axis=0).reshape(-1, num_classes) printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, os.path.join(results_dir, "%s.png" % EXPERIMENT_NAME), n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["0", "1"], ylim_score=None) acc_marker, acc_domain, loss = train_marker_domain(data, gt[:, 1].astype(np.int32), gt[:, 0].astype(np.int32)) # acc_domain, loss_domain = train_domain(data, gt[:, 0].astype(np.int32)) train_loss += loss train_loss_tmp += loss train_acc_tmp += acc_marker batch_ctr += 1 if batch_ctr > n_batches_per_epoch: break train_loss /= n_batches_per_epoch y_true = [] y_pred = [] valid_loss = 0 accuracies = []
n_epochs = 10 for epoch in range(2, n_epochs): print "epoch: ", epoch train_loss = 0 train_acc_tmp = 0 train_loss_tmp = 0 batch_ctr = 0 for data, seg, labels in data_gen_train: if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0: print "number of batches: ", batch_ctr, "/", n_batches_per_epoch print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch) all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) train_loss_tmp = 0 train_acc_tmp = 0 printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, "../../../results/%s.png" % EXPERIMENT_NAME, n_feedbacks_per_epoch) # loss, acc = train_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss, acc = train_fn(data, seg_flat, w) #class_weights[seg_flat] train_loss += loss train_loss_tmp += loss train_acc_tmp += acc batch_ctr += 1 if batch_ctr > n_batches_per_epoch: break train_loss /= n_batches_per_epoch print "training loss average on epoch: ", train_loss test_loss = 0
# NOTE(review): this flattened chunk begins MID-SIGNATURE ("num_cached=50):") --
# the `def` header and the enclosing batch loop live outside this view, so the
# code is kept byte-identical rather than reconstructed. Visible pattern: every
# ~n_batches_per_epoch/10 batches it prints running loss/accuracy, appends them
# to all_training_losses/all_training_accs, refreshes the plot via printLosses,
# trains with train_fn(data, labels), and after the loop reports the
# epoch-average training loss and resets validation counters.
# NOTE(review): the `>` break condition admits one extra batch while the average
# divides by n_batches_per_epoch -- same off-by-one as the sibling chunks; confirm.
num_cached=50): if batch_ctr != 0 and batch_ctr % int( np.floor(n_batches_per_epoch / 10.)) == 0: print "number of batches: ", batch_ctr, "/", n_batches_per_epoch print "training_loss since last update: ", train_loss_tmp / np.floor( n_batches_per_epoch / 10.), " train accuracy: ", train_acc_tmp / np.floor( n_batches_per_epoch / 10.) all_training_losses.append(train_loss_tmp / np.floor(n_batches_per_epoch / 10.)) all_training_accs.append(train_acc_tmp / np.floor(n_batches_per_epoch / 10.)) train_loss_tmp = 0 train_acc_tmp = 0 printLosses(all_training_losses, all_training_accs, all_validation_losses, all_validation_accuracies, "../results/%s.png" % EXPERIMENT_NAME, 10) loss, acc = train_fn(data, labels.astype(np.int32)) train_loss += loss train_loss_tmp += loss train_acc_tmp += acc batch_ctr += 1 if batch_ctr > n_batches_per_epoch: break train_loss /= n_batches_per_epoch print "training loss average on epoch: ", train_loss test_loss = 0 accuracies = [] valid_batch_ctr = 0
data_gen_train = Multithreaded_Generator(data_gen_train, 4, 20) data_gen_train._start() print "epoch: ", epoch train_loss = 0 train_acc_tmp = 0 train_loss_tmp = 0 batch_ctr = 0 for data, seg, idx in data_gen_train: if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0: print "number of batches: ", batch_ctr, "/", n_batches_per_epoch print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch) all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) all_training_accuracies.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) train_loss_tmp = 0 train_acc_tmp = 0 printLosses(all_training_losses, all_training_accuracies, all_validation_losses, all_validation_accuracies, "../../../results/%s.png" % EXPERIMENT_NAME, n_feedbacks_per_epoch, auc_scores=auc_scores, auc_labels=["bg", "brain", "edema", "ce_tumor", "necrosis"], ylim_score=(0,1.5)) # loss, acc = train_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)) seg_flat = seg.flatten().astype(np.int32) w = class_frequencies2[seg_flat] loss_vec, acc = train_fn(data, seg_flat, w) #class_weights[seg_flat] loss = loss_vec.mean() loss_per_sample = loss_vec.reshape(BATCH_SIZE, -1).mean(axis=1) losses = update_losses(losses, idx, loss_per_sample) train_loss += loss train_loss_tmp += loss train_acc_tmp += acc batch_ctr += 1 if batch_ctr > n_batches_per_epoch: break data_gen_train._finish()