import cPickle

import numpy as np
import theano
from numpy import memmap


def estimate_class_weights(train_neg_memmap,
                           train_pos_memmap,
                           n_neg_train,
                           n_pos_train,
                           BATCH_SIZE,
                           classes,
                           n_batches=100):
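    """Estimate inverse class-frequency weights from a sample of batches.

    Counts how often each label in `classes` occurs in the segmentation maps
    of `n_batches` generated batches, weights each class by n_pixels / count,
    then rescales so the weights sum to len(classes). Assumes integer labels
    0..len(classes)-1 and that every class occurs at least once in the sample.
    """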
    d = {}
    for c in classes:
        d[c] = 0
    n = 0
    for data, seg, labels in multi_threaded_generator(
            memmapGeneratorDataAugm_t1km_flair_adc_cbv(train_neg_memmap,
                                                       train_pos_memmap,
                                                       BATCH_SIZE, n_pos_train,
                                                       n_neg_train),
            num_threads=2):
        for c in d.keys():
            d[c] += np.sum(seg == c)
        n += 1
        if n >= n_batches:
            break
    class_weights = np.zeros(len(classes))
    n_pixels = np.sum([d[c] for c in d.keys()])
    for c in classes:
        class_weights[c] = float(n_pixels) / d[c]
    class_weights /= class_weights.sum()
    class_weights *= float(len(classes))
    return class_weights
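
# Example call (hedged): classes=range(4) is an assumption based on the four
# labels used with convert_seg_map_for_crossentropy(seg, range(4)) further down.
class_weights = estimate_class_weights(train_neg_memmap, train_pos_memmap,
                                       n_neg_train, n_pos_train, BATCH_SIZE,
                                       classes=range(4))
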
val_pos_memmap = memmap("/media/fabian/DeepLearningData/datasets/%s_val_pos.memmap" % memmap_name,
                        dtype=np.float32, mode="r", shape=memmap_properties["val_pos_shape"])
val_neg_memmap = memmap("/media/fabian/DeepLearningData/datasets/%s_val_neg.memmap" % memmap_name,
                        dtype=np.float32, mode="r", shape=memmap_properties["val_neg_shape"])
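# memmap_properties is assumed to be a dict of the stored array shapes, e.g.
# loaded from a pickle written alongside the memmaps when they were created
# (the file name below is hypothetical):
#   with open("/media/fabian/DeepLearningData/datasets/%s_properties.pkl" % memmap_name, 'r') as f:
#       memmap_properties = cPickle.load(f)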


all_training_losses = []
all_validation_losses = []
all_validation_accuracies = []
all_training_accs = []
n_epochs = 10
for epoch in range(n_epochs):
    print "epoch: ", epoch
    train_loss = 0
    train_acc_tmp = 0
    train_loss_tmp = 0
    batch_ctr = 0
    for data, seg, labels in multi_threaded_generator(memmapGenerator_tumorClassRot(train_pos_memmap, train_neg_memmap, BATCH_SIZE, n_pos_train, n_pos_train), num_threads=2, num_cached=50):
        if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/10.)) == 0:
            print "number of batches: ", batch_ctr, "/", n_batches_per_epoch
            print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/10.), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/10.)
            all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/10.))
            all_training_accs.append(train_acc_tmp/np.floor(n_batches_per_epoch/10.))
            train_loss_tmp = 0
            train_acc_tmp = 0
            printLosses(all_training_losses, all_training_accs, all_validation_losses, all_validation_accuracies, "../results/%s.png" % EXPERIMENT_NAME, 10)
        loss, acc = train_fn(data[:, :12, :, :], labels)
        train_loss += loss
        train_loss_tmp += loss
        train_acc_tmp += acc
        batch_ctr += 1
        if batch_ctr > n_batches_per_epoch:
            break
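    # Hedged sketch: end-of-epoch validation pass to fill the otherwise-unused
    # all_validation_losses / all_validation_accuracies lists. val_fn is assumed
    # to mirror train_fn (it is compiled further down in this listing); n_pos_val,
    # n_neg_val and n_val_batches are hypothetical names for the validation set
    # sizes and the number of batches to evaluate. The augmenting training
    # generator is reused here for simplicity; a non-augmenting validation
    # generator would be preferable if available.
    val_loss_tmp = 0
    val_acc_tmp = 0
    val_batch_ctr = 0
    for data, seg, labels in multi_threaded_generator(
            memmapGenerator_tumorClassRot(val_pos_memmap, val_neg_memmap,
                                          BATCH_SIZE, n_pos_val, n_neg_val),
            num_threads=2):
        loss, acc = val_fn(data[:, :12, :, :], labels)
        val_loss_tmp += loss
        val_acc_tmp += acc
        val_batch_ctr += 1
        if val_batch_ctr >= n_val_batches:
            break
    all_validation_losses.append(val_loss_tmp / float(val_batch_ctr))
    all_validation_accuracies.append(val_acc_tmp / float(val_batch_ctr))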
all_training_losses = []
all_validation_losses = []
all_validation_accuracies = []
all_training_accs = []

'''with open("../results/%s_allLossesNAccur_ep%d.pkl"% (EXPERIMENT_NAME, 0), 'r') as f:
    [all_training_losses, all_training_accs, all_validation_losses, all_validation_accuracies] = cPickle.load(f)'''

n_epochs = 10
for epoch in range(0, n_epochs):
    print "epoch: ", epoch
    train_loss = 0
    train_acc_tmp = 0
    train_loss_tmp = 0
    batch_ctr = 0
    for data, seg, labels in multi_threaded_generator(memmapGeneratorDataAugm_t1km_flair_adc_cbv(train_neg_memmap, train_pos_memmap, BATCH_SIZE, n_pos_train, n_neg_train), num_threads=2):
        if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)) == 0:
            print "number of batches: ", batch_ctr, "/", n_batches_per_epoch
            print "training_loss since last update: ", train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch)
            all_training_losses.append(train_loss_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch))
            all_training_accs.append(train_acc_tmp/np.floor(n_batches_per_epoch/n_feedbacks_per_epoch))
            train_loss_tmp = 0
            train_acc_tmp = 0
            printLosses(all_training_losses, all_training_accs, all_validation_losses, all_validation_accuracies, "../results/%s.png" % EXPERIMENT_NAME, n_feedbacks_per_epoch)
        # alternative target encoding: one-hot maps via
        # convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32)
        seg_flat = seg.flatten()
        loss, acc = train_fn(data, seg_flat)  # per-pixel weights (class_weights[seg_flat]) could be added here; see the sketch below
        train_loss += loss
        train_loss_tmp += loss
        train_acc_tmp += acc
        batch_ctr += 1
        if batch_ctr > n_batches_per_epoch:
            break
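
# Hedged sketch of the per-pixel weighting hinted at in the comment above. It
# assumes train_fn were compiled with a third input for weights (w_sym is a
# hypothetical symbol, not defined in this listing):
#   train_fn = theano.function([x_sym, y_sym, w_sym], [loss, acc], updates=updates)
# and inside the batch loop:
#   pixel_weights = class_weights[seg_flat].astype(np.float32)
#   loss, acc = train_fn(data, seg_flat, pixel_weights)
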
val_fn = theano.function([x_sym, y_sym], [loss_val, acc])
pred_fn = theano.function([x_sym], prediction_test)
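
# Usage sketch for pred_fn (hedged; some_batch is hypothetical). prediction_test
# is assumed to yield per-class probabilities, so an argmax over the class axis
# gives hard labels:
#   probs = pred_fn(some_batch)                 # e.g. (BATCH_SIZE, n_classes, ...)
#   predicted_labels = np.argmax(probs, axis=1)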

all_training_losses = []
all_validation_losses = []
all_validation_accuracies = []
all_training_accs = []
n_epochs = 10
for epoch in range(n_epochs):
    print "epoch: ", epoch
    train_loss = 0
    train_acc_tmp = 0
    train_loss_tmp = 0
    batch_ctr = 0
    for data, seg, labels in multi_threaded_generator(data_gen_train,
                                                      num_threads=8,
                                                      num_cached=50):
        if batch_ctr != 0 and batch_ctr % int(
                np.floor(n_batches_per_epoch / 10.)) == 0:
            print "number of batches: ", batch_ctr, "/", n_batches_per_epoch
            print "training_loss since last update: ", train_loss_tmp / np.floor(
                n_batches_per_epoch /
                10.), " train accuracy: ", train_acc_tmp / np.floor(
                    n_batches_per_epoch / 10.)
            all_training_losses.append(train_loss_tmp /
                                       np.floor(n_batches_per_epoch / 10.))
            all_training_accs.append(train_acc_tmp /
                                     np.floor(n_batches_per_epoch / 10.))
            train_loss_tmp = 0
            train_acc_tmp = 0
            printLosses(all_training_losses, all_training_accs,
                        all_validation_losses, all_validation_accuracies,
                        "../results/%s.png" % EXPERIMENT_NAME, 10)
        loss, acc = train_fn(data, labels)
        train_loss += loss
        train_loss_tmp += loss
        train_acc_tmp += acc
        batch_ctr += 1
        if batch_ctr > n_batches_per_epoch:
            break
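    # Hedged sketch: persist the loss history at the end of each epoch, mirroring
    # the commented-out cPickle load earlier in this listing.
    with open("../results/%s_allLossesNAccur_ep%d.pkl" % (EXPERIMENT_NAME, epoch), 'w') as f:
        cPickle.dump([all_training_losses, all_training_accs,
                      all_validation_losses, all_validation_accuracies], f)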