import numpy as np

# multi_threaded_generator and memmapGeneratorDataAugm_t1km_flair_adc_cbv are
# project-specific data loading utilities and are assumed to be in scope.


def estimate_class_weights(train_neg_memmap,
                           train_pos_memmap,
                           n_neg_train,
                           n_pos_train,
                           BATCH_SIZE,
                           classes,
                           n_batches=100):
    """Estimate inverse-frequency class weights from the label counts in
    n_batches of training batches. Rare classes get large weights; the
    weights are rescaled so that they sum to len(classes) (mean weight 1)."""
    # count how many pixels of each class appear in the sampled batches
    d = {}
    for c in classes:
        d[c] = 0
    n = 0
    for data, seg, labels in multi_threaded_generator(
            memmapGeneratorDataAugm_t1km_flair_adc_cbv(train_neg_memmap,
                                                       train_pos_memmap,
                                                       BATCH_SIZE, n_pos_train,
                                                       n_neg_train),
            num_threads=2):
        for c in d.keys():
            d[c] += np.sum(seg == c)
        n += 1
        if n >= n_batches:
            break
    # weight of class c = total pixels / pixels of class c; this assumes the
    # class labels are the integers 0 .. len(classes) - 1 and that every
    # class occurred at least once in the sampled batches
    class_weights = np.zeros(len(classes))
    n_pixels = np.sum([d[c] for c in d.keys()])
    for c in classes:
        class_weights[c] = float(n_pixels) / d[c]
    class_weights /= class_weights.sum()
    class_weights *= float(len(classes))
    return class_weights
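
# Usage sketch (an addition, not part of the original script): one way the
# returned weights could enter the loss, matching the commented-out
# `class_weights[seg_flat]` hint in the training loop below. The
# weighted_crossentropy helper and the (n_pixels, n_classes) softmax_output
# layout are assumptions made purely for illustration.
def weighted_crossentropy(softmax_output, seg_flat, class_weights):
    # softmax_output: (n_pixels, n_classes) predicted class probabilities
    # seg_flat: (n_pixels,) integer ground-truth labels in 0..n_classes-1
    pixel_loss = -np.log(softmax_output[np.arange(len(seg_flat)), seg_flat]
                         + 1e-7)
    # weight each pixel's cross-entropy by the weight of its true class
    return np.mean(class_weights[seg_flat] * pixel_loss)

# example call, with classes 0..3 as suggested by range(4) below:
# class_weights = estimate_class_weights(train_neg_memmap, train_pos_memmap,
#                                        n_neg_train, n_pos_train,
#                                        BATCH_SIZE, classes=range(4))
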
all_training_losses = []
all_validation_losses = []
all_validation_accuracies = []
all_training_accs = []

# optionally resume the bookkeeping lists from an earlier run:
'''with open("../results/%s_allLossesNAccur_ep%d.pkl"% (EXPERIMENT_NAME, 0), 'r') as f:
    [all_training_losses, all_training_accs, all_validation_losses, all_validation_accuracies] = cPickle.load(f)'''

n_epochs = 10
for epoch in range(0, n_epochs):
    print "epoch: ", epoch
    train_loss = 0
    train_acc_tmp = 0
    train_loss_tmp = 0
    batch_ctr = 0
    for data, seg, labels in multi_threaded_generator(
            memmapGeneratorDataAugm_t1km_flair_adc_cbv(train_neg_memmap,
                                                       train_pos_memmap,
                                                       BATCH_SIZE, n_pos_train,
                                                       n_neg_train),
            num_threads=2):
        # report the running loss/accuracy n_feedbacks_per_epoch times per epoch
        if batch_ctr != 0 and batch_ctr % int(np.floor(n_batches_per_epoch / n_feedbacks_per_epoch)) == 0:
            print "number of batches: ", batch_ctr, "/", n_batches_per_epoch
            print "training_loss since last update: ", train_loss_tmp / np.floor(n_batches_per_epoch / n_feedbacks_per_epoch), " train accuracy: ", train_acc_tmp / np.floor(n_batches_per_epoch / n_feedbacks_per_epoch)
            all_training_losses.append(train_loss_tmp / np.floor(n_batches_per_epoch / n_feedbacks_per_epoch))
            all_training_accs.append(train_acc_tmp / np.floor(n_batches_per_epoch / n_feedbacks_per_epoch))
            train_loss_tmp = 0
            train_acc_tmp = 0
            printLosses(all_training_losses, all_training_accs, all_validation_losses, all_validation_accuracies, "../results/%s.png" % EXPERIMENT_NAME, n_feedbacks_per_epoch)
        # train on the flattened segmentation map; the commented-out variant
        # one-hot encodes the segmentation for the cross-entropy instead
        # loss, acc = train_fn(data, convert_seg_map_for_crossentropy(seg, range(4)).astype(np.float32))
        seg_flat = seg.flatten()
        loss, acc = train_fn(data, seg_flat)  # class_weights[seg_flat]
        train_loss += loss
        train_loss_tmp += loss
        train_acc_tmp += acc
        batch_ctr += 1
        # the generator yields batches indefinitely, so end the epoch here
        if batch_ctr >= n_batches_per_epoch:
            break
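
# Checkpoint sketch (an addition, mirroring the commented-out cPickle load
# above): one way the tracked loss/accuracy lists could be persisted, e.g.
# once per epoch (indented into the epoch loop) or once after training as
# written here. The file name follows the pattern of the load snippet;
# everything else is an assumption, not the original author's code.
import cPickle

with open("../results/%s_allLossesNAccur_ep%d.pkl" % (EXPERIMENT_NAME, epoch), 'w') as f:
    cPickle.dump([all_training_losses, all_training_accs,
                  all_validation_losses, all_validation_accuracies], f)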