예제 #1
0
    # Per-layer learning-rate selection for fine-tuning: the listed layers
    # get the full rate, every other layer is fine-tuned at 1/10th of it.
    # NOTE(review): this sits inside a loop over (name, layer_params) pairs
    # whose header lies above the visible fragment — confirm.
    if name in ['new_output', 'fc6', 'fc7']:
        layer_lr = learning_rate
    else:
        layer_lr = learning_rate / 10
    # 'fc8' gets no update expression at all, i.e. it is frozen.
    if name != 'fc8':
        layer_updates = lasagne.updates.nesterov_momentum(
            loss, layer_params, learning_rate=layer_lr, momentum=0.9)
        updates.update(layer_updates)

# Compile functions for training, validation and prediction:
#   train_fn — applies the accumulated `updates` and reports [loss, acc];
#   val_fn   — same outputs but no parameter updates;
#   pred_fn  — forward pass only, returns `prediction`
#              (presumably class probabilities — confirm its definition).
train_fn = theano.function([X_sym, y_sym], [loss, acc], updates=updates)
val_fn = theano.function([X_sym, y_sym], [loss, acc])
pred_fn = theano.function([X_sym], prediction)

# Build a combined training set: the labelled training images plus the
# first 1000 test images paired with previously generated pseudo-labels
# (semi-supervised training).
X_train, y_train, ids_train = load_train_data()
X_test, ids_test = load_test_data(img_start_ix=0, max_img=1000)
y_pseudo = load_pseudo_labels()
# `pos` records each row's original index before shuffling.
# NOTE(review): 4777 implies 3777 training images + 1000 pseudo-labelled
# test images — confirm len(X_train) == 3777.
pos = np.array(range(4777))
X, y, ids = np.concatenate((X_train, X_test)), np.concatenate(
    (y_train, y_pseudo)), np.concatenate((ids_train, ids_test))
# Shuffle data, labels, ids and original positions in lockstep.
X, y, ids, pos = sklearn.utils.shuffle(X, y, ids, pos, random_state=0)

# Recover, after shuffling, the row indices that now hold the
# pseudo-labelled test images (original positions 3777..4776).
# `pos` is a permutation of 0..4776, so inverting it with a single
# argsort replaces the original O(n^2) per-element np.where scan:
# _inv_pos[i] is the shuffled row whose original position was i.
_inv_pos = np.argsort(pos)
new_pseudo_label_pos = [int(p) for p in _inv_pos[3777:4777]]


# generator splitting an iterable into chunks of maximum length N
def batches(iterable, N):
    """Yield successive chunks of at most N items from *iterable*.

    NOTE(review): the body is truncated in this fragment; documented
    from the header comment above only.
    """
    chunk = []
예제 #2
0
            # accuracies reported as percentages, losses as raw values
            round(tr_acc_tot * 100, 3),
            round(val_loss_tot, 3),
            round(val_acc_tot * 100, 3)
        ]
        # Tab-separated per-epoch summary.  NOTE(review): the opening of
        # this `res = [...]` literal lies above the visible fragment.
        print('\t'.join(map(str, res)))

    # If validation loss is too high, treat this run as a convergence
    # failure and skip prediction for this model.
    # NOTE(review): `continue` and the later use of `i` imply this sits
    # inside a loop over model runs whose header is above this fragment.
    if val_loss_tot > 1.0:
        continue

    print("Predicting with test images...")
    # Predict the full test set in fixed-size batches to bound memory.
    # NOTE(review): 79726 is presumably the total test-image count —
    # confirm against the dataset.
    imgs_per_batch = 100
    for j in range(0, 79726, imgs_per_batch):
        X_sub_part, sub_ids_part = load_test_data('../input',
                                                  img_shape=IMG_SHAPE,
                                                  img_start_ix=j,
                                                  max_img=imgs_per_batch)
        y_proba_part = pred_fn(X_sub_part)
        if j == 0:
            # First batch initialises the accumulators.
            y_proba = y_proba_part
            ids = sub_ids_part
        else:
            # np.append copies on every call; acceptable at ~800 batches.
            y_proba = np.append(y_proba, y_proba_part, axis=0)
            ids = np.append(ids, sub_ids_part, axis=0)
        # Progress log every 1000 images (every 10th batch).
        if j % 1000 == 0:
            print(j)

    # One submission file per model run, indexed by the outer-loop `i`.
    make_submission('../output/submission_vgg16_' + str(i) + '.csv', y_proba,
                    ids)
예제 #3
0
        # Tab-separated per-epoch summary: epoch, train loss, train
        # accuracy (%), validation loss, validation accuracy (%).
        res = [
            epoch,
            round(tr_loss_tot, 3),
            round(tr_acc_tot * 100, 3),
            round(val_loss_tot, 3),
            round(val_acc_tot * 100, 3)
        ]
        print('\t'.join(map(str, res)))

    # If validation loss is too high, treat this run as a convergence
    # failure and skip prediction for this model.
    if val_loss_tot > 1.0:
        continue

    print("Predicting with test images...")
    # Predict the test set in batches of 99 images.
    # NOTE(review): range(0, 1001, 99) yields start indices 0, 99, ..., 990;
    # confirm load_test_data clamps the final (short) batch at the end of
    # the data, and compare with the other fragment's batch size of 100.
    imgs_per_batch = 99
    for j in range(0, 1001, imgs_per_batch):
        X_sub_part, sub_ids_part = load_test_data(j, imgs_per_batch)
        y_proba_part = pred_fn(X_sub_part)
        if j == 0:
            # First batch initialises the accumulators.
            y_proba = y_proba_part
            ids = sub_ids_part
        else:
            y_proba = np.append(y_proba, y_proba_part, axis=0)
            ids = np.append(ids, sub_ids_part, axis=0)
        # NOTE(review): j is always a multiple of 99 here, so this
        # condition is always true — prints every batch.
        if j % 99 == 0:
            print(j)

    # One submission file per model run, indexed by the outer-loop `i`.
    make_submission('../output/submission_ggnet_' + str(i) + '.csv', y_proba,
                    ids)