Example #1
    # (this continues a nolearn NeuralNet(...) constructor; the layer
    # definition and the preceding keyword arguments are omitted from this excerpt)
    update=updates.nesterov_momentum,
    update_momentum=0.9,
    update_learning_rate=theano.shared(floatX(0.001)),
    batch_iterator_train=batch_iterator_train,
    batch_iterator_test=batch_iterator_test,
    verbose=1,
    train_split=train_split,
    max_epochs=4,
)
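
# Note: update_learning_rate above is a Theano shared variable rather than a
# plain float so that it can be changed while training, e.g. from an
# on_epoch_finished handler. The handler below is an illustrative sketch, not
# part of the original script; the decay factor is an assumption.
def decay_learning_rate(nn, train_history, factor=0.95):
    # nolearn calls each on_epoch_finished handler as handler(net, train_history)
    old = nn.update_learning_rate.get_value()
    nn.update_learning_rate.set_value(floatX(old * factor))
# It would be registered with on_epoch_finished=[decay_learning_rate] in the
# NeuralNet constructor above.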

# load pretrained model
with open('../input/pretrained/vgg16.pkl', 'rb') as f:
    params = pickle.load(f)

# replace the last two parameter arrays, shaped (4096, 1000) and (1000,), with (4096, 10) and (10,)
params['param values'][30] = params['param values'][30][:, :10]
params['param values'][31] = params['param values'][31][:10]

net.initialize_layers()
layers.set_all_param_values(net.layers_.values(), params['param values'])

print("Training neural network...")
net.fit(X, y)

del X
X_test, ids = load_test_data(path, grayscale=False, img_shape=IMG_SHAPE)

print("Predicting on test data...")
y_proba = net.predict_proba(X_test)
make_submission('../output/submission_01.csv', y_proba, ids)
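
The script relies on a few project-specific helpers that are not shown here (floatX, make_submission, load_test_data, the batch iterators). Below is a minimal sketch of what the first two typically look like; the signatures and the submission column names are assumptions, not the original code.

import numpy as np
import pandas as pd
import theano

def floatX(arr):
    # cast to Theano's configured float type (float32 on most GPU setups)
    return np.asarray(arr, dtype=theano.config.floatX)

def make_submission(path, y_proba, ids, n_classes=10):
    # one row per test image: the image id followed by one probability per class;
    # the column names below are placeholders -- match them to the competition's
    # sample submission file
    columns = ['c{}'.format(k) for k in range(n_classes)]
    df = pd.DataFrame(y_proba, columns=columns)
    df.insert(0, 'img', ids)
    df.to_csv(path, index=False)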
Example #2
        res = [
            epoch,
            round(tr_loss_tot, 3),
            round(tr_acc_tot * 100, 3),
            round(val_loss_tot, 3),
            round(val_acc_tot * 100, 3)
        ]
        print('\t'.join(map(str, res)))
        #print(epoch, val_loss_tot, val_acc_tot * 100)

    # if the validation loss is too high, the model failed to converge, so skip making predictions
    if val_loss_tot > 1.0:
        continue

    print("Predicting with test images...")
    imgs_per_batch = 99
    for j in range(0, 1001, imgs_per_batch):
        X_sub_part, sub_ids_part = load_test_data(j, imgs_per_batch)
        y_proba_part = pred_fn(X_sub_part)
        if j == 0:
            y_proba = y_proba_part
            ids = sub_ids_part
        else:
            y_proba = np.append(y_proba, y_proba_part, axis=0)
            ids = np.append(ids, sub_ids_part, axis=0)
        print(j)  # progress indicator (j advances in steps of imgs_per_batch)

    make_submission('../output/submission_vgg19_' + str(i) + '.csv', y_proba,
                    ids)
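
A note on the accumulation above: np.append copies the whole array on every chunk. A behaviour-equivalent sketch that collects the chunks in Python lists and concatenates once at the end (load_test_data and pred_fn are the same helpers assumed by the original loop):

import numpy as np

imgs_per_batch = 99
proba_parts, id_parts = [], []
for j in range(0, 1001, imgs_per_batch):
    X_sub_part, sub_ids_part = load_test_data(j, imgs_per_batch)  # assumed helper
    proba_parts.append(pred_fn(X_sub_part))                       # assumed prediction function
    id_parts.append(sub_ids_part)
    print(j)  # progress indicator
y_proba = np.concatenate(proba_parts, axis=0)
ids = np.concatenate(id_parts, axis=0)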
Example #3
        res = [
            epoch,
            round(tr_loss_tot, 3),
            round(tr_acc_tot * 100, 3),
            round(val_loss_tot, 3),
            round(val_acc_tot * 100, 3)
        ]
        print('\t'.join(map(str, res)))
        #print(epoch, val_loss_tot, val_acc_tot * 100)

    # if the validation loss is too high, the model failed to converge, so skip making predictions
    if val_loss_tot > 1.0:
        continue

    print("Predicting with test images...")
    imgs_per_batch = 99
    for j in range(0, 1001, imgs_per_batch):
        X_sub_part, sub_ids_part = load_test_data(j, imgs_per_batch)
        y_proba_part = pred_fn(X_sub_part)
        if j == 0:
            y_proba = y_proba_part
            ids = sub_ids_part
        else:
            y_proba = np.append(y_proba, y_proba_part, axis=0)
            ids = np.append(ids, sub_ids_part, axis=0)
        print(j)  # progress indicator (j advances in steps of imgs_per_batch)

    make_submission('../output/submission_resnet50_' + str(i) + '.csv',
                    y_proba, ids)