        # Per-epoch summary: epoch, train loss, train acc (%), val loss, val acc (%)
        res = [
            epoch,
            round(tr_loss_tot, 3),
            round(tr_acc_tot * 100, 3),
            round(val_loss_tot, 3),
            round(val_acc_tot * 100, 3)
        ]
        print('\t'.join(map(str, res)))
        #print(epoch, val_loss_tot, val_acc_tot * 100)

    # if the loss is still too high, the model failed to converge, so don't bother making predictions
    if val_loss_tot > 1.0:
        continue

    print("Predicting with test images...")
    # Predict on the test images in fixed-size batches, accumulating ids and probabilities.
    imgs_per_batch = 99
    for j in range(0, 1001, imgs_per_batch):
        X_sub_part, sub_ids_part = load_test_data(j, imgs_per_batch)
        y_proba_part = pred_fn(X_sub_part)
        if j == 0:
            y_proba = y_proba_part
            ids = sub_ids_part
        else:
            y_proba = np.append(y_proba, y_proba_part, axis=0)
            ids = np.append(ids, sub_ids_part, axis=0)
        if j % 99 == 0:
            print(j)  # progress logging (always true, since j advances in steps of 99)

    make_submission('../output/submission_vgs_' + str(i) + '.csv', y_proba,
                    ids)
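Each example finishes by handing the stacked probabilities and ids to make_submission. The original helper is not shown; a minimal sketch of what it might look like follows, assuming one row per image id followed by its class probabilities (header and column names are illustrative assumptions, not taken from the original code):

def make_submission(path, y_proba, ids):
    # Hypothetical CSV writer: id column followed by one probability column per class.
    # Column names and number formatting are assumptions.
    n_classes = y_proba.shape[1]
    header = 'id,' + ','.join('class_%d' % c for c in range(n_classes))
    with open(path, 'w') as f:
        f.write(header + '\n')
        for img_id, probs in zip(ids, y_proba):
            f.write(str(img_id) + ',' + ','.join('%.6f' % p for p in probs) + '\n')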
        res = [
            epoch,
            round(tr_loss_tot, 3),
            round(tr_acc_tot * 100, 3),
            round(val_loss_tot, 3),
            round(val_acc_tot * 100, 3)
        ]
        print('\t'.join(map(str, res)))
        #print(epoch, val_loss_tot, val_acc_tot * 100)

    # if the loss is still too high, the model failed to converge, so don't bother making predictions
    if val_loss_tot > 1.0:
        continue

    print("Predicting with test images...")
    imgs_per_batch = 99
    for j in range(0, 1001, imgs_per_batch):
        X_sub_part, sub_ids_part = load_test_data(j, imgs_per_batch)
        y_proba_part = pred_fn(X_sub_part)
        if j == 0:
            y_proba = y_proba_part
            ids = sub_ids_part
        else:
            y_proba = np.append(y_proba, y_proba_part, axis=0)
            ids = np.append(ids, sub_ids_part, axis=0)
        if j % 99 == 0:
            print(j)

    make_submission('../output/submission_vgg16_zoomed' + str(i) + '.csv',
                    y_proba, ids)
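Both examples rely on a load_test_data(start, count) helper to pull a slice of the test set. A possible sketch, assuming the test images sit as individual files under a ../input/test directory and are resized and scaled before prediction (directory layout, image size, and preprocessing are all assumptions):

import os
import numpy as np
from PIL import Image

def load_test_data(start, count, test_dir='../input/test', size=(224, 224)):
    # Hypothetical loader: returns (batch of preprocessed images, matching ids).
    fnames = sorted(os.listdir(test_dir))[start:start + count]
    imgs, ids = [], []
    for fname in fnames:
        img = Image.open(os.path.join(test_dir, fname)).resize(size)
        imgs.append(np.asarray(img, dtype=np.float32) / 255.0)
        ids.append(os.path.splitext(fname)[0])
    # Shape is (batch, height, width, channels); the networks above may expect
    # channels-first input instead, depending on the framework.
    return np.stack(imgs), np.array(ids)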
Example #3
        # Average the accumulated metrics over the samples used (training capped at the first 2000 indices).
        tr_loss_tot /= len(t_ix[:2000])
        tr_acc_tot /= len(t_ix[:2000])

        val_loss_tot /= len(v_ix)
        val_acc_tot /= len(v_ix)

        res = [epoch, round(tr_loss_tot, 3), round(tr_acc_tot * 100, 3),
               round(val_loss_tot, 3), round(val_acc_tot * 100, 3)]
        print('\t'.join(map(str, res)))
        #print(epoch, val_loss_tot, val_acc_tot * 100)

    # if the loss is still too high, the model failed to converge, so don't bother making predictions
    if val_loss_tot > 1.0:
        continue

    print("Predicting with test images...")
    imgs_per_batch = 99
    for j in range(0, 1001, imgs_per_batch):
        X_sub_part, sub_ids_part = load_test_data(j, imgs_per_batch)
        y_proba_part = pred_fn(X_sub_part)
        if j == 0:
            y_proba = y_proba_part
            ids = sub_ids_part
        else:
            y_proba = np.append(y_proba, y_proba_part, axis=0)
            ids = np.append(ids, sub_ids_part, axis=0)
        if j % 99 == 0:
            print(j)

    make_submission('../output/submission_resnet50_' + str(i) + '.csv', y_proba, ids)
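All three examples repeat the same batched prediction loop. Under the same assumptions as the sketches above (pred_fn returns class probabilities, load_test_data returns a batch and its ids), the loop could be factored into one helper that collects the per-batch outputs and stacks them once, avoiding the repeated np.append copies:

import numpy as np

def predict_test_set(pred_fn, n_images=1001, imgs_per_batch=99):
    # Run the model over the test set in fixed-size batches and stack the results.
    proba_parts, id_parts = [], []
    for j in range(0, n_images, imgs_per_batch):
        X_part, ids_part = load_test_data(j, imgs_per_batch)
        proba_parts.append(pred_fn(X_part))
        id_parts.append(ids_part)
        print(j)  # progress
    return np.vstack(proba_parts), np.concatenate(id_parts)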