예제 #1
0
                    '_' + str(tta) + '_' + str(ms) + '.pkl')):
            # Cached prediction exists for this (tta, ms) configuration:
            # reload it instead of re-running inference.
            # NOTE(review): pickle.load on a file this script wrote earlier —
            # never point pkl_name at untrusted input.
            d = pickle.load(open(pkl_name, 'rb'))
            pred = d['pred']
        else:
            # No cache yet: build the test loader and run the trained model once.
            test_dataloader = data.DataLoader(
                test_dataset,
                batch_size=args.batch_size,
                num_workers=args.num_workers,
                pin_memory=True,
                # TTA batches need their own collate to keep augmented views together
                collate_fn=tta_collate if args.use_tta else default_collate,
                shuffle=False)
            model = BaseModel(args)
            model.init_model()
            model.load_trained_model()

            pred = model.eval(test_dataloader)  # after softmax array
            # Cache so subsequent ensemble runs skip this model's inference.
            pickle.dump({'pred': pred}, open(pkl_name, 'wb'))

        # Accumulate this experiment's probabilities into the ensemble sum.
        preds_test += pred

    # Simple mean of probabilities across the ensembled experiments.
    preds_test /= len(args.ensemble_exp)
    # generate csv
    print('generate csv ...')
    # Sweep binarization thresholds; one submission CSV per threshold value.
    for t in [0.29, 0.30, 0.31, 0.32, 0.33, 0.4, 0.45]:
        make_submission((preds_test > t).astype(np.uint8),
                        test['names'],
                        path='{}_{:.2f}_submission.csv'.format(csv_name, t))
    print('generate {}_submission.csv'.format(args.exp_name))

    if args.vis:
        print('vis mask ...')
예제 #2
0
    # Load the cached validation split and locate the training images on disk.
    val = pickle.load(open(os.path.join(args.data_root, 'val.pkl'), 'rb'))
    image_root = os.path.join(args.data_root, 'train', 'images')

    val_dataset = SaltSet(val, image_root, BaseTransform(args.size, MEAN, STD))

    # shuffle=False keeps loader order aligned with val['names'] below.
    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=args.num_workers,
                                     pin_memory=True,
                                     shuffle=False)

    # Build the model and restore the trained weights before evaluation.
    model = BaseModel(args)
    model.init_model()
    model.load_trained_model()

    # NOTE(review): assumed to return post-softmax probability maps, one per
    # image, matching the 'after softmax array' comment used elsewhere — verify.
    preds_test = model.eval(val_dataloader)

    # Threshold at 0.5, resize back toward the original resolution, then
    # run-length-encode each binary mask, keyed by image id.
    pred_dict = {
        idx: RLenc(
            np.round(
                downsample(preds_test[i].squeeze(), img_size_ori,
                           img_size_target) > 0.5))
        for i, idx in enumerate(val['names'])
    }

    # Kaggle-style submission file: one RLE mask string per image id.
    sub = pd.DataFrame.from_dict(pred_dict, orient='index')
    sub.index.names = ['id']
    sub.columns = ['rle_mask']
    sub.to_csv('submission.csv')
예제 #3
0
        shuffle=False)

    # Cache file name encodes experiment, checkpoint, TTA flag and multi-scale
    # flag, so every configuration gets its own prediction pickle.
    pkl_name = os.path.join(
        ensemble_dict, args.exp_name + '_' + args.trained_model + '_' +
        str(args.use_tta) + '_' + str(args.ms) + '.pkl')
    print(pkl_name)

    if os.path.exists(pkl_name):
        # Reuse the cached prediction from a previous run of this config.
        d = pickle.load(open(pkl_name, 'rb'))
        preds_test = d['pred']
    else:
        # First run: restore trained weights and run inference once.
        model = BaseModel(args)
        model.init_model()
        model.load_trained_model()

        preds_test = model.eval(test_dataloader)  # (bs,101, 101)
        pickle.dump({'pred': preds_test}, open(pkl_name, 'wb'))

    # generate csv
    print('generate csv ...')
    # Single binarization threshold; add values to the list to sweep more.
    for t in [
            0.14,
    ]:
        make_submission(
            (preds_test > t).astype(np.uint8),
            test['names'],
            path='{}_{:.2f}_submission.csv'.format(args.exp_name, t))
    # pred_dict = {idx: RLenc(np.round(preds_test[i] > 0.5)) for
    #              i, idx in tqdm(enumerate(test['names']))}
    #
    # sub = pd.DataFrame.from_dict(pred_dict, orient='index')
}

# --------------------train and val-------------------

best_acc = 0.0
# Snapshot of the weights that achieved best_acc; seeded with the initial state.
best_model_wts = copy.deepcopy(model.state_dict())

print("Start training:")
print(f"Detailed training log path: {work_dir}log.log")

for epoch in range(num_epochs):
    epoch_index = f"{epoch + 1}/{num_epochs}"  # progress tag, e.g. "3/20"
    train_process(dataLoader_train, epoch_index, params)
    val_acc = val_process(dataLoader_val, epoch_index, params)
    exp_lr_scheduler.step()  # advance the LR schedule once per epoch
    if val_acc > best_acc:
        # New validation best: remember both the score and a weight snapshot.
        best_acc, best_model_wts = val_acc, copy.deepcopy(model.state_dict())
    # Checkpoint every epoch regardless, so any epoch can be recovered.
    torch.save(model.state_dict(), f"{work_dir}epoch-{epoch}.pkl")

# Restore the best-validation weights and persist them under a stable name.
model.load_state_dict(best_model_wts)
torch.save(model.state_dict(), f"{work_dir}best-val.pkl")

# --------------------inference-------------------

# Reload the best checkpoint; the map_location lambda keeps tensors on the
# default (host) storage so this also works on machines without a GPU.
checkpoint = torch.load(f"{work_dir}best-val.pkl",
                        map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint)
model.eval()  # switch to evaluation mode before predicting

print("Start testing:")
predict(test_data_path, work_dir, params)