Example 1
def test(args):

    # Setup Dataloader
    data_json = json.load(open('config.json'))
    data_path = data_json[args.dataset]['data_path']

    t_loader = SaltLoader(data_path, split="test")
    test_df = t_loader.test_df
    test_loader = data.DataLoader(t_loader, batch_size=args.batch_size, num_workers=8)

    # Load model
    if args.arch == 'unet':
        model = Unet(start_fm=16)
    else:
        model = Unet_upsample(start_fm=16)
    model_path = data_json[args.model]['model_path']
    model.load_state_dict(torch.load(model_path)['model_state'])
    model.cuda()
    model.eval()  # inference mode: freeze dropout/batchnorm behaviour
    total = sum(param.nelement() for param in model.parameters())
    print('Number of params: %.2fM' % (total / 1e6))

    # Test: predict a mask for every test image
    pred_list = []
    with torch.no_grad():  # no gradients needed at test time
        for images in test_loader:
            images = images.cuda()
            y_preds = model(images)
            y_preds_shaped = y_preds.reshape(-1, args.img_size_target, args.img_size_target)
            # Iterate over the actual batch size: the last batch may be smaller
            for idx in range(y_preds_shaped.shape[0]):
                y_pred = y_preds_shaped[idx]
                pred = torch.sigmoid(y_pred)
                pred = pred.cpu().numpy()
                # Resize the network-size prediction back to the original image size
                pred_ori = resize(pred, (args.img_size_ori, args.img_size_ori), mode='constant', preserve_range=True)
                pred_list.append(pred_ori)

    # Submit: run-length encode each thresholded mask and write the CSV
    threshold_best = args.threshold
    pred_dict = {idx: RLenc(np.round(pred_list[i] > threshold_best)) for i, idx in
                 enumerate(tqdm_notebook(test_df.index.values))}
    sub = pd.DataFrame.from_dict(pred_dict, orient='index')
    sub.index.names = ['id']
    sub.columns = ['rle_mask']
    sub.to_csv('./results/{}_submission.csv'.format(args.model))
    print("The submission.csv saved in ./results")
Example 2
def test(args):

    # Setup data: a random batch of 32 single-channel 128x128 images
    data_json = json.load(open('config.json'))
    x = torch.randn(32, 1, 128, 128).cuda()

    # load Model
    if args.arch == 'unet':
        model = Unet(start_fm=16)
    else:
        model = Unet_upsample(start_fm=16)
    model_path = data_json[args.model]['model_path']
    model.load_state_dict(torch.load(model_path)['model_state'])
    model.cuda()
    total = sum(param.nelement() for param in model.parameters())
    print('Number of params: %.2fM' % (total / 1e6))

    # Visualize: render the autograd graph of one forward pass
    y = model(x)
    g = make_dot(y)
    g.render('k')  # writes 'k' (dot source) and, by default, 'k.pdf'
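make_dot presumably comes from the torchviz package; it walks the autograd graph of the output tensor and returns a Graphviz digraph. A minimal self-contained sketch of the same visualization step, assuming torchviz and Graphviz are installed (the Linear model is a stand-in for the Unet above):

import torch
from torchviz import make_dot

model = torch.nn.Linear(4, 2)  # stand-in model
x = torch.randn(1, 4)
g = make_dot(model(x), params=dict(model.named_parameters()))
g.render('unet_graph')  # writes 'unet_graph' (dot source) and 'unet_graph.pdf'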
Example 3
def main():

    videos, audios = get_data("v1", 1)
    print("Data Loaded")
    # Optionally resume from saved checkpoints:
    # if os.path.exists('./Unet.pt'):
    #     unet = torch.load('./Unet.pt')
    #     frame_discriminator = torch.load()
    unet = Unet(debug=False)
    frame_discriminator = FrameDiscriminator()
    sequence_discriminator = SequenceDiscriminator()
    if cuda:  # 'cuda' is assumed to be a module-level flag (see the sketch after this example)
        print('All good, running on CUDA.......')
        unet = unet.cuda()
        frame_discriminator = frame_discriminator.cuda()
        sequence_discriminator = sequence_discriminator.cuda()
    # Optionally wrap the models in nn.DataParallel for multi-GPU training:
    # if torch.cuda.device_count() > 1:
    #     print("Using ", torch.cuda.device_count(), " GPUs!")
    #     unet = nn.DataParallel(unet)
    #     frame_discriminator = nn.DataParallel(frame_discriminator)
    #     sequence_discriminator = nn.DataParallel(sequence_discriminator)
    train(audios, videos, unet, frame_discriminator, sequence_discriminator)
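The cuda flag, get_data, and the three models are defined elsewhere in that project. A minimal sketch of the assumed module-level setup, including how the commented-out multi-GPU path would be enabled (maybe_parallelize is a hypothetical helper):

import torch
import torch.nn as nn

# Assumed module-level flag used by main() above
cuda = torch.cuda.is_available()

def maybe_parallelize(model):
    # nn.DataParallel replicates the model across all visible GPUs
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    return model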
Example 4
def validate(args):

    # Setup Dataloader
    data_json = json.load(open('config.json'))
    data_path = data_json[args.dataset]['data_path']

    v_loader = SaltLoader(data_path, split='val')
    train_df = v_loader.train_df

    val_loader = data.DataLoader(v_loader,
                                 batch_size=args.batch_size,
                                 num_workers=8)

    # Load model
    if args.arch == 'unet':
        model = Unet(start_fm=16)
    else:
        model = Unet_upsample(start_fm=16)
    model_path = data_json[args.model]['model_path']
    model.load_state_dict(torch.load(model_path)['model_state'])
    model.cuda()
    model.eval()  # inference mode for validation
    total = sum(param.nelement() for param in model.parameters())
    print('Number of params: %.2fM' % (total / 1e6))

    # Validate: predict a mask for every validation image
    pred_list = []
    with torch.no_grad():  # no gradients needed for validation
        for images, masks in val_loader:
            images = images.cuda()
            y_preds = model(images)
            y_preds_shaped = y_preds.reshape(-1, args.img_size_target,
                                             args.img_size_target)
            # Iterate over the actual batch size: the last batch may be smaller
            for idx in range(y_preds_shaped.shape[0]):
                y_pred = y_preds_shaped[idx]
                pred = torch.sigmoid(y_pred)
                pred = pred.cpu().numpy()
                pred_ori = resize(pred, (args.img_size_ori, args.img_size_ori),
                                  mode='constant',
                                  preserve_range=True)
                pred_list.append(pred_ori)

    preds_valid = np.array(pred_list)
    y_valid_ori = np.array(
        [train_df.loc[idx].masks for idx in v_loader.ids_valid])

    # Jaccard score: sweep thresholds and keep the one with the best metric
    accuracies_best = 0.0
    threshold_best = 0.0
    for threshold in np.linspace(0, 1, 11):
        ious = []
        for y_pred, mask in zip(preds_valid, y_valid_ori):
            prediction = (y_pred > threshold).astype(int)
            # Note: jaccard_similarity_score was renamed jaccard_score in
            # modern scikit-learn
            iou = jaccard_similarity_score(mask.flatten(),
                                           prediction.flatten())
            ious.append(iou)

        # Fraction of images whose IoU clears each threshold in 0.5..0.95
        accuracies = [
            np.mean(np.array(ious) > iou_threshold)
            for iou_threshold in np.linspace(0.5, 0.95, 10)
        ]
        if accuracies_best < np.mean(accuracies):
            accuracies_best = np.mean(accuracies)
            threshold_best = threshold
        print('Threshold: %.1f, Metric: %.3f' %
              (threshold, np.mean(accuracies)))
    print("jaccard score gets threshold_best=", threshold_best)

    # Alternative scoring: finer threshold sweep with iou_metric_batch
    thresholds = np.linspace(0, 1, 50)
    ious = np.array([
        iou_metric_batch(y_valid_ori, np.int32(preds_valid > threshold))
        for threshold in tqdm_notebook(thresholds)
    ])
    # Restrict the argmax to mid-range thresholds (indices 9..39), skipping
    # the degenerate extremes near 0 and 1; the +9 maps the argmax back to
    # an index into the full array
    threshold_best_index = np.argmax(ious[9:-10]) + 9
    iou_best = ious[threshold_best_index]
    threshold_best = thresholds[threshold_best_index]
    print("other way gets iou_best=", iou_best, "threshold_best=",
          threshold_best)
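iou_metric_batch is also project code that is not shown. In TGS Salt kernels it typically scores each image by averaging a hit/miss decision over IoU thresholds from 0.5 to 0.95, then averages over the batch. A simplified sketch under that assumption (the real competition metric has more elaborate handling of empty masks):

import numpy as np

def iou_metric(y_true, y_pred):
    # IoU of two binary masks; two empty masks count as a perfect match
    intersection = np.logical_and(y_true, y_pred).sum()
    union = np.logical_or(y_true, y_pred).sum()
    if union == 0:
        return 1.0
    iou = intersection / union
    # Average a hit/miss decision over IoU thresholds 0.5, 0.55, ..., 0.95
    return np.mean(iou > np.arange(0.5, 1.0, 0.05))

def iou_metric_batch(y_true_batch, y_pred_batch):
    # Mean score over all images in the batch
    return np.mean([iou_metric(t, p) for t, p in zip(y_true_batch, y_pred_batch)])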
Example 5
def train(args):

    # Setup Dataloader
    data_json = json.load(open('config.json'))
    data_path = data_json[args.dataset]['data_path']
    t_loader = SaltLoader(data_path, img_size_ori=args.img_size_ori, img_size_target=args.img_size_target)
    v_loader = SaltLoader(data_path, split='val', img_size_ori=args.img_size_ori, img_size_target=args.img_size_target)

    train_loader = data.DataLoader(t_loader, batch_size=args.batch_size, num_workers=8, shuffle=True)
    val_loader = data.DataLoader(v_loader, batch_size=args.batch_size, num_workers=8)

    # Setup Model
    if args.arch == 'unet':
        model = Unet(start_fm=16)
    else:
        model = Unet_upsample(start_fm=16)
    print(model)
    total = sum(param.nelement() for param in model.parameters())
    print('Number of params: %.2fM' % (total / 1e6))

    model.cuda()

    # Optimizer and loss: Adam with BCE on the raw logits
    optimizer = torch.optim.Adam(model.parameters(), lr=args.l_rate)
    loss_fn = nn.BCEWithLogitsLoss()

    best_loss = float('inf')
    mean_train_losses = []
    mean_val_losses = []
    for epoch in range(args.n_epoch):
        train_losses = []
        val_losses = []

        model.train()
        for images, masks in train_loader:
            images = images.cuda()
            masks = masks.cuda()

            outputs = model(images)

            loss = loss_fn(outputs, masks)
            train_losses.append(loss.item())  # .item() detaches the scalar from the graph

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        model.eval()
        with torch.no_grad():  # no gradients needed for validation
            for images, masks in val_loader:
                images = images.cuda()
                masks = masks.cuda()

                outputs = model(images)
                loss = loss_fn(outputs, masks)
                val_losses.append(loss.item())

        mean_train_losses.append(np.mean(train_losses))
        mean_val_losses.append(np.mean(val_losses))
        # Checkpoint whenever the validation loss improves
        if np.mean(val_losses) < best_loss:
            best_loss = np.mean(val_losses)
            state = {'epoch': epoch + 1,
                     'model_state': model.state_dict(),
                     'optimizer_state': optimizer.state_dict()}
            torch.save(state, "./saved_models/{}_{}_best_model.pkl".format(args.arch, args.dataset))

        # Print loss
        print('Epoch: {}. Train Loss: {}. Val Loss: {}'.format(epoch + 1, np.mean(train_losses), np.mean(val_losses)))

    state = {'model_state': model.state_dict(),
             'optimizer_state': optimizer.state_dict(), }
    torch.save(state, "./saved_models/{}_{}_final_model.pkl".format(args.arch, args.dataset))

    print("saved two models in ./saved_models")