コード例 #1
0
def evaluate(model_name, dataset, quadratic=False):
    """Evaluate an interpolation model on one benchmark's test fold.

    Loads the model named by `model_name`, runs it over the test split of
    `dataset` with fixed seeds, and collects per-sample PSNR, interpolation
    error (IE) and SSIM. Returns a pandas DataFrame with one row per sample
    plus `model` / `dataset` identifier columns.
    """
    print(f'[{time.ctime()}] Start evaluating {model_name} on {dataset}')

    # Resolve the model: either a loaded network or an interpolate() callable.
    if model_name == 'sepconv':
        import utilities
        model = utilities.get_sepconv(weights='l1').cuda()
    elif model_name in ('qvi-lin', 'qvi-quad'):
        from code.quadratic.interpolate import interpolate as model
    elif model_name == 'dain':
        from code.DAIN.interpolate import interpolate_efficient as model
    elif model_name == 'sepconv2':
        checkpoint= torch.load('models/checkpoint_1593886534_seed_0_optimizer=adamax_input_size=4_lr=0.001_lr2=0.0001_weights=None_kernel_size=45_loss=l1_pretrain=1_kernel_size_d=31_kernel_size_scale=4_kernel_size_qd=25_kernel_size_qd_scale=4')
        model = checkpoint['last_model'].cuda().eval()
    else:
        raise NotImplementedError()

    # Fixed seeds so every model sees the identical evaluation stream.
    torch.manual_seed(42)
    np.random.seed(42)

    # Dataset dispatch table; all builders use the test fold.
    builders = {
        'lmd': lambda: dataloader.large_motion_dataset2(quadratic=quadratic, fold='test', cropped=False),
        'adobe240': lambda: dataloader.adobe240_dataset(quadratic=quadratic, fold='test'),
        'gopro': lambda: dataloader.gopro_dataset(quadratic=quadratic, fold='test'),
        'vimeo90k': lambda: dataloader.vimeo90k_dataset(quadratic=quadratic, fold='test'),
    }
    if dataset not in builders:
        raise NotImplementedError()
    ds = builders[dataset]()

    # Normalize only; no augmentation at evaluation time.
    ds = dataloader.TransformedDataset(ds, normalize=True, random_crop=False, flip_probs=0)

    # Route the entire dataset into the "test" split.
    _, _, test = dataloader.split_data(ds, [0, 0, 1])

    data_loader = torch.utils.data.DataLoader(test, batch_size=1)

    scores = defaultdict(list)
    with torch.no_grad():
        for X, y in tqdm(data_loader, total=len(data_loader)):
            X = X.cuda()
            y = y.cuda()

            y_hat = model(X).clamp(0, 1)

            # Metrics expect pixel values in the [0, 255] range.
            y.mul_(255)
            y_hat.mul_(255)

            scores['psnr'].extend(metrics.psnr(y_hat, y))
            scores['ie'].extend(metrics.interpolation_error(y_hat, y))
            scores['ssim'].extend(metrics.ssim(y_hat, y))

    # Collect results and tag them with model/dataset identifiers.
    results = pd.DataFrame(scores)
    results['model'] = model_name
    results['dataset'] = dataset

    return results
コード例 #2
0
ファイル: eval_hard.py プロジェクト: rwq/MSC_AI_Thesis_FI
def evaluate(model_name):
    """Render predictions for the hard Vimeo90k instances.

    Loads the model named by `model_name`, interpolates each index listed
    in `hard_vimeo`, and writes the predicted frame as a PNG into
    OUTPUT_FOLDER, named '<model>_<index>.png'.
    """
    print(f'[{time.ctime()}] Start evaluating {model_name}')

    # Quadratic (4-frame) input mode is encoded in the model name.
    quadratic = 'quad' in model_name

    if model_name == 'sepconv-l1':
        import utilities
        model = utilities.get_sepconv(weights='l1').cuda()
    elif model_name == 'sepconv-lf':
        import utilities
        model = utilities.get_sepconv(weights='lf').cuda()
    elif model_name in ('qvi-lin', 'qvi-quad'):
        from code.quadratic.interpolate import interpolate as model
    elif model_name == 'dain':
        from code.DAIN.interpolate import interpolate_efficient as model
    else:
        raise NotImplementedError()

    # Fixed seeds for a deterministic evaluation run.
    torch.manual_seed(42)
    np.random.seed(42)

    ds = dataloader.vimeo90k_dataset(quadratic=quadratic, fold='test')
    # Normalize only; no augmentation at evaluation time.
    ds = dataloader.TransformedDataset(ds,
                                       normalize=True,
                                       random_crop=False,
                                       flip_probs=0)

    _, _, test = dataloader.split_data(ds, [0, 0, 1])

    with torch.no_grad():
        for index in tqdm(hard_vimeo):
            # Single-sample "batch" for the model.
            X, _ = ds[index]
            X = X.cuda().unsqueeze(0).contiguous()

            y_hat = model(X).clamp(0, 1)

            # CHW float in [0,1] -> HWC uint8 image.
            y_hat.mul_(255)
            img = y_hat.squeeze(0).permute(
                1, 2, 0).detach().cpu().numpy().astype('uint8')

            destination = os.path.join(OUTPUT_FOLDER,
                                       '{}_{}.png'.format(model_name, index))
            imageio.imwrite(destination, img)
コード例 #3
0
def train(params, n_epochs, verbose=True):
    """Train (or resume) the SepConv interpolation model.

    params:   dict of hyperparameters — kernel sizes, 'lr', 'lr2',
              'optimizer' ('ranger'|'adamax'), 'pretrain', 'input_size'.
    n_epochs: maximum number of epochs.
    verbose:  show a tqdm progress bar per fold when True.

    Returns the ResultStore holding per-fold metrics.

    Fixes vs. previous version: `filepath_out` is defined before the epoch
    loop (it was previously only assigned inside the new-best branch, so the
    unconditional checkpoint save could raise NameError), and the triplicated
    optimizer param-group construction is factored into one helper.
    """
    # init interpolation model
    timestamp = int(time.time())
    formatted_params = '_'.join(f'{k}={v}' for k, v in params.items())

    torch.manual_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    random.seed(FLAGS.seed)

    def param_groups(net):
        # Two LR groups: the 'moduleConv' layers train with params['lr2'],
        # everything else uses the optimizer-level lr.
        return [{
            'params':
            [p for l, p in net.named_parameters() if 'moduleConv' not in l]
        }, {
            'params':
            [p for l, p in net.named_parameters() if 'moduleConv' in l],
            'lr': params['lr2']
        }]

    if FLAGS.filename is None:
        # Fresh run: build the network from the hyperparameters.
        G = SepConvNetExtended(kl_size=params['kl_size'],
                               kq_size=params['kq_size'],
                               kl_d_size=params['kl_d_size'],
                               kl_d_scale=params['kl_d_scale'],
                               kq_d_scale=params['kq_d_scale'],
                               kq_d_size=params['kq_d_size'],
                               input_frames=params['input_size'])

        if params['pretrain'] in [1, 2]:
            print('LOADING L1')
            G.load_weights('l1')
        name = f'{timestamp}_seed_{FLAGS.seed}_{formatted_params}'
        G = torch.nn.DataParallel(G).cuda()

        if params['optimizer'] == 'ranger':
            optimizer = Ranger(param_groups(G),
                               lr=params['lr'],
                               betas=(.95, .999))
        elif params['optimizer'] == 'adamax':
            optimizer = torch.optim.Adamax(param_groups(G),
                                           lr=params['lr'],
                                           betas=(.9, .999))
        else:
            raise NotImplementedError()

        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, T_0=60 - FLAGS.warmup, T_mult=1, eta_min=1e-5)

        start_epoch = 0

    else:
        # Resume: model, optimizer state and epoch come from the checkpoint.
        checkpoint = torch.load(FLAGS.filename)
        G = checkpoint['last_model'].cuda()
        start_epoch = checkpoint['epoch'] + 1
        name = checkpoint['name']

        optimizer = torch.optim.Adamax(param_groups(G),
                                       lr=params['lr'],
                                       betas=(.9, .999))

        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                               T_max=n_epochs -
                                                               FLAGS.warmup,
                                                               eta_min=1e-5,
                                                               last_epoch=-1)

        # Fast-forward the LR schedule to where the checkpoint left off.
        for _ in range(start_epoch - FLAGS.warmup + 1):
            scheduler.step()

    print('SETTINGS:')
    print(params)
    print('NAME:')
    print(name)
    sys.stdout.flush()

    # Fixed seeds so the data pipeline is identical across runs.
    torch.manual_seed(42)
    np.random.seed(42)
    random.seed(42)

    # 4 input frames implies quadratic motion modelling.
    quadratic = params['input_size'] == 4

    L1_loss = torch.nn.L1Loss().cuda()

    ds_train_lmd = dataloader.large_motion_dataset(quadratic=quadratic,
                                                   cropped=True,
                                                   fold='train',
                                                   min_flow=6)
    ds_valid_lmd = dataloader.large_motion_dataset(quadratic=quadratic,
                                                   cropped=True,
                                                   fold='valid')

    ds_vimeo_train = dataloader.vimeo90k_dataset(fold='train',
                                                 quadratic=quadratic)
    ds_vimeo_test = dataloader.vimeo90k_dataset(fold='test',
                                                quadratic=quadratic)

    train_vimeo, valid_vimeo = dataloader.split_data(ds_vimeo_train, [.9, .1])

    # Augmentation for training; validation/test get normalization only.
    train_settings = {
        'flip_probs': FLAGS.flip_probs,
        'normalize': True,
        'crop_size': (FLAGS.crop_size, FLAGS.crop_size),
        'jitter_prob': FLAGS.jitter_prob,
        'random_rescale_prob': FLAGS.random_rescale_prob
    }

    valid_settings = {
        'flip_probs': 0,
        'random_rescale_prob': 0,
        'random_crop': False,
        'normalize': True
    }

    train_lmd = dataloader.TransformedDataset(ds_train_lmd, **train_settings)
    valid_lmd = dataloader.TransformedDataset(ds_valid_lmd, **valid_settings)

    train_vimeo = dataloader.TransformedDataset(train_vimeo, **train_settings)
    valid_vimeo = dataloader.TransformedDataset(valid_vimeo, **valid_settings)
    test_vimeo = dataloader.TransformedDataset(ds_vimeo_test, **valid_settings)

    train_data = torch.utils.data.ConcatDataset([train_lmd, train_vimeo])

    # Test subset: top-decile displacement instances.
    df = pd.read_csv('hardinstancesinfo/vimeo90k_test_flow.csv')
    test_disp = torch.utils.data.Subset(
        ds_vimeo_test,
        indices=df[df.mean_manh_flow >= df.quantile(.9).mean_manh_flow].index.
        tolist())
    test_disp = dataloader.TransformedDataset(test_disp, **valid_settings)
    test_disp = torch.utils.data.DataLoader(test_disp,
                                            batch_size=4,
                                            pin_memory=True)

    # Test subset: top-decile non-linearity instances.
    df = pd.read_csv('hardinstancesinfo/Vimeo90K_test.csv')
    test_nonlin = torch.utils.data.Subset(
        ds_vimeo_test,
        indices=df[
            df.non_linearity >= df.quantile(.9).non_linearity].index.tolist())
    test_nonlin = dataloader.TransformedDataset(test_nonlin, **valid_settings)
    test_nonlin = torch.utils.data.DataLoader(test_nonlin,
                                              batch_size=4,
                                              pin_memory=True)

    # Flow-magnitude weights bias the sampler towards large-motion clips.
    df_vim = pd.read_csv('hardinstancesinfo/vimeo90k_train_flow.csv')
    weights_vim = df_vim[df_vim.index.isin(
        train_vimeo.dataset.indices)].mean_manh_flow.tolist()
    weights_lmd = ds_train_lmd.weights
    train_sampler = torch.utils.data.sampler.WeightedRandomSampler(
        weights_lmd + weights_vim, FLAGS.num_train_samples, replacement=False)

    train_dl = torch.utils.data.DataLoader(train_data,
                                           batch_size=FLAGS.batch_size,
                                           pin_memory=True,
                                           shuffle=False,
                                           sampler=train_sampler,
                                           num_workers=FLAGS.num_workers)
    valid_dl_vim = torch.utils.data.DataLoader(valid_vimeo,
                                               batch_size=4,
                                               pin_memory=True,
                                               num_workers=FLAGS.num_workers)
    valid_dl_lmd = torch.utils.data.DataLoader(valid_lmd,
                                               batch_size=4,
                                               pin_memory=True,
                                               num_workers=FLAGS.num_workers)
    test_dl_vim = torch.utils.data.DataLoader(test_vimeo,
                                              batch_size=4,
                                              pin_memory=True,
                                              num_workers=FLAGS.num_workers)

    # Metrics / logging.
    writer = SummaryWriter(f'runs/final_exp/full_run_losses/{name}')

    results = ResultStore(writer=writer,
                          metrics=['psnr', 'ssim', 'ie', 'L1_loss', 'lf'],
                          folds=FOLDS)

    early_stopping_metric = 'L1_loss'
    early_stopping = EarlyStopping(results,
                                   patience=FLAGS.patience,
                                   metric=early_stopping_metric,
                                   fold='valid_vimeo')

    loss_network = losses.LossNetwork(layers=[26]).cuda()  #9, 16, 26
    Perc_loss = losses.PerceptualLoss(loss_network).cuda()

    def do_epoch(dl, fold, epoch, train=False):
        # One pass over `dl`; backprop only when train=True.
        # (Parameter renamed from `dataloader` to avoid shadowing the module.)
        assert fold in FOLDS

        if verbose:
            pb = tqdm(desc=f'{fold} {epoch+1}/{n_epochs}',
                      total=len(dl),
                      leave=True,
                      position=0)

        for X, y in dl:
            X = X.cuda()
            y = y.cuda()

            y_hat = G(X)

            l1_loss = L1_loss(y_hat, y)
            feature_loss = Perc_loss(y_hat, y)

            # Combined L1 + perceptual ("lf") training loss.
            lf_loss = l1_loss + feature_loss

            if train:
                optimizer.zero_grad()
                lf_loss.backward()
                optimizer.step()

            # Metrics operate on the [0, 255] pixel range.
            y_hat = (y_hat * 255).clamp(0, 255)
            y = (y * 255).clamp(0, 255)

            psnr = metrics.psnr(y_hat, y)
            ssim = metrics.ssim(y_hat, y)
            ie = metrics.interpolation_error(y_hat, y)

            results.store(
                fold, epoch, {
                    'L1_loss': l1_loss.item(),
                    'psnr': psnr,
                    'ssim': ssim,
                    'ie': ie,
                    'lf': lf_loss.item()
                })

            if verbose: pb.update()

        # update tensorboard
        results.write_tensorboard(fold, epoch)
        sys.stdout.flush()

    # Path template for 'generator'/'checkpoint' saves. Defined before the
    # loop so the unconditional checkpoint save below never hits a NameError
    # (previously it was only assigned inside the new-best branch).
    filepath_out = os.path.join(MODEL_FOLDER, '{0}_{1}')

    start_time = time.time()
    for epoch in range(start_epoch, n_epochs):

        G.train()
        do_epoch(train_dl, 'train_fold', epoch, train=True)

        # Hold the LR constant during the warmup epochs.
        if epoch >= FLAGS.warmup - 1:
            scheduler.step()

        G.eval()
        with torch.no_grad():
            do_epoch(valid_dl_vim, 'valid_vimeo', epoch)
            do_epoch(valid_dl_lmd, 'valid_lmd', epoch)

        # Run the (expensive) test folds periodically, on the last epoch,
        # and right before early stopping triggers.
        if (early_stopping.stop() and epoch >= FLAGS.min_epochs
            ) or epoch % FLAGS.test_every == 0 or epoch + 1 == n_epochs:
            with torch.no_grad():
                do_epoch(test_disp, 'test_disp', epoch)
                do_epoch(test_nonlin, 'test_nonlin', epoch)

                do_epoch(test_dl_vim, 'test_vimeo', epoch)

            visual_evaluation(model=G,
                              quadratic=params['input_size'] == 4,
                              writer=writer,
                              epoch=epoch)

            visual_evaluation_vimeo(model=G,
                                    quadratic=params['input_size'] == 4,
                                    writer=writer,
                                    epoch=epoch)

        # save model if new best
        if early_stopping.new_best():
            torch.save(G, filepath_out.format('generator', name))

        # save last model state
        checkpoint = {
            'last_model': G,
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'name': name,
            'scheduler': scheduler
        }
        torch.save(checkpoint, filepath_out.format('checkpoint', name))

        if early_stopping.stop() and epoch >= FLAGS.min_epochs:
            break

        torch.cuda.empty_cache()

    end_time = time.time()
    # free memory
    del G
    torch.cuda.empty_cache()
    time_elapsed = end_time - start_time
    print(f'Ran {n_epochs} epochs in {round(time_elapsed, 1)} seconds')

    return results
コード例 #4
0
ファイル: run.py プロジェクト: shin285/kakao_arena
import dataloader
from categoryclassifier import CategoryClassifier

# Raw string: '\d' and '\k' are invalid escape sequences and emit a
# SyntaxWarning (an error in future Python versions). The resulting path
# value is byte-identical to the previous literal.
training_data, validation_data = dataloader.split_data(r'D:\data\kakao_arena')

# Each record is (bcateid, mcateid, scateid, dcateid, model, brand, maker);
# zip(*...) transposes the record list into parallel columns.
bcateid, mcateid, scateid, dcateid, model, brand, maker = zip(*training_data)

v_bcateid, v_mcateid, v_scateid, v_dcateid, v_model, v_brand, v_maker = zip(*validation_data)

classifier = CategoryClassifier()
# Train on the 'model' text field to predict the top-level category (bcateid),
# validating against the held-out split.
classifier.training(model, bcateid, v_model, v_bcateid)
コード例 #5
0
            output = net(features)
            # Binarize the output
            pred = output.apply_(lambda x: 0.0 if x < 0.5 else 1.0)
            test_loss += criterion(output, target)  # sum up batch loss
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set:\n\tAverage loss: {:.4f}'.format(test_loss))
    print('\tAccuracy: {}/{} ({:.0f}%)\n'.format(
        correct, (len(test_loader) * test_loader.batch_size),
        100. * correct / (len(test_loader) * test_loader.batch_size)))


# Train the network on the breast-cancer ARFF dataset without any
# resampling, then persist the learned weights.
dataset = 'breast-cancer'
data = samplingloader(dataset, 'arff')
# split_data also reports the feature count needed to size the network input.
train_data, test_data, features_num = split_data(data)
# Seed before constructing Net so the weight initialization is reproducible.
torch.manual_seed(42)
net = Net(features_num).double()
learning_rate = 0.01
optimizer = torch.optim.SGD(net.parameters(),
                            lr=learning_rate,
                            momentum=0.5,
                            nesterov=False)
epochs = 10

print("Training Start")
# Evaluate on the test split after every training epoch.
for epoch in range(1, epochs + 1):
    train(net, train_data, optimizer, epoch)
    test(net, test_data)

# Save only the state dict; the '_without_sampling' suffix distinguishes this
# run from resampled variants.
torch.save(net.state_dict(), 'models/' + dataset + '_without_sampling')