Example #1
        test_dataset = KTH(args.test_data_dir,
                           seq_len=args.short_len + args.out_len,
                           train=False)
        testloader = DataLoader(test_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=args.workers,
                                drop_last=False)

    clips = testloader.sampler.data_source.clips
    lpips_dist = lpips.LPIPS(net='alex').to(device)
    valid_mse, valid_psnr, valid_ssim, valid_lpips = \
        AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()

    print('Start testing...')
    pred_model.eval()
    with torch.no_grad():
        for test_i, test_data in enumerate(testloader):
            # define data indexes
            short_data_start, short_data_end = 0, args.short_len
            out_gt_start, out_gt_end = short_data_end, short_data_end + args.out_len

            # obtain input data and output gt
            test_data = torch.stack(test_data).to(device)
            test_data = test_data.transpose(dim0=0, dim1=1)
            short_data = test_data[:, short_data_start:short_data_end, :, :, :]
            out_gt = test_data[:, out_gt_start:out_gt_end, :, :, :]

            # frame prediction
            out_pred = pred_model(short_data, None, args.out_len, phase=2)
            out_pred = torch.clamp(out_pred, min=0, max=1)
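
The snippet ends right after the forward pass. A minimal sketch of how the AverageMeter objects and the LPIPS module prepared above might accumulate per-batch scores (the psnr and ssim helpers are assumed here, not part of the original code):

            # sketch only: accumulate this batch's metrics; `psnr` and `ssim`
            # are hypothetical helpers, not defined in the snippet above
            mse = torch.mean((out_pred - out_gt) ** 2).item()
            valid_mse.update(mse)
            valid_psnr.update(psnr(out_pred, out_gt))
            valid_ssim.update(ssim(out_pred, out_gt))
            # lpips.LPIPS expects NCHW input scaled to [-1, 1]; merge the
            # batch and time dimensions before scoring
            d = lpips_dist(out_pred.flatten(0, 1) * 2 - 1,
                           out_gt.flatten(0, 1) * 2 - 1)
            valid_lpips.update(d.mean().item())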
Example #2
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    os.environ['CUDA_VISIBLE_DEVICES'] = params['gpu']

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'],
                      dropout=params['dropout'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['lr']))
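    # `gradient_clipping` wraps the optimizer so that every step() clips the
    # gradients before the update; a sketch of such a wrapper, assumed rather
    # than the project's actual implementation:
    #
    #   def gradient_clipping(optimizer, max_norm=1.0):
    #       original_step = optimizer.step
    #       def step(closure):
    #           loss = closure()  # runs forward/backward
    #           torch.nn.utils.clip_grad_norm_(
    #               (p for g in optimizer.param_groups for p in g['params']),
    #               max_norm)
    #           original_step()   # apply the clipped update
    #           return loss
    #       optimizer.step = step
    #       return optimizer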

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    criterion = sequence_nll_loss_bits

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, params)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        start_epoch = int(epoch)
        global_step = iteration
        predictor.load_state_dict(state_dict)
    else:
        start_epoch = 0
        global_step = 0

    writer = SummaryWriter(
        os.path.join(
            results_path, "{}-{}".format(
                params['dataset'],
                str(datetime.datetime.now()).split('.')[0].replace(' ', '-'))))
    dataset_train = data_loader(0, val_split, eval=False)
    dataset_val = data_loader(val_split, test_split, eval=True)
    dataset_test = data_loader(test_split, 1, eval=True)

    generator = Generator(predictor.model, params['cuda'])
    best_val_loss = float('inf')

    for e in range(start_epoch, int(params['epoch_limit'])):
        for i, data in enumerate(dataset_train):

            batch_inputs = data[:-1]
            batch_target = data[-1]

            def wrap(input):
                # Variable is a no-op since PyTorch 0.4; moving the tensor
                # to the GPU is the only work still needed here
                if torch.is_tensor(input) and params['cuda']:
                    input = input.cuda()
                return input

            batch_inputs = list(map(wrap, batch_inputs))

            if params['cuda']:
                batch_target = batch_target.cuda()

            plugin_data = [None, None]

            def closure():
                batch_output = predictor(*batch_inputs)

                loss = criterion(batch_output, batch_target)
                loss.backward()

                if plugin_data[0] is None:
                    plugin_data[0] = batch_output.data
                    plugin_data[1] = loss.data

                return loss

            optimizer.zero_grad()
            optimizer.step(closure)
            train_loss = plugin_data[1]

            # stats: iteration
            writer.add_scalar('train/train loss', train_loss, global_step)
            print("E:{:03d}-S{:05d}: Loss={}".format(e, i, train_loss))
            global_step += 1

        # validation: per epoch
        predictor.eval()
        with torch.no_grad():
            loss_sum = 0
            n_examples = 0
            for data in dataset_val:
                batch_inputs = data[:-1]
                batch_target = data[-1]
                batch_size = batch_target.size()[0]

                def wrap(input):
                    # as above: Variable is deprecated, only device placement
                    if torch.is_tensor(input) and params['cuda']:
                        input = input.cuda()
                    return input

                batch_inputs = list(map(wrap, batch_inputs))

                if params['cuda']:
                    batch_target = batch_target.cuda()

                batch_output = predictor(*batch_inputs)

                loss_sum += criterion(batch_output,
                                      batch_target).item() * batch_size

                n_examples += batch_size

            val_loss = loss_sum / n_examples
            writer.add_scalar('validation/validation loss', val_loss,
                              global_step)
            print("== Validation Step E:{:03d}: Loss={} ==".format(
                e, val_loss))

        predictor.train()

        # saver: epoch
        last_pattern = 'ep{}-it{}'
        best_pattern = 'best-ep{}-it{}'
        if not params['keep_old_checkpoints']:
            pattern = os.path.join(checkpoints_path,
                                   last_pattern.format('*', '*'))
            for file_name in glob(pattern):
                os.remove(file_name)
        torch.save(
            predictor.state_dict(),
            os.path.join(checkpoints_path, last_pattern.format(e,
                                                               global_step)))

        cur_val_loss = val_loss
        if cur_val_loss < best_val_loss:
            # remove stale best checkpoints, not the regular one just saved
            pattern = os.path.join(checkpoints_path,
                                   best_pattern.format('*', '*'))
            for file_name in glob(pattern):
                os.remove(file_name)
            torch.save(
                predictor.state_dict(),
                os.path.join(checkpoints_path,
                             best_pattern.format(e, global_step)))
            best_val_loss = cur_val_loss

        generate_sample(generator, params, writer, global_step, results_path,
                        e)

    # generate final results
    generate_sample(generator, params, None, global_step, results_path, 0)
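
load_last_checkpoint is not shown in this example. Below is a sketch consistent with its usage above, returning (state_dict, epoch, iteration) recovered from the 'ep{}-it{}' filename pattern; the implementation is assumed, not the project's actual helper:

import os
import re
from glob import glob

import torch

def load_last_checkpoint(checkpoints_path, params):
    # pick the regular checkpoint with the highest iteration number
    files = glob(os.path.join(checkpoints_path, 'ep*-it*'))
    if not files:
        return None

    def iteration_of(path):
        m = re.search(r'^ep(\d+)-it(\d+)$', os.path.basename(path))
        return int(m.group(2)) if m else -1

    last = max(files, key=iteration_of)
    m = re.search(r'^ep(\d+)-it(\d+)$', os.path.basename(last))
    state_dict = torch.load(last, map_location='cpu')
    return state_dict, int(m.group(1)), int(m.group(2))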
Example #3
        h0 = torch.zeros(num_of_layers * num_of_directions, id_block.shape[0],
                         lstm_dim)
        c0 = torch.zeros(num_of_layers * num_of_directions, id_block.shape[0],
                         lstm_dim)

        batch_input, batch_len, batch_label = make_batch(
            _data['tr'], _label['tr'], id_block)
        output = predictor(batch_input, batch_len, h0, c0)
        loss = criterion(output, batch_label)
        running_loss += loss.item() * batch_input.size(0)
        loss.backward()
        _ = torch.nn.utils.clip_grad_norm_(predictor.parameters(), clip)
        optimizer.step()
    running_loss = running_loss / _data['tr'].shape[0]
    predictor.eval()
    dev_acc = eval(predictor, _data['dev'], _label['dev'])
    acc_1 = eval(predictor, _data['te1'], _label['te1'])
    acc_2 = eval(predictor, _data['te2'], _label['te2'])
    acc_3 = eval(predictor, _data['te3'], _label['te3'])
    if dev_acc > best_dev_acc:
        best_dev_acc = dev_acc
        best_model_wts = copy.deepcopy(predictor.state_dict())
        best_test1_acc = acc_1
        best_test2_acc = acc_2
        best_test3_acc = acc_3
        best_epoch_num = epoch
    print('epoch', epoch, '\t', running_loss, '\t', dev_acc, '\t', acc_1, '\t',
          acc_2, '\t', acc_3)
    all_losses.append(running_loss)
    all_acc_1.append(acc_1)
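
The eval helper called above sits outside the snippet. Here is a minimal sketch matching its call signature eval(predictor, data, label), reusing make_batch and the zero initial states from the training loop; batch_size, num_of_layers, num_of_directions, and lstm_dim are assumed to come from the enclosing scope:

def eval(predictor, data, label):
    # sketch only: batched classification accuracy over one split
    # (keeps the original's name, which shadows the built-in eval)
    correct, total = 0, 0
    with torch.no_grad():
        for start in range(0, data.shape[0], batch_size):
            ids = list(range(start, min(start + batch_size, data.shape[0])))
            h0 = torch.zeros(num_of_layers * num_of_directions, len(ids),
                             lstm_dim)
            c0 = torch.zeros(num_of_layers * num_of_directions, len(ids),
                             lstm_dim)
            batch_input, batch_len, batch_label = make_batch(data, label, ids)
            output = predictor(batch_input, batch_len, h0, c0)
            correct += (output.argmax(dim=1) == batch_label).sum().item()
            total += len(ids)
    return correct / total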