Example #1
def test(dataset, loader, model, args, device, tag=''):
    psnr = AverageMeter()

    # Set the model to evaluation mode
    model.eval()

    with tqdm(total=len(dataset)) as t:
        t.set_description(tag)

        for data in loader:
            lr, hr = data
            lr = lr.to(device)
            hr = hr.to(device)

            # forward / forward_x8 are inference helpers defined elsewhere;
            # run them without tracking gradients
            with torch.no_grad():
                if args.self_ensemble:
                    sr = forward_x8(lr)
                else:
                    sr = forward(lr)

            # Quantize results
            sr = quantize(sr, args.rgb_range)

            # Update PSNR
            psnr.update(calc_psnr(sr, hr, scale=args.scale, max_value=args.rgb_range[1]), lr.shape[0])

            t.update(lr.shape[0])

    print('DIV2K (val) PSNR: {:.4f} dB'.format(psnr.avg))
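
All of these examples rely on an AverageMeter helper whose definition is not shown on this page. The sketch below is the widely used variant from the PyTorch ImageNet reference script; it matches how update(value, n), .val and .avg are used throughout these examples, but treat it as an assumption about the missing class rather than the verbatim original.

class AverageMeter:
    """Tracks the latest value and a running, sample-weighted average."""

    def __init__(self):
        self.val = 0.0   # most recent value passed to update()
        self.sum = 0.0   # weighted sum of all values
        self.count = 0   # total weight (e.g. number of samples)
        self.avg = 0.0   # running average: sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count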
Example #2
def train(dataset, loader, model, criterion, optimizer, device, tag=''):
    losses = AverageMeter()

    # Set the model to training mode
    model.train()

    with tqdm(total=len(dataset)) as t:
        t.set_description(tag)

        for data in loader:
            lr, hr = data
            lr = lr.to(device)
            hr = hr.to(device)

            # Predict results and calculate loss
            sr = model(lr)
            loss = criterion(sr, hr)
            # Typical shapes at this point (captured in a debug session):
            #   lr: [96, 3, 24, 24], hr: [96, 3, 96, 96]

            # Update loss
            losses.update(loss.item(), lr.shape[0])

            # Compute gradients and update parameters
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            t.set_postfix(loss='{:.4f}'.format(losses.avg))
            t.update(lr.shape[0])

        return losses.avg
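
For orientation, here is a minimal sketch of how the train() and test() functions above are typically driven. The epoch loop, the dataset/loader names, and the args.num_epochs field are assumptions for illustration, not part of the original examples.

# Hypothetical driver for the train()/test() functions above.
for epoch in range(args.num_epochs):
    tag = 'Epoch {}/{}'.format(epoch + 1, args.num_epochs)
    avg_loss = train(train_dataset, train_loader, model, criterion,
                     optimizer, device, tag=tag)
    print('Epoch {} train loss: {:.4f}'.format(epoch + 1, avg_loss))
    test(val_dataset, val_loader, model, args, device, tag=tag)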
Example #3
def test(dataset, loader, model, device, args, tag=''):
    psnr = AverageMeter()

    # Set the model to evaluation mode
    model.eval()
    count = 0
    with tqdm(total=len(dataset)) as t:
        t.set_description(tag)

        for data in loader:
            count = count + 1

            lr, hr = data
            lr = lr.to(device)
            hr = hr.to(device)

            # Run inference without tracking gradients
            with torch.no_grad():
                if args.self_ensemble:
                    sr = forward_x8(lr)
                else:
                    sr = forward(lr)

            # Typical shapes at this point (2x upscaling, from a debug session):
            #   lr: [1, 3, 678, 1020], sr: [1, 3, 1356, 2040]

            # Quantize results
            sr = quantize(sr, args.rgb_range)

            # Update PSNR
            psnr.update(
                calc_psnr(sr,
                          hr,
                          scale=args.scale,
                          max_value=args.rgb_range[1]), lr.shape[0])

            t.update(lr.shape[0])

            # Optionally dump SR outputs as PNGs (kept disabled here):
            # if count > 100:
            #     srimg = torchvision.transforms.ToPILImage()(sr.squeeze().div(255).cpu())
            #     srimg.save("result/{:03d}.png".format(count - 100))

    print('SDR (val) PSNR: {:.4f} dB'.format(psnr.avg))
Example #4
def on_test_start(state):
    state['loss_meter'] = AverageMeter()
    state['sorted_segments_list'] = []
    state['output'] = []
    if config.VERBOSE:
        state['progress_bar'] = tqdm(
            total=math.ceil(len(test_dataset) / config.TRAIN.BATCH_SIZE))
Example #5
def on_start(state):
    state['loss_meter'] = AverageMeter()
    state['test_interval'] = int(
        len(train_dataset) / config.TRAIN.BATCH_SIZE *
        config.TEST.INTERVAL)
    state['t'] = 1
    model.train()
    if config.VERBOSE:
        state['progress_bar'] = tqdm(total=state['test_interval'])
Example #6
def on_test_start(state):
    state['loss_meter'] = AverageMeter()
    state['sorted_segments_list'] = []
    if config.VERBOSE:
        if state['split'] == 'train':
            state['progress_bar'] = tqdm(total=math.ceil(
                len(train_dataset) / config.TEST.BATCH_SIZE))
        elif state['split'] == 'val':
            state['progress_bar'] = tqdm(
                total=math.ceil(len(val_dataset) / config.TEST.BATCH_SIZE))
        elif state['split'] == 'test':
            state['progress_bar'] = tqdm(total=math.ceil(
                len(test_dataset) / config.TEST.BATCH_SIZE))
        else:
            raise NotImplementedError
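
Examples #4 through #6 are event hooks rather than free-standing entry points, and this page does not show how they are registered. Below is a plausible wiring for an engine-with-hooks run loop; the Engine class here is a minimal stand-in, not the original project's engine.

# Hypothetical registration of the hooks above. Engine is a stand-in for
# whatever runner the original project defines (assumed API: a hooks dict
# consulted at the corresponding events of the run loop).
class Engine:
    def __init__(self):
        self.hooks = {}

engine = Engine()
engine.hooks['on_start'] = on_start
engine.hooks['on_test_start'] = on_test_start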
Example #7
def test(dataset, loader, model, args, device, tag=''):
    psnr = AverageMeter()

    # Set the model to evaluation mode
    model.eval()

    with tqdm(total=len(dataset)) as t:
        t.set_description(tag)

        count = 0
        for data in loader:
            lr, hr = data
            lr = lr.to(device)
            hr = hr.to(device)

            # Run inference without tracking gradients
            with torch.no_grad():
                if args.self_ensemble:
                    sr = forward_x8(lr, model)
                else:
                    sr = forward(lr, model)

            # Quantize results
            sr = quantize(sr, args.rgb_range)

            if args.output_dir:
                sr_np = np.uint8(sr.squeeze().cpu().numpy().transpose(2, 1, 0))
                lr_np = np.uint8(lr.squeeze().cpu().numpy().transpose(2, 1, 0))
                im = Image.fromarray(sr_np).transpose(Image.ROTATE_270)
                im_lr = Image.fromarray(lr_np).transpose(Image.ROTATE_270)
                im.save(args.output_dir + str(count) + ".png")
                im_lr.save(args.output_dir + str(count) + "_lr.png")
                count += 1

            # To dump activation maps instead, uncomment (needs a forward hook
            # that fills activation['deconv']):
            # act = activation['deconv'].squeeze()
            # for idx in range(50):
            #     plt.imsave('results/output/act/activation%d_.png' % idx,
            #                act[idx], vmin=act[idx].min(), vmax=act[idx].max())

            # Update PSNR (required: psnr.avg is printed after the loop)
            psnr.update(calc_psnr(sr, hr, scale=args.scale,
                                  max_value=args.rgb_range[1]), lr.shape[0])

            t.update(lr.shape[0])

    print('DIV2K (val) PSNR: {:.4f} dB'.format(psnr.avg))
Example #8
def test(dataset, loader, model, criterion, args, device, tag=''):
    losses = AverageMeter()
    psnr = AverageMeter()

    # Set the model to evaluation mode
    model.eval()

    with tqdm(total=len(dataset)) as t:
        t.set_description(tag)

        for data in loader:
            lr, hr = data
            lr = lr.to(device)
            hr = hr.to(device)

            # Predict results without calculating gradients
            with torch.no_grad():
                sr = model(lr)

            loss = criterion(sr, hr)

            # Update loss
            losses.update(loss.item(), lr.shape[0])

            # Quantize results
            sr = quantize(sr, args.rgb_range)

            # Update PSNR
            psnr.update(
                calc_psnr(sr,
                          hr,
                          scale=args.scale,
                          max_value=args.rgb_range[1]), lr.shape[0])

            t.set_postfix(loss='{:.4f}'.format(losses.avg))
            t.update(lr.shape[0])

        return losses.avg, psnr.avg
Example #9
def train_epoch(epoch, data_loader, model, criterion, optimizer, opt,
                class_names, writer):
    print(
        "# ---------------------------------------------------------------------- #"
    )
    print('Training at epoch {}'.format(epoch))
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    end_time = time.time()

    for i, data_item in enumerate(data_loader):
        visual, target, audio, visualization_item, batch_size = process_data_item(
            opt, data_item)
        data_time.update(time.time() - end_time)

        output, loss = run_model(opt, [visual, target, audio],
                                 model,
                                 criterion,
                                 i,
                                 print_attention=False)

        acc = calculate_accuracy(output, target)

        losses.update(loss.item(), batch_size)
        accuracies.update(acc, batch_size)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end_time)
        end_time = time.time()

        # Global step index for TensorBoard logging
        step = (epoch - 1) * len(data_loader) + (i + 1)
        writer.add_scalar('train/batch/loss', losses.val, step)
        writer.add_scalar('train/batch/acc', accuracies.val, step)

        if opt.debug:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
                      epoch,
                      i + 1,
                      len(data_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      acc=accuracies))

    # ---------------------------------------------------------------------- #
    print("Epoch Time: {:.2f}min".format(batch_time.avg * len(data_loader) /
                                         60))
    print("Train loss: {:.4f}".format(losses.avg))
    print("Train acc: {:.4f}".format(accuracies.avg))

    writer.add_scalar('train/epoch/loss', losses.avg, epoch)
    writer.add_scalar('train/epoch/acc', accuracies.avg, epoch)
Example #10
def val_epoch(epoch, data_loader, model, criterion, opt, writer, optimizer):
    print(
        "# ---------------------------------------------------------------------- #"
    )
    print('Validation at epoch {}'.format(epoch))
    model.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    end_time = time.time()

    for i, data_item in enumerate(data_loader):
        visual, target, audio, visualization_item, batch_size = process_data_item(
            opt, data_item)
        data_time.update(time.time() - end_time)
        with torch.no_grad():
            output, loss = run_model(opt, [visual, target, audio], model,
                                     criterion, i)

        acc = calculate_accuracy(output, target)

        losses.update(loss.item(), batch_size)
        accuracies.update(acc, batch_size)
        batch_time.update(time.time() - end_time)
        end_time = time.time()

    writer.add_scalar('val/loss', losses.avg, epoch)
    writer.add_scalar('val/acc', accuracies.avg, epoch)
    print("Val loss: {:.4f}".format(losses.avg))
    print("Val acc: {:.4f}".format(accuracies.avg))

    save_file_path = os.path.join(opt.ckpt_path, 'save_{}.pth'.format(epoch))
    states = {
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    torch.save(states, save_file_path)
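
Because val_epoch() above writes a checkpoint every epoch, resuming later follows the standard torch.save/torch.load pattern. The snippet below is illustrative and not part of the original example.

# Sketch: restore a checkpoint written by val_epoch() above.
checkpoint = torch.load(save_file_path, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']  # stored as epoch + 1, i.e. the next epoch to run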
Example #11
def train(train_loader, model, criterion_encoder, optimizer_encoder, classifier, criterion_fc, optimizer_fc, epoch, opt):
    model.train()

    losses = AverageMeter()
    fc_losses = AverageMeter()
    encoder_losses = AverageMeter()
    top1 = AverageMeter()

    _print("--" * 50)
    for i, (images, labels) in enumerate(train_loader):
        images = torch.cat(images, dim=0).cuda()
        labels = torch.cat([labels, labels], dim=0).cuda()
        BSZ = labels.shape[0]

        warmup_learning_rate(opt, epoch, i, len(train_loader),
                             optimizer_encoder)
        warmup_learning_rate(opt, epoch, i, len(train_loader), optimizer_fc)

        features = model(images, labels)
        encoder_loss = criterion_encoder(features, labels)
        encoder_losses.update(encoder_loss.item(), BSZ)

        logits = classifier(features)
        fc_loss = criterion_fc(logits, labels)
        fc_losses.update(fc_loss.item(), BSZ)

        acc1, acc5 = accuracy(logits, labels, topk=(1, 5))
        top1.update(acc1[0], BSZ)

        loss = encoder_loss + fc_loss
        # loss = fc_loss
        losses.update(loss.item(), BSZ)

        optimizer_encoder.zero_grad()
        optimizer_fc.zero_grad()
        loss.backward()
        optimizer_encoder.step()
        optimizer_fc.step()

        progress_bar(i, len(train_loader), "train")

    _print("epoch:{}".format(epoch))
    _print(
        "loss: {loss.avg:.4f} "
        "encoder_loss: {encoder_loss.avg:.4f} "
        "fc_loss: {fc_loss.avg:.4f} "
        "train acc@1 {top1.avg:.4f} ".format(
            loss=losses, encoder_loss=encoder_losses, fc_loss=fc_losses, top1=top1))
    return losses.avg, top1.avg
Example #12
def validate(val_loader, model, criterion_encoder, classifier, criterion_fc, epoch, opt):
    model.eval()

    losses = AverageMeter()
    fc_losses = AverageMeter()
    encoder_losses = AverageMeter()
    top1 = AverageMeter()

    with torch.no_grad():
        for i, (images, labels) in enumerate(val_loader):
            images = images.cuda()
            labels = labels.cuda()
            BSZ = labels.shape[0]

            # Calculate classification loss
            features = model(images, labels)
            # encoder_loss = criterion_encoder(features, labels)
            # encoder_losses.update(encoder_loss.item(), BSZ)

            logits = classifier(features)
            fc_loss = criterion_fc(logits, labels)
            fc_losses.update(fc_loss.item(), BSZ)

            loss = fc_loss
            losses.update(loss.item(), BSZ)

            acc1, acc5 = accuracy(logits, labels, topk=(1, 5))
            top1.update(acc1[0], BSZ)

            progress_bar(i, len(val_loader), "eval test set")

    _print(
        "loss: {loss.avg:.4f} "
        "fc_loss :{fc_loss.avg:.4f} "
        "acc@1 {top1.avg:.4f}".format(loss=losses, fc_loss=fc_losses, top1=top1))

    return losses.avg, top1.avg
Example #13
def initialize_stats():
    per_label_f1 = defaultdict(list)
    by_length_f1 = defaultdict(list)
    sent_f1, corpus_f1 = AverageMeter(), [0., 0., 0.]
    return per_label_f1, by_length_f1, sent_f1, corpus_f1
Example #14
        (len(folders), len(folders))), np.zeros((len(folders), len(folders)))
    for i, folder_1 in enumerate(folders):
        for j, folder_2 in enumerate(folders):
            # if i > j:
            #     continue
            sent_f1_list, corpus_f1_list = [], []
            for k, pred_path in enumerate(
                    glob(os.path.join('results/DiDeMo', folder_1, '*'))):
                for l, gt_path in enumerate(
                        glob(os.path.join("results/DiDeMo", folder_2, '*'))):
                    if i == j and k >= l:
                        continue
                    predictions = pickle.load(open(pred_path, 'rb'))
                    ground_truths = pickle.load(open(gt_path, 'rb'))

                    sent_f1, corpus_f1 = AverageMeter(), [0., 0., 0.]
                    for prediction, ground_truth in zip(
                            predictions, ground_truths):
                        max_len = len(prediction['caption'].split(' '))
                        pred_span = prediction['span']
                        gold_span = ground_truth['span']
                        assert ground_truth['caption'] == prediction['caption']
                        pred_set = set(
                            (a[0], a[1]) for a in pred_span
                            if a[0] != a[1] or a == (0, max_len - 1))
                        gold_set = set(
                            (a[0], a[1]) for a in gold_span
                            if a[0] != a[1] or a == (0, max_len - 1))
                        if len(gold_set) == 0:
                            continue
                        tp, fp, fn = get_stats(pred_set, gold_set)
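
Example #14 breaks off right after get_stats(). For completeness, here is a hedged sketch of how true-positive, false-positive and false-negative counts are commonly folded into the sent_f1 meter and the corpus_f1 accumulator initialized above; this continuation is an assumption, not recovered original code.

# Hypothetical continuation: accumulate per-sentence and corpus-level F1
# (assumes get_stats returns raw tp/fp/fn counts).
corpus_f1[0] += tp
corpus_f1[1] += fp
corpus_f1[2] += fn
precision = tp / (tp + fp) if tp + fp > 0 else 0.0
recall = tp / (tp + fn) if tp + fn > 0 else 0.0
f1 = (2 * precision * recall / (precision + recall)
      if precision + recall > 0 else 0.0)
sent_f1.update(f1)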