Example #1
# os and torch are required here; TestOptions, DRIT and get_loader are assumed
# to come from the project's own modules.
import os
import torch


def main():
    # parse options
    parser = TestOptions()
    opts = parser.parse()

    # data loader
    train_loader, input_data_par = get_loader(1)

    # model
    print('\n--- load model ---')
    model = DRIT(opts)
    model.setgpu(opts.gpu)
    model.resume(opts.resume, train=False)
    model.eval()

    # directory
    result_dir = os.path.join(opts.result_dir, opts.name)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    # test
    print('\n--- testing ---')
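    # run the model on paired test batches and print the per-batch loss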
    for it, (images_a, images_b, labels) in enumerate(train_loader['test']):
        images_a = images_a.cuda(opts.gpu).detach()
        images_b = images_b.cuda(opts.gpu).detach()
        with torch.no_grad():
            loss = model.test_model(images_a, images_b)
            print('it:{}, loss:{}'.format(it, loss))
    return
Example #2
def main():
    # parse options
    parser = TestOptions()
    opts = parser.parse()

    # data loader
    print('\n--- load dataset ---')
    datasetA = dataset_single(opts, 'A', opts.input_dim_a)
    datasetB = dataset_single(opts, 'B', opts.input_dim_b)
    if opts.a2b:
        loader = torch.utils.data.DataLoader(datasetA,
                                             batch_size=1,
                                             num_workers=opts.nThreads)
        loader_attr = torch.utils.data.DataLoader(datasetB,
                                                  batch_size=1,
                                                  num_workers=opts.nThreads,
                                                  shuffle=True)
    else:
        loader = torch.utils.data.DataLoader(datasetB,
                                             batch_size=1,
                                             num_workers=opts.nThreads)
        loader_attr = torch.utils.data.DataLoader(datasetA,
                                                  batch_size=1,
                                                  num_workers=opts.nThreads,
                                                  shuffle=True)

    # model
    print('\n--- load model ---')
    model = DRIT(opts)
    model.setgpu(opts.gpu)
    model.resume(opts.resume, train=False)
    model.eval()

    # directory
    result_dir = os.path.join(opts.result_dir, opts.name)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    # test
    print('\n--- testing ---')
    for idx1, img1 in enumerate(loader):
        print('{}/{}'.format(idx1, len(loader)))
        img1 = img1.cuda(opts.gpu)
        imgs = [img1]
        names = ['input']
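        # pair img1 with up to opts.num shuffled images from the other domain and run the transfer forward pass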
        for idx2, img2 in enumerate(loader_attr):
            if idx2 == opts.num:
                break
            img2 = img2.cuda(opts.gpu)
            with torch.no_grad():
                if opts.a2b:
                    img = model.test_forward_transfer(img1, img2, a2b=True)
                else:
                    img = model.test_forward_transfer(img2, img1, a2b=False)
            imgs.append(img)
            names.append('output_{}'.format(idx2))
        save_imgs(imgs, names, os.path.join(result_dir, '{}'.format(idx1)))

    return
Example #3
def main():
  # parse options
  parser = TestOptions()
  opts = parser.parse()

  # data loader
  print('\n--- load dataset ---')
  if opts.a2b:
    dataset = dataset_single(opts, 'A', opts.input_dim_a)
    subdir = "a2b"
  else:
    dataset = dataset_single(opts, 'B', opts.input_dim_b)
    subdir = "b2a"
  loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=opts.nThreads)

  # model
  print('\n--- load model ---')
  model = DRIT(opts)
  model.setgpu(opts.gpu)
  model.resume(opts.resume, train=False)
  model.eval()

  # directory
  result_dir = os.path.join(opts.result_dir, opts.name, subdir)
  if not os.path.exists(result_dir):
    os.makedirs(result_dir)

  # test
  print('\n--- testing ---')
  for idx1, img1 in enumerate(loader):
    print('{}/{}'.format(idx1, len(loader)))
    img1 = img1.cuda(opts.gpu)  # move to the same device as the model
    imgs = [img1]
    names = ['input']
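    # produce opts.num translated outputs for this input image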
    for idx2 in range(opts.num):
      with torch.no_grad():
        img = model.test_forward(img1, a2b=opts.a2b)
      imgs.append(img)
      names.append('output_{}'.format(idx2))
    save_imgs(imgs, names, os.path.join(result_dir, '{}'.format(idx1)))

  return
Example #4
def main():
    # parse options
    parser = TestOptions()
    opts = parser.parse()

    # data loader
    print('\n--- load dataset ---')
    datasetA = dataset_single(opts, 'A', opts.input_dim_a)
    datasetB = dataset_single(opts, 'B', opts.input_dim_b)
    if opts.a2b:
        loader = torch.utils.data.DataLoader(datasetA,
                                             batch_size=1,
                                             num_workers=opts.nThreads)
        loader_attr = torch.utils.data.DataLoader(datasetB,
                                                  batch_size=1,
                                                  num_workers=opts.nThreads,
                                                  shuffle=True)
    else:
        loader = torch.utils.data.DataLoader(datasetB,
                                             batch_size=1,
                                             num_workers=opts.nThreads)
        loader_attr = torch.utils.data.DataLoader(datasetA,
                                                  batch_size=1,
                                                  num_workers=opts.nThreads,
                                                  shuffle=True)

    # model
    print('\n--- load model ---')
    model = DRIT(opts)
    model.setgpu(opts.gpu)
    model.resume(opts.resume, train=False)
    model.eval()

    # directory
    result_dir = os.path.join(opts.result_dir, opts.name)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    # test
    print('\n--- testing ---')
    for idx1, (img1, img1_path) in enumerate(loader):
        print('{}/{}'.format(idx1, len(loader)))
        img1_path = img1_path[0]
        img1_prefix = os.path.basename(img1_path).split('.')[0]
        img1 = img1.cuda(opts.gpu)  # move to the same device as the model
        imgs = [img1]
        names = [f'{img1_prefix}_input']
        # look for the attribute image whose filename prefix matches img1
        for idx2, (img2, img2_path) in enumerate(loader_attr):
            img2_path = img2_path[0]
            img2_prefix = os.path.basename(img2_path).split('.')[0]
            if img1_prefix == img2_prefix:
                img2 = img2.cuda(opts.gpu)
                imgs.append(img2)
                names.append(f'{img2_prefix}_real')
                with torch.no_grad():
                    if opts.a2b:
                        img = model.test_forward_transfer(img1, img2, a2b=True)
                    else:
                        img = model.test_forward_transfer(img2,
                                                          img1,
                                                          a2b=False)
                imgs.append(img)
                names.append(f'{img2_prefix}_fake')
                break
        save_imgs(imgs, names, result_dir)

    return
Example #5
test_loader = data.get_test_loader('test', opts.data_name, vocab,
                                   opts.crop_size, opts.batch_size,
                                   opts.workers, opts)

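# visual-semantic embedding model restored from a second checkpoint (opts.resume2)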
subspace = model_2.VSE(opts)
subspace.setgpu()
subspace.load_state_dict(torch.load(opts.resume2))
subspace.val_start()

# model
print('\n--- load model ---')
model = DRIT(opts)
model.setgpu(opts.gpu)
model.resume(opts.resume, train=False)
model.eval()

a = None
b = None
c = None
d = None
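# embed up to opts.test_iter batches of images and captions with the VSE model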
for it, (images, captions, lengths, ids) in enumerate(test_loader):
    if it >= opts.test_iter:
        break
    images = images.cuda(opts.gpu).detach()
    captions = captions.cuda(opts.gpu).detach()

    img_emb, cap_emb = subspace.forward_emb(images,
                                            captions,
                                            lengths,
                                            volatile=True)