def main():
    args = parser.parse_args()

    if args.device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device(args.device)

    # modified to lock the directory
    outdir = args.outdir
    if outdir is None:
        outdir = os.path.join(args.path[-1], 'metrics/fidpth')

    os.makedirs(outdir, exist_ok=True)
    pidfile.exit_if_job_done(outdir, redo=False)

    fid_value = calculate_fid_given_paths(args.path,
                                          args.batch_size,
                                          device,
                                          args.dims)
    print('FID: ', fid_value)

    # modified: save it
    np.savez(os.path.join(outdir, 'fid.npz'), fid=fid_value)
    pidfile.mark_job_done(outdir)
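Every example on this page wraps its work in the same lock / work / unlock pattern: pidfile.exit_if_job_done(dir) claims an output directory (or exits if the job is already done or another process holds it), and pidfile.mark_job_done(dir) records completion and releases the lock. The module's internals are not shown on this page, so the following is only a minimal sketch of how such a helper could work; everything except the two function names is an assumption:

import os
import sys

def exit_if_job_done(directory, redo=False):
    # Claim `directory` for this process, or exit quietly.
    os.makedirs(directory, exist_ok=True)
    done_file = os.path.join(directory, 'done.txt')
    lock_file = os.path.join(directory, 'lockfile.txt')
    if os.path.isfile(done_file) and not redo:
        sys.exit(0)  # job already finished in an earlier run
    try:
        # O_EXCL makes creation atomic: exactly one process wins the lock
        fd = os.open(lock_file, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        os.write(fd, str(os.getpid()).encode())
        os.close(fd)
    except FileExistsError:
        sys.exit(0)  # another process is already working on this job

def mark_job_done(directory):
    # Record completion and release the lock.
    with open(os.path.join(directory, 'done.txt'), 'w') as f:
        f.write('done')
    os.remove(os.path.join(directory, 'lockfile.txt'))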
Example #2
    parser.add_argument('--workers', type=int, default=4, help='workers')
    parser.add_argument('--niter',
                        type=int,
                        default=200,
                        help='number of epochs to train for')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='learning rate, default=0.001')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.9,
                        help='beta1 for adam, default=0.9')
    parser.add_argument('--seed', default=0, type=int, help='manual seed')
    parser.add_argument('--outf', type=str, help='output directory')

    opt = parser.parse_args()
    print(opt)

    if opt.outf is None:
        opt.outf = 'results/classifiers/celebahq/%s' % opt.attribute

    os.makedirs(opt.outf, exist_ok=True)
    # save options
    pidfile.exit_if_job_done(opt.outf)
    with open(os.path.join(opt.outf, 'optE.yml'), 'w') as f:
        yaml.dump(vars(opt), f, default_flow_style=False)
    train(opt)
    print("finished training!")
    pidfile.mark_job_done(opt.outf)
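The options dumped to optE.yml above can be restored later with yaml.safe_load; a small illustrative read-back (only the file name comes from the snippet):

import os
import yaml

with open(os.path.join(opt.outf, 'optE.yml')) as f:
    saved_opt = yaml.safe_load(f)  # plain dict of the saved training options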
Example #3
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("path", type=str, nargs=1,
                        help='Path to the generated images')
    parser.add_argument('--outdir', type=str, default=None,
                        help='path to save computed prdc')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='batch size to compute vgg features')
    parser.add_argument('--workers', type=int, default=4,
                        help='data loading workers')
    parser.add_argument('--load_size', type=int, default=256,
                        help='size to load images at')
    parser.add_argument('--crop_aspect_car', action='store_true',
                        help='crop out border padding for cars')

    args = parser.parse_args()
    outdir = args.outdir
    if outdir is None:
        outdir = os.path.join(args.path[-1], 'metrics/distances')
    pidfile.exit_if_job_done(outdir, redo=False)

    metrics = compute_distances(args)
    for k, v in metrics.items():
        if 'avg' in k:
            print("{}: {}".format(k, v))
    np.savez(os.path.join(outdir, 'distances.npz'), **metrics)

    pidfile.mark_job_done(outdir)
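Because the metrics are written with np.savez, a later script can recover them as a plain dict; a minimal sketch:

import os
import numpy as np

with np.load(os.path.join(outdir, 'distances.npz')) as data:
    metrics = {k: data[k] for k in data.files}  # e.g. the 'avg' entries printed above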



Example #4
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(
                batch_idx, len(testloader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (test_loss / (batch_idx + 1), 100. * correct / total,
                 correct, total))

    # Save checkpoint.
    acc = 100. * correct / total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        torch.save(state, '%s/ckpt.pth' % save_dir)
        best_acc = acc


for epoch in range(start_epoch, start_epoch + 200):
    train(epoch)
    test(epoch)
    scheduler.step()

pidfile.mark_job_done(save_dir)
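The checkpoint written above stores the weights, best accuracy, and epoch, so a later run can resume from it. A hedged sketch of the matching load side (variable names follow the snippet; the load code itself is not part of the original):

# resume from the checkpoint written in test()
state = torch.load('%s/ckpt.pth' % save_dir)
net.load_state_dict(state['net'])
best_acc = state['acc']
start_epoch = state['epoch'] + 1  # continue after the saved epoch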
Example #5
            tags.append(tag)
            amts.append(amt)

        images = model.decode(feps)

        # Save images as PNG.
        for idx in range(images.shape[0]):
            filename = os.path.join(
                opt.output_path,
                'seed%03d_sample%06d.%s' % (opt.seed, batch_start + idx,
                                            opt.format))
            im = PIL.Image.fromarray(images[idx], 'RGB')
            if opt.resize:
                im = im.resize((opt.resize, opt.resize), PIL.Image.LANCZOS)
            im.save(filename)

    if opt.manipulate:
        outfile = os.path.join(opt.output_path, 'manipulations.npz')
        np.savez(outfile, tags=np.concatenate(tags), amts=np.concatenate(amts))


if __name__ == '__main__':
    opt = parse_args()
    if os.environ.get('CUDA_VISIBLE_DEVICES') is None:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
    expdir = opt.output_path
    os.makedirs(expdir, exist_ok=True)

    pidfile.exit_if_job_done(expdir)
    sample(opt)
    pidfile.mark_job_done(expdir)
Example #6
                pil_image.save(filename + '.png')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate GAN samples')
    parser.add_argument('--model', required=True, help='proggan, stylegan')
    parser.add_argument('--domain', required=True, help='church, ffhq... etc')
    parser.add_argument('--outdir', required=True, help='output directory')
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help='random seed for z samples')
    parser.add_argument('--num_samples',
                        type=int,
                        default=500,
                        help='number of samples')
    parser.add_argument('--batch_size',
                        type=int,
                        default=16,
                        help='batch_size')
    parser.add_argument('--im_size', type=int, help='resize to this size')
    args = parser.parse_args()
    args.outdir = args.outdir.format(**vars(args))
    os.makedirs(args.outdir, exist_ok=True)
    pidfile.exit_if_job_done(args.outdir)
    main(args)
    cmd = f'cp utils/lightbox.html {args.outdir}/+lightbox.html'
    print(cmd)
    os.system(cmd)
    pidfile.mark_job_done(args.outdir)
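Shelling out to cp works only where a Unix cp exists; a more portable equivalent of the copy step above, shown purely as an alternative sketch:

import shutil

# copies the lightbox viewer next to the generated samples
shutil.copy('utils/lightbox.html',
            os.path.join(args.outdir, '+lightbox.html'))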
Example #7
    # additional options for top n patches
    options.parser.add_argument(
        '--unique',
        action='store_true',
        help='take only 1 patch per image when computing top n')
    opt = options.parse()
    print("Calculating patches from model: %s epoch %s" %
          (opt.name, opt.which_epoch))
    print("On dataset (real): %s" % (opt.real_im_path))
    print("And dataset (fake): %s" % (opt.fake_im_path))
    expdir = opt.name
    dataset_name = opt.dataset_name
    output_dir = os.path.join(opt.results_dir, expdir, opt.partition,
                              'epoch_%s' % opt.which_epoch, dataset_name,
                              'patches_top%d' % opt.topn)
    print(output_dir)
    os.makedirs(output_dir, exist_ok=True)

    # check if checkpoint is out of date
    redo = opt.force_redo
    ckpt_path = os.path.join(opt.checkpoints_dir, opt.name,
                             '%s_net_D.pth' % opt.which_epoch)
    timestamp_path = os.path.join(output_dir,
                                  'timestamp_%s_net_D.txt' % opt.which_epoch)
    if util.check_timestamp(ckpt_path, timestamp_path):
        redo = True
        util.update_timestamp(ckpt_path, timestamp_path)
    pidfile.exit_if_job_done(output_dir, redo=redo)
    run_patch_topn(opt, output_dir)
    pidfile.mark_job_done(output_dir)
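util.check_timestamp and util.update_timestamp are not defined in this snippet. Judging from how they are called, one plausible reading is a modification-time comparison against a recorded value; a sketch under that assumption only:

import os

def check_timestamp(ckpt_path, timestamp_path):
    # True if the checkpoint changed since the timestamp file was written.
    if not os.path.isfile(timestamp_path):
        return True
    with open(timestamp_path) as f:
        recorded = float(f.read().strip())
    return os.path.getmtime(ckpt_path) > recorded

def update_timestamp(ckpt_path, timestamp_path):
    # Remember the checkpoint's current modification time.
    with open(timestamp_path, 'w') as f:
        f.write(str(os.path.getmtime(ckpt_path)))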
Example #8
            log_str = (f"[TRAIN] Iter: {i} Loss: {loss.item()} "
                       f"PSNR: {psnr.item()} PSNR0: {psnr0} "
                       f"Var loss: {var_loss} "
                       f"Var loss coarse: {var_loss_coarse} "
                       f"Weight change loss: {weight_change_loss}")
            with open(os.path.join(basedir, expname, 'log.txt'), 'a+') as f:
                f.write(log_str + '\n')
            print(log_str)

        global_step += 1

        if real_image_application and global_step - start == args.n_iters_real:
            return

        if real_image_application and global_step - start == args.n_iters_code_only:
            optimize_mlp = True
            dataset.optimizer_name = 'adam'
            dataset.style_optimizer = torch.optim.Adam(dataset.params,
                                                       lr=dataset.lr)
            print('Starting to jointly optimize weights with code')


if __name__ == '__main__':
    parser = config_parser()
    args = parser.parse_args()
    if args.instance != -1:
        # Allows for scripting over single instance experiments.
        exit_if_job_done(os.path.join(args.basedir, args.expname))
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        train()
        mark_job_done(os.path.join(args.basedir, args.expname))
    else:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        train()
Example #9
        # update val losses
        for k, v in losses.items():
            val_losses[k + '_val'].update(v, n=len(inputs['labels']))

    # get average val losses
    for k, v in val_losses.items():
        val_losses[k] = v.avg

    return val_losses


if __name__ == '__main__':
    options = TrainOptions(print_opt=False)
    opt = options.parse()

    # lock active experiment directory and write out options
    os.makedirs(os.path.join(opt.checkpoints_dir, opt.name), exist_ok=True)
    pidfile.exit_if_job_done(os.path.join(opt.checkpoints_dir, opt.name))
    options.print_options(opt)

    # configure logging file
    logging_file = os.path.join(opt.checkpoints_dir, opt.name, 'log.txt')
    utils.logging.configure(logging_file, append=False)

    # run train loop
    train(opt)

    # mark done and release lock
    pidfile.mark_job_done(os.path.join(opt.checkpoints_dir, opt.name))
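The val_losses values above expose update(value, n=...) and .avg, the usual averaging-meter interface. The class itself is not part of the snippet; a minimal sketch consistent with that usage:

class AverageMeter:
    # Running average over a stream of (value, count) updates.
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)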
Example #10
classifier = domain_classifier.define_classifier(args.domain,
                                                 args.classifier_name)

# output data structure
ensemble_data = {
    'original': [],  # predictions of the original image
    'label': [],  # GT label
    args.aug_type: [],  # ensembled predictions using specified image aug
    'random_seed': args.seed,
}

# do evaluation
for i, imdata_from_loader in enumerate(tqdm(loader)):
    # fix for too many open files error
    # https://github.com/pytorch/pytorch/issues/11201
    imdata = copy.deepcopy(imdata_from_loader)
    del imdata_from_loader
    im = imdata[0]  # shape: 1xn_ensxCxHxW
    label = imdata[1]
    with torch.no_grad():
        ensemble_data['label'].append(label.numpy())
        im = im[0].cuda()  # removes unused batch dim, keeps ensemble dim
        predictions = domain_classifier.postprocess(classifier(im))
        predictions_np = predictions.cpu().numpy()
        ensemble_data['original'].append(predictions_np[[0]])
        ensemble_data[args.aug_type].append(predictions_np[1:])

# save the result and unlock directory
np.savez(data_filename, **ensemble_data)
pidfile.mark_job_done(lockdir)
Example #11
        # histogram
        fig, ax = plt.subplots(1, 1, figsize=(6, 4))
        ax.bar(range(1, len(labels) + 1), counts)
        ax.set_xticks(range(1, len(labels) + 1))
        ax.set_xticklabels(labels, rotation='vertical')
        ax.set_ylabel('count')
        fig.savefig(os.path.join(cluster_dir, 'histogram.pdf'),
                    bbox_inches='tight')

        # write counts to file
        with open(os.path.join(cluster_dir, 'counts.txt'), 'w') as f:
            for line in infos:
                f.write('%s\n' % line)

        # write random patch baseline to file
        infos = []
        ranked = sorted(clusters_baseline.items(),
                        key=lambda item: item[1])[::-1]
        for index, (k, v) in enumerate(ranked):
            infos.append('%d: %s, %d patches' % (index, k, v))
        with open(os.path.join(cluster_dir, 'baseline.txt'), 'w') as f:
            for line in infos:
                f.write('%s\n' % line)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Cluster patches using face segmentation.')
    parser.add_argument('path', type=str, help='path to precomputed top clusters')
    args = parser.parse_args()
    outpath = os.path.join(args.path, 'clusters')
    os.makedirs(outpath, exist_ok=True)
    pidfile.exit_if_job_done(outpath, redo=True)
    cluster(args, outpath)
    pidfile.mark_job_done(outpath)