def main():
    args = parser.parse_args()

    if args.device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device(args.device)

    # modified to lock the directory
    outdir = args.outdir
    if outdir is None:
        outdir = os.path.join(args.path[-1], 'metrics/fidpth')

    os.makedirs(outdir, exist_ok=True)
    pidfile.exit_if_job_done(outdir, redo=False)

    fid_value = calculate_fid_given_paths(args.path,
                                          args.batch_size,
                                          device,
                                          args.dims)
    print('FID:', fid_value)

    # modified: save it
    np.savez(os.path.join(outdir, 'fid.npz'), fid=fid_value)
    pidfile.mark_job_done(outdir)
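
# Every example in this listing follows the same locking pattern:
# exit_if_job_done skips work that a previous run already finished, and
# mark_job_done records completion. A minimal sketch of that contract,
# assuming a simple marker-file scheme (the real pidfile module may differ):
import os
import sys

def exit_if_job_done(directory, redo=False):
    # Exit early if a previous run already marked this directory as done.
    if os.path.exists(os.path.join(directory, 'done.txt')) and not redo:
        print('%s is already done; exiting.' % directory)
        sys.exit(0)

def mark_job_done(directory):
    # Write the marker so later runs can skip this job.
    with open(os.path.join(directory, 'done.txt'), 'w') as f:
        f.write('done\n')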
Example 2
    parser.add_argument('--workers', type=int, default=4, help='workers')
    parser.add_argument('--niter',
                        type=int,
                        default=200,
                        help='number of epochs to train for')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='learning rate, default=0.001')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.9,
                        help='beta1 for adam, default=0.9')
    parser.add_argument('--seed', default=0, type=int, help='manual seed')
    parser.add_argument('--outf', type=str, help='output directory')

    opt = parser.parse_args()
    print(opt)

    if opt.outf is None:
        opt.outf = 'results/classifiers/celebahq/%s' % opt.attribute

    os.makedirs(opt.outf, exist_ok=True)
    # lock the output directory, then save options
    pidfile.exit_if_job_done(opt.outf)
    with open(os.path.join(opt.outf, 'optE.yml'), 'w') as f:
        yaml.dump(vars(opt), f, default_flow_style=False)
    train(opt)
    print("finished training!")
    pidfile.mark_job_done(opt.outf)
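
# The optE.yml dump above makes a run reproducible: the saved options can be
# read back as a plain dict. A sketch, assuming PyYAML; the directory below is
# a hypothetical stand-in for opt.outf:
import os
import yaml

with open(os.path.join('results/classifiers/celebahq/some_attribute',
                       'optE.yml')) as f:
    saved_opt = yaml.safe_load(f)  # dict of the saved argparse namespace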
Example 3
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("path", type=str, nargs=1,
                        help='Path to the generated images')
    parser.add_argument('--outdir', type=str, default=None,
                        help='path to save computed prdc')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='batch size to compute vgg features')
    parser.add_argument('--workers', type=int, default=4,
                        help='data loading workers')
    parser.add_argument('--load_size', type=int, default=256,
                        help='size to load images at')
    parser.add_argument('--crop_aspect_car', action='store_true',
                        help='crop out border padding for cars')

    args = parser.parse_args()
    outdir = args.outdir
    if outdir is None:
        outdir = os.path.join(args.path[-1], 'metrics/distances')
    os.makedirs(outdir, exist_ok=True)
    pidfile.exit_if_job_done(outdir, redo=False)

    metrics = compute_distances(args)
    for k, v in metrics.items():
        if 'avg' in k:
            print("{}: {}".format(k, v))
    np.savez(os.path.join(outdir, 'distances.npz'), **metrics)

    pidfile.mark_job_done(outdir)
Example 4
parser.add_argument('--resume',
                    '-r',
                    action='store_true',
                    help='resume from checkpoint')
parser.add_argument('--stylemix_layer',
                    type=str,
                    help='which layer to perform stylemixing (e.g. fine)')
args = parser.parse_args()

# setup output directory
if args.stylemix_layer is None:
    save_dir = 'results/classifiers/cifar10/latentclassifier'
else:
    save_dir = 'results/classifiers/cifar10/latentclassifier_stylemix_%s' % args.stylemix_layer
os.makedirs(save_dir, exist_ok=True)
pidfile.exit_if_job_done(save_dir)
torch.manual_seed(0)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
transform_train = data.get_transform('cifar10', 'imtrain')
transform_test = data.get_transform('cifar10', 'imval')
trainset = data.get_dataset('cifar10',
                            'train',
                            load_w=True,
                            transform=transform_train)
# the source is truncated here; typical loader arguments are assumed below
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=128,
                                          shuffle=True,
                                          num_workers=2)
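
# The --resume flag together with best_acc / start_epoch above implies the
# usual checkpoint-resume pattern. A minimal sketch; the checkpoint filename
# and the 'net', 'acc', 'epoch' keys are assumptions, not shown in the snippet:
import os
import torch

def maybe_resume(net, save_dir, resume):
    # Restore weights and bookkeeping from a checkpoint if one exists.
    best_acc, start_epoch = 0, 0
    ckpt_file = os.path.join(save_dir, 'ckpt.pth')
    if resume and os.path.exists(ckpt_file):
        ckpt = torch.load(ckpt_file)
        net.load_state_dict(ckpt['net'])
        best_acc, start_epoch = ckpt['acc'], ckpt['epoch'] + 1
    return best_acc, start_epoch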
Example 5
            tags.append(tag)
            amts.append(amt)

        images = model.decode(feps)

        # Save images as PNG.
        for idx in range(images.shape[0]):
            filename = os.path.join(opt.output_path, 'seed%03d_sample%06d.%s'
                                        % (opt.seed, batch_start + idx,
                                           opt.format))
            im = PIL.Image.fromarray(images[idx], 'RGB')
            if opt.resize:
                im = im.resize((opt.resize, opt.resize), PIL.Image.LANCZOS)
            im.save(filename)

    if opt.manipulate:
        outfile = os.path.join(opt.output_path, 'manipulations.npz')
        np.savez(outfile, tags=np.concatenate(tags), amts=np.concatenate(amts))


if __name__ == '__main__':
    opt = parse_args()
    if os.environ.get('CUDA_VISIBLE_DEVICES') is None:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
    expdir = opt.output_path
    os.makedirs(expdir, exist_ok=True)

    pidfile.exit_if_job_done(expdir)
    sample(opt)
    pidfile.mark_job_done(expdir)
Example 6
                pil_image.save(filename + '.png')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate GAN samples')
    parser.add_argument('--model', required=True, help='proggan, stylegan')
    parser.add_argument('--domain', required=True, help='church, ffhq... etc')
    parser.add_argument('--outdir', required=True, help='output directory')
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help='random seed for z samples')
    parser.add_argument('--num_samples',
                        type=int,
                        default=500,
                        help='number of samples')
    parser.add_argument('--batch_size',
                        type=int,
                        default=16,
                        help='batch_size')
    parser.add_argument('--im_size', type=int, help='resize to this size')
    args = parser.parse_args()
    args.outdir = args.outdir.format(**vars(args))
    os.makedirs(args.outdir, exist_ok=True)
    pidfile.exit_if_job_done(args.outdir)
    main(args)
    cmd = f'cp utils/lightbox.html {args.outdir}/+lightbox.html'
    print(cmd)
    os.system(cmd)
    pidfile.mark_job_done(args.outdir)
Example 7
    # additional options for top n patches
    options.parser.add_argument(
        '--unique',
        action='store_true',
        help='take only 1 patch per image when computing top n')
    opt = options.parse()
    print("Calculating patches from model: %s epoch %s" %
          (opt.name, opt.which_epoch))
    print("On dataset (real): %s" % (opt.real_im_path))
    print("And dataset (fake): %s" % (opt.fake_im_path))
    expdir = opt.name
    dataset_name = opt.dataset_name
    output_dir = os.path.join(opt.results_dir, expdir, opt.partition,
                              'epoch_%s' % opt.which_epoch, dataset_name,
                              'patches_top%d' % opt.topn)
    print(output_dir)
    os.makedirs(output_dir, exist_ok=True)

    # check if checkpoint is out of date
    redo = opt.force_redo
    ckpt_path = os.path.join(opt.checkpoints_dir, opt.name,
                             '%s_net_D.pth' % opt.which_epoch)
    timestamp_path = os.path.join(output_dir,
                                  'timestamp_%s_net_D.txt' % opt.which_epoch)
    if util.check_timestamp(ckpt_path, timestamp_path):
        redo = True
        util.update_timestamp(ckpt_path, timestamp_path)
    pidfile.exit_if_job_done(output_dir, redo=redo)
    run_patch_topn(opt, output_dir)
    pidfile.mark_job_done(output_dir)
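
# util.check_timestamp / util.update_timestamp above suggest an mtime-based
# staleness check on the checkpoint. A minimal sketch under that assumption
# (the project's real util module may differ):
import os

def check_timestamp(ckpt_path, timestamp_path):
    # True if the checkpoint is newer than the recorded timestamp, or if no
    # timestamp has been recorded yet.
    if not os.path.exists(timestamp_path):
        return True
    with open(timestamp_path) as f:
        recorded = float(f.read().strip())
    return os.path.getmtime(ckpt_path) > recorded

def update_timestamp(ckpt_path, timestamp_path):
    # Record the checkpoint's current mtime for the next run's comparison.
    with open(timestamp_path, 'w') as f:
        f.write(str(os.path.getmtime(ckpt_path)))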
Example 8
            log_str = (f"[TRAIN] Iter: {i} Loss: {loss.item()} "
                       f"PSNR: {psnr.item()} PSNR0: {psnr0} "
                       f"Var loss: {var_loss} Var loss coarse: {var_loss_coarse} "
                       f"Weight change loss: {weight_change_loss}")
            with open(os.path.join(basedir, expname, 'log.txt'), 'a+') as f:
                f.write(log_str + '\n')
            print(log_str)

        global_step += 1

        if real_image_application and global_step - start == args.n_iters_real:
            return

        if real_image_application and global_step - start == args.n_iters_code_only:
            optimize_mlp = True
            dataset.optimizer_name = 'adam'
            dataset.style_optimizer = torch.optim.Adam(dataset.params,
                                                       lr=dataset.lr)
            print('Starting to jointly optimize weights with code')


if __name__ == '__main__':
    parser = config_parser()
    args = parser.parse_args()
    if args.instance != -1:
        # Allows for scripting over single instance experiments.
        exit_if_job_done(os.path.join(args.basedir, args.expname))
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        train()
        mark_job_done(os.path.join(args.basedir, args.expname))
    else:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        train()
Example 9
        # update val losses
        for k, v in losses.items():
            val_losses[k + '_val'].update(v, n=len(inputs['labels']))

    # get average val losses
    for k, v in val_losses.items():
        val_losses[k] = v.avg

    return val_losses


if __name__ == '__main__':
    options = TrainOptions(print_opt=False)
    opt = options.parse()

    # lock active experiment directory and write out options
    os.makedirs(os.path.join(opt.checkpoints_dir, opt.name), exist_ok=True)
    pidfile.exit_if_job_done(os.path.join(opt.checkpoints_dir, opt.name))
    options.print_options(opt)

    # configure logging file
    logging_file = os.path.join(opt.checkpoints_dir, opt.name, 'log.txt')
    utils.logging.configure(logging_file, append=False)

    # run train loop
    train(opt)

    # mark done and release lock
    pidfile.mark_job_done(os.path.join(opt.checkpoints_dir, opt.name))
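
# The .update(v, n=...) / .avg calls above imply a running-average meter.
# A minimal sketch of that common pattern (the project's actual class is
# not shown in the snippet):
class AverageMeter:
    """Tracks a weighted running sum and count to expose an average."""
    def __init__(self):
        self.sum, self.count = 0.0, 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)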
Example 10
parser.add_argument('--aug_type', type=str,
                    help='which image transform to use for ensembling')
parser.add_argument('--partition', type=str, default='val')
parser.add_argument('--n_ens', type=int, default=32)
parser.add_argument('--seed', type=int, default=2)
args = parser.parse_args()
print(args)

# set random seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)

# lock the experiment directory
lockdir = f'results/evaluations/{args.domain}/lockfiles/{args.classifier_name}_{args.partition}/image_ensemble_{args.aug_type}'
os.makedirs(lockdir, exist_ok=True)
pidfile.exit_if_job_done(lockdir, redo=False)

# data output filename
data_filename = lockdir.replace('lockfiles', 'output') + '.npz'
os.makedirs(os.path.dirname(data_filename), exist_ok=True)
print("saving result in: %s" % data_filename)

# load dataset and classifier
val_transform = data.get_transform(args.domain, 'imval')
ensemble_transform = data.get_transform(args.domain, args.aug_type)
transform = ImageEnsemble(val_transform, ensemble_transform, args.n_ens)
print("Ensemble transform:")
print(ensemble_transform)
if 'celebahq' in args.domain:
    # for celebahq, load the attribute-specific dataset
    attribute = args.classifier_name.split('__')[0]
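
# ImageEnsemble above apparently pairs one deterministic val transform with
# n_ens randomly augmented views of the same image. A minimal sketch under
# that assumption (the project's actual class may differ):
import torch

class ImageEnsemble:
    def __init__(self, base_transform, aug_transform, n_ens):
        self.base_transform = base_transform
        self.aug_transform = aug_transform
        self.n_ens = n_ens

    def __call__(self, img):
        # Stack the deterministic view with n_ens independently augmented views.
        views = [self.base_transform(img)]
        views += [self.aug_transform(img) for _ in range(self.n_ens)]
        return torch.stack(views)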
Example 11
        # histogram
        f, ax = plt.subplots(1, 1, figsize=(6, 4))
        ax.bar(range(1, len(labels) + 1), counts)
        ax.set_xticks(range(1, len(labels) + 1))
        ax.set_xticklabels(labels, rotation='vertical')
        ax.set_ylabel('count')
        f.savefig(os.path.join(cluster_dir, 'histogram.pdf'),
                  bbox_inches='tight')

        # write counts to file
        with open(os.path.join(cluster_dir, 'counts.txt'), 'w') as f:
            for line in infos:
                f.write('%s\n' % line)

        # write random patch baseline to file
        infos = []
        for index, (k, v) in enumerate(
                sorted(clusters_baseline.items(),
                       key=lambda item: item[1],
                       reverse=True)):
            infos.append('%d: %s, %d patches' % (index, k, v))
        with open(os.path.join(cluster_dir, 'baseline.txt'), 'w') as f:
            for line in infos:
                f.write('%s\n' % line)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Cluster patches using face segmentation.')
    parser.add_argument('path', type=str, help='path to precomputed top clusters')
    args = parser.parse_args()
    outpath = os.path.join(args.path, 'clusters')
    os.makedirs(outpath, exist_ok=True)
    pidfile.exit_if_job_done(outpath, redo=True)
    cluster(args, outpath)
    pidfile.mark_job_done(outpath)