Example #1
File: gan.py  Project: ywcmaike/OCFGAN
    def _build_model(self):
        """Initializes the generator and discriminator networks."""
        self.generator, self.discriminator = networks.build_networks(
            gen=self.gen_net,
            disc=self.disc_net,
            ngf=64,
            ndf=self.disc_size,
            imsize=self.imsize,
            nc=self.nc,
            k=self.noise_dim,
            z=self.dout_dim,
            bn=self.batch_norm)
        print('Generator', self.generator)
        print('Discriminator', self.discriminator)
        # Apply the project's weight initialization and move both networks to the GPU.
        self.generator.apply(networks.weights_init)
        self.discriminator.apply(networks.weights_init)
        self.generator.cuda()
        self.discriminator.cuda()
        # Let cuDNN benchmark convolution algorithms for the fixed input sizes.
        cudnn.benchmark = True
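
Both networks are passed through networks.weights_init before training. That helper is part of the OCFGAN project and is not shown in this example; a minimal sketch of the DCGAN-style initializer such code usually uses (the layer matching and constants below are assumptions, not OCFGAN's actual implementation) is:

import torch.nn as nn

def weights_init(m):
    # Hypothetical DCGAN-style init: convolution weights ~ N(0, 0.02),
    # BatchNorm scale ~ N(1, 0.02) with zero bias.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)

Calling model.apply(weights_init) walks every submodule, which is why the snippet applies it once to the generator and once to the discriminator rather than to individual layers.
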
import argparse
import os
import sys

parser = argparse.ArgumentParser()  # earlier, shared arguments from the original script are omitted in this excerpt

# Other options can change with every run
parser.add_argument('--batch_size', type=int, default=64, help='Batch size [default: 64]')
parser.add_argument('--fold', type=str, default='train', help='Fold [default: train]')
parser.add_argument('--start_epoch', type=int, help='Epoch to start from (defaults to most recent epoch)')
parser.add_argument('--count', type=int, default=1, help='Number of counterfactuals to generate')

options = vars(parser.parse_args())

sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from dataloader import CustomDataloader
import counterfactual
from networks import build_networks
from options import load_options


# TODO: Right now, to edit cf_speed et al, you need to edit params.json

# load_options replaces the parsed options with the saved run's settings,
# so hold on to the requested start epoch and restore it afterwards.
start_epoch = options['start_epoch']
options = load_options(options)
options['epoch'] = start_epoch

dataloader = CustomDataloader(**options)

# Batch size must be large enough to make a square grid visual
options['batch_size'] = dataloader.num_classes + 1

networks = build_networks(dataloader.num_classes, **options)

for i in range(options['count']):
    counterfactual.generate_counterfactual(networks, dataloader, **options)
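
The TODO above notes that run-specific settings such as cf_speed come from params.json rather than from the command line; load_options is the project helper that folds that saved file into the parsed options. Its real implementation lives in the options module, but a rough sketch of the merging behavior it implies (the result_dir key and file layout here are assumptions) might look like:

import json
import os

def load_options(options):
    # Sketch only: merge the run's saved params.json into the CLI options,
    # letting values explicitly passed on the command line take precedence.
    params_path = os.path.join(options.get('result_dir', '.'), 'params.json')
    with open(params_path) as fp:
        saved = json.load(fp)
    merged = dict(saved)
    merged.update({k: v for k, v in options.items() if v is not None})
    return merged
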
import json
import os
from pprint import pprint

import torch

# `options`, `train_dataset`, `dataset_off`, `dataloader`, `build_networks` and
# `evaluate_with_comparison` come from earlier parts of the original script.
# Evaluation loaders keep a fixed order (shuffle=False) and drop no batches.
dataloader_train = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=options['batch_size'],
    shuffle=False,
    num_workers=8,
    pin_memory=True,
    drop_last=False)

dataloader_off = torch.utils.data.DataLoader(dataset_off,
                                             batch_size=options['batch_size'],
                                             shuffle=False,
                                             num_workers=8,
                                             pin_memory=True,
                                             drop_last=False)

networks = build_networks(**options)

new_results = evaluate_with_comparison(networks,
                                       dataloader,
                                       comparison_dataloader=dataloader_off,
                                       dataloader_train=dataloader_train,
                                       **options)

pprint(new_results)
if not os.path.exists('evaluation'):
    os.mkdir('evaluation')
result = {options['mode']: new_results}
result_dir = os.path.join('evaluation',
                          'result_{}.json'.format(options['image_size']))
if os.path.exists(result_dir):
    old_result = json.load(open(result_dir))
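
The excerpt ends right after the previous results are loaded. A typical continuation (an assumed completion, not the project's actual code) merges the new per-mode result into the existing JSON and writes it back:

import json
import os

def save_results(result, result_path):
    # Assumed helper: merge `result` into any JSON already at `result_path`
    # so repeated evaluations accumulate one entry per mode.
    combined = {}
    if os.path.exists(result_path):
        with open(result_path) as fp:
            combined = json.load(fp)
    combined.update(result)
    with open(result_path, 'w') as fp:
        json.dump(combined, fp, indent=2)

Here save_results(result, result_dir) would take the place of the dangling json.load call at the end of the excerpt.
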
Example #4
                        help='Size of the image.')
    parser.add_argument(
        '--png',
        action='store_true',
        help='Whether to generate a png '
        '(overrides num_samples and generates an 8x8 grid of images).')
    parser.add_argument('--o',
                        '--out_dir',
                        type=str,
                        default='./',
                        help='Output directory.')

    args = parser.parse_args()

    generator, _ = networks.build_networks(gen=args.gen,
                                           imsize=args.imsize,
                                           k=args.k)
    generator.load_state_dict(torch.load(args.ckpt))
    print('[*] Generator loaded')
    if not os.path.exists(args.o):
        os.makedirs(args.o)
    generator.cuda()
    # A PNG grid uses 64 samples (8x8); otherwise sample in batches of 256.
    batch_size = 64 if args.png else 256
    # Latent noise buffer allocated once on the GPU and refilled in-place each loop.
    noise = torch.cuda.FloatTensor(batch_size, args.k, 1, 1)
    n_loops = 1 if args.png else args.n // batch_size + 1
    samples = []
    for i in tqdm(range(n_loops)):
        noise.normal_(0, 1)
        with torch.no_grad():
            images = generator(noise).detach().cpu().numpy()
        if args.png:  # Generate 8x8 if png