def run():
    """Entry point: parse CLI options, build the frozen config, set up
    stdout logging, then download SVHN and convert both splits into the
    project's packed dataset format via ``prepare_mnist``."""
    parser = argparse.ArgumentParser(description="ALAE prepare SVHN")
    parser.add_argument(
        "--config-file",
        default="configs/svhn.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # Configuration precedence: defaults <- YAML file <- command-line overrides.
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Console-only logging at DEBUG level (no file handler here).
    logger = logging.getLogger("logger")
    logger.setLevel(logging.DEBUG)
    output_dir = cfg.OUTPUT_DIR
    os.makedirs(output_dir, exist_ok=True)
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    log_format = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
    stream_handler.setFormatter(log_format)
    logger.addHandler(stream_handler)

    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    # Fixed seed so the train-split shuffle (and the resulting files) is reproducible.
    random.seed(0)
    os.makedirs("SVHN", exist_ok=True)
    train = list(SVHN('.', split='train', download=True))
    test = list(SVHN('.', split='test', download=True))
    random.shuffle(train)

    # Transpose each image HWC -> CHW and stack; package train and test separately.
    svhn_images = np.stack([np.transpose(sample[0], (2, 0, 1)) for sample in train])
    svhn_labels = np.stack([sample[1] for sample in train])
    prepare_mnist(cfg, logger, svhn_images, svhn_labels, train=True)

    svhn_images = np.stack([np.transpose(sample[0], (2, 0, 1)) for sample in test])
    svhn_labels = np.stack([sample[1] for sample in test])
    prepare_mnist(cfg, logger, svhn_images, svhn_labels, train=False)
def run():
    """Entry point: parse CLI options, build the frozen config, set up
    logging to both stdout and ``<OUTPUT_DIR>/log.txt``, then split the
    FFHQ tfrecord into training and testing parts."""
    parser = argparse.ArgumentParser(
        description="ALAE. Split FFHQ into parts for training and testing")
    parser.add_argument(
        "--config-file",
        default="configs/ffhq.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # Configuration precedence: defaults <- YAML file <- command-line overrides.
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    logger = logging.getLogger("logger")
    logger.setLevel(logging.DEBUG)
    output_dir = cfg.OUTPUT_DIR
    os.makedirs(output_dir, exist_ok=True)

    log_format = logging.Formatter(
        "%(asctime)s %(name)s %(levelname)s: %(message)s")

    # Mirror every DEBUG+ record to the console...
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(log_format)
    logger.addHandler(stream_handler)

    # ...and to a persistent log file inside the output directory.
    file_handler = logging.FileHandler(os.path.join(output_dir, 'log.txt'))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)

    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    split_tfrecord(cfg, logger)
def run():
    """Entry point: parse CLI options, build the frozen config, set up
    stdout logging, then prepare the CelebA-HQ dataset for both the
    training (True) and testing (False) splits."""
    parser = argparse.ArgumentParser(
        description="Adversarial, hierarchical style VAE")
    parser.add_argument(
        "--config-file",
        default="configs/celeba-hq256.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # Configuration precedence: defaults <- YAML file <- command-line overrides.
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Console-only logging at DEBUG level (no file handler here).
    logger = logging.getLogger("logger")
    logger.setLevel(logging.DEBUG)
    output_dir = cfg.OUTPUT_DIR
    os.makedirs(output_dir, exist_ok=True)
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    log_format = logging.Formatter(
        "%(asctime)s %(name)s %(levelname)s: %(message)s")
    stream_handler.setFormatter(log_format)
    logger.addHandler(stream_handler)

    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    # Build the train split first, then the test split.
    prepare_celeba(cfg, logger, True)
    prepare_celeba(cfg, logger, False)
def train_net(gpu_id, args):
    """Single-GPU training entry point: load and freeze the config, set up
    logging to stdout and ``<OUTPUT_DIR>/log.txt``, then launch ``train``.

    NOTE(review): ``gpu_id`` is accepted but never used — the device is
    hard-coded to 0 and the log reports "Using 1 GPUs". Presumably this is a
    deliberate single-GPU setup; confirm before wiring ``gpu_id`` through.
    """
    torch.cuda.set_device(0)

    # Configuration precedence: defaults <- YAML file (no CLI override list here).
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.config_file)
    cfg.freeze()

    logger = logging.getLogger("logger")
    logger.setLevel(logging.DEBUG)
    output_dir = cfg.OUTPUT_DIR
    os.makedirs(output_dir, exist_ok=True)

    log_format = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")

    # Mirror every DEBUG+ record to the console...
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(log_format)
    logger.addHandler(stream_handler)

    # ...and to a persistent log file inside the output directory.
    file_handler = logging.FileHandler(os.path.join(output_dir, 'log.txt'))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)

    logger.info(args)
    logger.info("Using {} GPUs".format(1))
    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    # New tensors default to CUDA float32 for the whole training run.
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    train(cfg, logger)
lod_for_saving_model = lod2batch.lod lod2batch.step() if local_rank == 0: if lod2batch.is_time_to_save(): checkpointer.save("model_tmp_intermediate_lod%d" % lod_for_saving_model) if lod2batch.is_time_to_report(): save_sample(lod2batch, tracker, sample, samplez, x, logger, model_s, cfg, encoder_optimizer, decoder_optimizer) scheduler.step() if local_rank == 0: checkpointer.save("model_tmp_lod%d" % lod_for_saving_model) save_sample(lod2batch, tracker, sample, samplez, x, logger, model_s, cfg, encoder_optimizer, decoder_optimizer) logger.info("Training finish!... save training results") if local_rank == 0: checkpointer.save("model_final").wait() if __name__ == "__main__": gpu_count = torch.cuda.device_count() run(train, get_cfg_defaults(), description='StyleGAN', default_config='configs/experiment_celeba_sep.yaml', world_size=gpu_count)
with torch.no_grad(): for filename in paths: img = np.asarray(Image.open(path + '/' + filename)) if img.shape[2] == 4: img = img[:, :, :3] im = img.transpose((2, 0, 1)) x = torch.tensor(np.asarray(im, dtype=np.float32), device='cpu', requires_grad=True).cuda() / 127.5 - 1. if x.shape[0] == 4: x = x[:3] latents = encode(x[None, ...].cuda()) f = decode(latents) r = torch.cat([x[None, ...].detach().cpu(), f.detach().cpu()], dim=3) canvas.append(r) return canvas canvas = make(paths[:40]) canvas = torch.cat(canvas, dim=0) save_image(canvas * 0.5 + 0.5, 'make_figures/output/reconstructions_bed_1.png', nrow=4, pad_value=1.0) canvas = make(paths[40:80]) canvas = torch.cat(canvas, dim=0) save_image(canvas * 0.5 + 0.5, 'make_figures/output/reconstructions_bed_2.png', nrow=4, pad_value=1.0) if __name__ == "__main__": gpu_count = 1 run(sample, get_cfg_defaults(), description='ALAE-reconstruction-bedroom', default_config='configs/bedroom.yaml', world_size=gpu_count, write_log=False)
def build_configuration(config_type='celeba', configs_dir=None):
    """Load the default config and merge ``<configs_dir>/<config_type>.yaml``
    over it.

    :param config_type: basename (without extension) of the YAML file to merge.
    :param configs_dir: directory holding the YAML files; falls back to
        ``'configs'`` when falsy (None or empty string).
    :return: the merged (not frozen) config node.
    """
    base_dir = configs_dir if configs_dir else 'configs'
    cfg = get_cfg_defaults()
    cfg.merge_from_file(os.path.join(base_dir, f'{config_type}.yaml'))
    return cfg
'generator_s': decoder, 'mapping_tl_s': mapping_tl, 'mapping_fl_s': mapping_fl, 'dlatent_avg': dlatent_avg } checkpointer = Checkpointer(cfg, model_dict, {}, logger=logger, save=False) checkpointer.load() model.eval() layer_count = cfg.MODEL.LAYER_COUNT decoder = nn.DataParallel(decoder) im_size = 2 ** (cfg.MODEL.LAYER_COUNT + 1) with torch.no_grad(): draw_uncurated_result_figure(cfg, 'make_figures/output/%s/generations.jpg' % cfg.NAME, model, cx=0, cy=0, cw=im_size, ch=im_size, rows=6, lods=[0, 0, 0, 1, 1, 2], seed=5) if __name__ == "__main__": gpu_count = 1 run(sample, get_cfg_defaults(), description='ALAE-generations', default_config='configs/ffhq.yaml', world_size=gpu_count, write_log=False)
betta = 0.5 ** (lod2batch.get_batch_size() / (10 * 1000.0)) model_s.lerp(model, betta) epoch_end_time = time.time() per_epoch_ptime = epoch_end_time - epoch_start_time lod_for_saving_model = lod2batch.lod lod2batch.step() if local_rank == 0: if lod2batch.is_time_to_save(): checkpointer.save("model_tmp_intermediate_lod%d" % lod_for_saving_model) if lod2batch.is_time_to_report(): save_sample(lod2batch, tracker, sample, samplez, x, logger, model_s, cfg, encoder_optimizer, decoder_optimizer) scheduler.step() if local_rank == 0: checkpointer.save("model_tmp_lod%d" % lod_for_saving_model) save_sample(lod2batch, tracker, sample, samplez, x, logger, model_s, cfg, encoder_optimizer, decoder_optimizer) logger.info("Training finish!... save training results") if local_rank == 0: checkpointer.save("model_final").wait() if __name__ == "__main__": gpu_count = torch.cuda.device_count() run(train, get_cfg_defaults(), description='StyleGAN', default_config='configs/ffhq.yaml', world_size=gpu_count)
i * initial_resolution + height_padding + y * (res + current_padding)) place(canvas, imr, current_padding + res + x * (2 * res + current_padding), i * initial_resolution + height_padding + y * (res + current_padding)) except IndexError: return canvas height_padding += initial_padding * 2 current_padding -= padding_step padd += padding_step return canvas canvas = [make_part(current_padding, src0, rec0), make_part(current_padding, src1, rec1), make_part(current_padding, src2, rec2), make_part(current_padding, src3, rec3)] canvas = np.concatenate(canvas, axis=2) print('Saving image') save_path = 'make_figures/output/%s/reconstructions_multiresolution.png' % cfg.NAME os.makedirs(os.path.dirname(save_path), exist_ok=True) save_image(torch.Tensor(canvas), save_path) if __name__ == "__main__": gpu_count = 1 run(sample, get_cfg_defaults(), description='ALAE-reconstruction_figure', default_config='configs/ffhq.yaml', world_size=gpu_count, write_log=False)
for v, label in zip(attribute_values, labels): bimpy.slider_float(label, v, -40.0, 40.0) bimpy.checkbox("Randomize noise", randomize) if randomize.value: seed += 1 torch.manual_seed(seed) if bimpy.button('Next'): latents, latents_original, img_src = loadNext() display_original = True if bimpy.button('Display Reconstruction'): display_original = False if bimpy.button('Generate random'): latents, latents_original, img_src = loadRandom() display_original = False if bimpy.input_text("Current file", current_file, 64) and os.path.exists(path + '/' + current_file.value): paths.insert(0, current_file.value) latents, latents_original, img_src = loadNext() bimpy.end() if __name__ == "__main__": gpu_count = 1 run(sample, get_cfg_defaults(), description='ALAE-interactive', default_config='configs/ffhq.yaml', world_size=gpu_count, write_log=False)