def train(args):
    assets = AssetManager(args.base_dir)

    # Fresh output directories for a new run; reuse the existing ones when retraining.
    if not args.retrain:
        model_dir = assets.recreate_model_dir(args.model_name)
        tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)
    else:
        model_dir = assets.get_model_dir(args.model_name)
        tensorboard_dir = assets.get_tensorboard_dir(args.model_name)

    data = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = data['imgs'].astype(np.float32) / 255.0  # uint8 images -> floats in [0, 1]

    # Data-dependent settings, merged with the static base configuration.
    config = dict(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=data['n_classes'].item(),
    )
    config.update(base_config)

    lord = Lord(config)

    # When retraining, resume from the saved latent model (amortized encoders not trained yet).
    if args.retrain:
        lord.load(model_dir, latent=True, amortized=False)
        lord.config = config

    lord.train_latent(
        imgs=imgs,
        classes=data['classes'],
        model_dir=model_dir,
        tensorboard_dir=tensorboard_dir,
        retrain=args.retrain
    )

    lord.save(model_dir, latent=True, amortized=False)

def train(args):
    assets = AssetManager(args.base_dir)
    model_dir = assets.recreate_model_dir(args.model_name)
    tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

    data = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = data['imgs'].astype(np.float32) / 255.0

    config = Config(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=data['n_classes'].item(),

        content_dim=default_config['content_dim'],
        class_dim=default_config['class_dim'],

        content_std=default_config['content_std'],
        content_decay=default_config['content_decay'],

        n_adain_layers=default_config['n_adain_layers'],
        adain_dim=default_config['adain_dim'],

        perceptual_loss_layers=default_config['perceptual_loss']['layers'],
        perceptual_loss_weights=default_config['perceptual_loss']['weights'],
        perceptual_loss_scales=default_config['perceptual_loss']['scales']
    )

    lord = Lord.build(config)

    lord.train(
        imgs=imgs,
        classes=data['classes'],

        batch_size=default_config['train']['batch_size'],
        n_epochs=default_config['train']['n_epochs'],

        model_dir=model_dir,
        tensorboard_dir=tensorboard_dir
    )

    lord.save(model_dir)

def train(args):
    # Log both the static defaults and the command-line arguments to Weights & Biases.
    wandb.config.update(default_config)

    args_dict = vars(args)
    args_dict.pop('func')  # drop the argparse sub-command handler; it is not a hyper-parameter
    wandb.config.update(args_dict)

    assets = AssetManager(args.base_dir)
    model_dir = assets.recreate_model_dir(args.model_name)
    tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

    data = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs, classes, contents, n_classes = data['imgs'], data['classes'], data['contents'], data['n_classes']
    imgs = imgs.astype(np.float32) / 255.0  # uint8 images -> floats in [0, 1]

    converter = Converter.build(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=n_classes,

        content_dim=args.content_dim,
        class_dim=args.class_dim,

        content_std=args.content_std,
        content_decay=args.content_decay,

        n_adain_layers=default_config['n_adain_layers'],
        adain_enabled=args.adain,
        adain_dim=default_config['adain_dim'],
        adain_normalize=args.adain_normalize,

        perceptual_loss_layers=default_config['perceptual_loss']['layers'],
        perceptual_loss_weights=default_config['perceptual_loss']['weights'],
        perceptual_loss_scales=default_config['perceptual_loss']['scales'],
    )

    converter.train(
        imgs=imgs,
        classes=classes,

        batch_size=default_config['train']['batch_size'],
        n_epochs=default_config['train']['n_epochs'],

        model_dir=model_dir,
        tensorboard_dir=tensorboard_dir
    )

    converter.save(model_dir)

def train(args):
    assets = AssetManager(args.base_dir)
    model_dir = assets.recreate_model_dir(args.model_name)
    tensorboard_dir = assets.recreate_tensorboard_dir(args.model_name)

    data = np.load(assets.get_preprocess_file_path(args.data_name))
    imgs = data['imgs'].astype(np.float32) / 255.0

    config = dict(
        img_shape=imgs.shape[1:],
        n_imgs=imgs.shape[0],
        n_classes=data['n_classes'].item(),
    )
    config.update(base_config)

    lord = Lord(config)
    lord.train(
        imgs=imgs,
        classes=data['classes'],
        model_dir=model_dir,
        tensorboard_dir=tensorboard_dir
    )
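
# Hypothetical CLI wiring (illustration only, not part of the original source).
# The args_dict.pop('func') call in the wandb variant above suggests the usual
# argparse sub-command pattern, where each sub-command registers its handler via
# set_defaults(func=...). A minimal sketch, assuming only the arguments every
# variant above reads (base_dir, model_name, data_name); variant-specific flags
# such as --retrain or --content-dim would be added to the same sub-parser.
import argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-bd', '--base-dir', type=str, required=True)

    subparsers = parser.add_subparsers()

    train_parser = subparsers.add_parser('train')
    train_parser.add_argument('-dn', '--data-name', type=str, required=True)
    train_parser.add_argument('-mn', '--model-name', type=str, required=True)
    train_parser.set_defaults(func=train)  # train(args) as defined above

    args = parser.parse_args()
    args.func(args)  # dispatch to the selected sub-command handler


if __name__ == '__main__':
    main()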