import re
import glob
import data
import module
from os.path import join, basename, exists
from os import mkdir, makedirs
import os

import pylib as py  # provides py.arg / py.args used below

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

# py.arg('--dataset', default='MR2CT')
py.arg('--datasets_dir', default='./datasets')
py.arg('--size', type=int, default=256)  # load image to this size
# py.arg('--crop_size', type=int, default=256)  # then crop to this size
py.arg('--batch_size', type=int, default=2)
py.arg('--epochs', type=int, default=225)
py.arg('--epoch_decay', type=int, default=25)  # epoch to start decaying learning rate
py.arg('--lr', type=float, default=0.000001)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--adversarial_loss_mode', default='gan', choices=['gan', 'lsgan'])
# py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--cycle_loss_weight', type=float, default=12.0)
py.arg('--identity_loss_weight', type=float, default=0.5)
py.arg('--output_dir', default='./Results')
py.arg('--pool_size', type=int, default=50)  # pool size to store fake samples
args = py.args()
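# A minimal sketch (assumed, not part of the original file) of how the other
# scripts in this collection persist parsed args so that test scripts can later
# reload them with py.args_from_yaml; pylib provides the matching writer:
#
#   py.mkdir(args.output_dir)
#   py.args_to_yaml(py.join(args.output_dir, 'settings.yml'), args)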
import numpy as np
import pylib as py
import tensorflow as tf
import tensorflow.keras as keras
import tf2lib as tl
import tf2gan as gan
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--dataset', default='horse2zebra')
py.arg('--output_index', default='')
py.arg('--load_size', type=int, default=286)  # load image to this size
py.arg('--crop_size', type=int, default=256)  # then crop to this size
py.arg('--batch_size', type=int, default=1)
py.arg('--epochs', type=int, default=200)
py.arg('--epoch_decay', type=int, default=100)  # epoch to start decaying learning rate
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--adversarial_loss_mode', default='lsgan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
logging.basicConfig(
    format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')

# imports for running test.py (UniGAN app)
import imlib as im
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# configuration
DEBUG = True
os.environ["CUDA_PATH"] = "/usr/local/cuda"

py.arg('--flask_path', default='/var/www/html/flaskapp_unigan')
py.arg('--img_dir', default='./data/zappos_50k/images')
py.arg('--test_label_path', default='./data/zappos_50k/test_label.txt')
py.arg('--test_int', type=float, default=2)
py.arg('--experiment_name', default='UniGAN_128')
args_ = py.args()

# output_dir
output_dir = os.path.join(args_.flask_path, py.join('output', args_.experiment_name))

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)

# others
default_att_names = ['ancient', 'barren', 'bent', 'blunt', 'bright', 'broken', 'browned', 'brushed',
                     'burnt', 'caramelized', 'chipped', 'clean', 'clear', 'closed', 'cloudy', 'cluttered', 'coiled',
                     'cooked', 'cored', 'cracked', 'creased', 'crinkled', 'crumpled', 'crushed', 'curved', 'cut',
                     'damp', 'dark', 'deflated', 'dented', 'diced', 'dirty', 'draped', 'dry', 'dull', 'empty',
                     'engraved', 'eroded', 'fallen', 'filled', 'foggy', 'folded', 'frayed', 'fresh', 'frozen',
                     'full', 'grimy', 'heavy', 'huge', 'inflated', 'large', 'lightweight', 'loose', 'mashed',
                     'melted', 'modern', 'moldy', 'molten', 'mossy', 'muddy', 'murky', 'narrow', 'new', 'old',
                     'open', 'painted', 'peeled', 'pierced', 'pressed', 'pureed', 'raw', 'ripe', 'ripped', 'rough',
                     'ruffled', 'runny', 'rusty', 'scratched', 'sharp', 'shattered', 'shiny', 'short', 'sliced',
                     'small', 'smooth', 'spilled', 'splintered', 'squished', 'standing', 'steaming', 'straight',
                     'sunny', 'tall', 'thawed', 'thick', 'thin', 'tight', 'tiny', 'toppled', 'torn', 'unpainted',
                     'unripe', 'upright', 'verdant', 'viscous', 'weathered', 'wet', 'whipped',
                     'wide', 'wilted', 'windblown', 'winding', 'worn', 'wrinkled', 'young']
py.arg('--att_names', choices=data.ATT_ID.keys(), nargs='+', default=default_att_names)
py.arg('--img_dir', default='./data/CelebAMask-HQ/CelebA-HQ-img')
py.arg('--train_label_path', default='./data/CelebAMask-HQ/train_label.txt')
py.arg('--val_label_path', default='./data/CelebAMask-HQ/val_label.txt')
py.arg('--load_size', type=int, default=256)
py.arg('--crop_size', type=int, default=256)
py.arg('--n_epochs', type=int, default=60)
py.arg('--epoch_start_decay', type=int, default=30)
py.arg('--batch_size', type=int, default=1)
py.arg('--learning_rate', type=float, default=2e-4)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--model', default='model_256', choices=['model_128', 'model_256', 'model_384'])
import imlib as im
import numpy as np
import pylib as py
import tensorflow as tf
import tf2lib as tl

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--experiment_dir')
py.arg('--batch_size', type=int, default=32)
test_args = py.args()

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(py.join(test_args.experiment_dir, 'settings.yml'))
args.__dict__.update(test_args.__dict__)

# ==============================================================================
# =                                    test                                    =
# ==============================================================================

# data
A_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testA'), '*.jpg')
B_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testB'), '*.jpg')
A_dataset_test = data.make_dataset(A_img_paths_test, args.batch_size, args.load_size,
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

default_att_names = [
    'Bald', 'Bangs', 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Bushy_Eyebrows',
    'Eyeglasses', 'Male', 'Mouth_Slightly_Open', 'Mustache', 'No_Beard',
    'Pale_Skin', 'Young'
]
py.arg('--att_names', choices=data.ATT_ID.keys(), nargs='+', default=default_att_names)
py.arg('--img_dir',
       default='./data/img_celeba/aligned/align_size(572,572)_move(0.250,0.000)_face_factor(0.450)_jpg/data')
py.arg('--train_label_path', default='./data/img_celeba/train_label.txt')
py.arg('--val_label_path', default='./data/img_celeba/val_label.txt')
py.arg('--load_size', type=int, default=143)
py.arg('--crop_size', type=int, default=128)
py.arg('--n_epochs', type=int, default=60)
py.arg('--epoch_start_decay', type=int, default=30)
py.arg('--batch_size', type=int, default=32)
import os  # needed for os.path.join below

import imlib as im
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--flask_path', default='/var/www/html/flaskapp_unigan')
py.arg('--generator_pb', default='generator_unigan_gender_only_beta_0.5.pb')
py.arg('--img_dir', default='./data/zappos_50k/images')
py.arg('--test_label_path', default='./data/zappos_50k/test_label.txt')
py.arg('--test_att_name', choices=data.ATT_ID.keys(), default='Women')
py.arg('--test_int_min', type=float, default=-2)
py.arg('--test_int_max', type=float, default=2)
py.arg('--test_int_step', type=float, default=0.5)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = os.path.join(args_.flask_path, py.join('output', args_.experiment_name))
# output_dir = py.join('output', args_.experiment_name)
import os  # needed for os.path.join below

import imlib as im
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--flask_path', default='/var/www/html/flaskapp_unigan')
py.arg('--img_dir', default='./data/zappos_50k/images')
py.arg('--test_label_path', default='./data/zappos_50k/test_label.txt')
py.arg('--test_att_names', choices=data.ATT_ID.keys(), nargs='+', default=['Men', 'Women'])
py.arg('--test_ints', type=float, nargs='+', default=2)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = os.path.join(args_.flask_path, py.join('output', args_.experiment_name))
# output_dir = py.join('output', args_.experiment_name)
import tensorflow as tf
import tensorflow.keras as keras
import tf2lib as tl
import tf2gan as gan
from PIL import Image
import tqdm
import matplotlib.pyplot as plt
import data
import random
import module
import tensorflow_datasets as tfds

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

'''py.arg('--dataset', default='summer2winter_yosemite')
py.arg('--datasets_dir', default='dataset')
py.arg('--load_size', type=int, default=256)  # load image to this size
py.arg('--crop_size', type=int, default=256)  # then crop to this size
py.arg('--batch_size', type=int, default=1)
py.arg('--epochs', type=int, default=200)
py.arg('--epoch_decay', type=int, default=100)  # epoch to start decaying learning rate
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--adversarial_loss_mode', default='lsgan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--cycle_loss_weight', type=float, default=10.0)
py.arg('--identity_loss_weight', type=float, default=0.0)
py.arg('--pool_size', type=int, default=50)  # pool size to store fake samples
args = py.args()'''
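# A minimal sketch (assumed; the live loading code falls outside this excerpt)
# of the tensorflow_datasets path that the tfds import above suggests replaced
# the commented-out py.arg block:
#
#   dataset = tfds.load('cycle_gan/summer2winter_yosemite', as_supervised=True)
#   train_A, train_B = dataset['trainA'], dataset['trainB']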
import tqdm

import data
import module

# ==============================================================================
# =                                  environ                                   =
# ==============================================================================

os.environ["CUDA_PATH"] = "/usr/local/cuda"

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--flask_path', default='/var/www/html/flaskapp')
py.arg('--img_dir',
       default='./data/img_celeba/aligned/align_size(572,572)_move(0.250,0.000)_face_factor(0.450)_jpg/data')
py.arg('--test_label_path', default='./data/img_celeba/test_label.txt')
py.arg('--test_int', type=float, default=2)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = os.path.join(args_.flask_path, py.join('output', args_.experiment_name))
import imlib as im
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--img_dir', default='./data/zappos_50k/images')
py.arg('--test_label_path', default='./data/zappos_50k/test_label.txt')
py.arg('--test_int', type=float, default=2)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)

# others
n_atts = len(args.att_names)
import glob  # needed for glob.glob below
import numpy as np  # needed for np.min / np.max in normalize()
import pylib as py  # provides py.arg / py.args / py.args_from_yaml
import tensorflow as tf
import Utils as utils
import cv2
import data
import module
from os.path import join, exists, basename
from os import makedirs, mkdir
import os


def normalize(arr, eps=0.000001):
    # linearly rescale arr to [-1, 1]; eps guards against division by zero
    return 2 * ((arr - np.min(arr)) / (np.max(arr) - np.min(arr) + eps)) - 1


os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--datasets_dir', default='./datasets')
py.arg('--experiment_dir', default='./Results')
py.arg('--batch_size', type=int, default=1)
test_args = py.args()

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(join(test_args.experiment_dir, 'settings.yml'))
args.__dict__.update(test_args.__dict__)

# ==============================================================================
# =                                    test                                    =
# ==============================================================================

# data
A_img_paths_test = glob.glob(join(args.datasets_dir, 'MRI_test', '*.png'))
# print(len(A_img_paths_test))
# B_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'CT_test'), '*.png')
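# Hypothetical usage of normalize() above (illustrative only; the variable
# names are assumptions, not part of the original script):
#
#   img = cv2.imread(A_img_paths_test[0], cv2.IMREAD_GRAYSCALE).astype(np.float32)
#   img = normalize(img)  # generator inputs are expected in [-1, 1]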
import pylib as py
import imlib as im
import torch
import numpy as np
import torchlib

import module
import data

py.arg('--dataset', default='fashion_mnist',
       choices=['cifar10', 'fashion_mnist', 'mnist', 'celeba', 'anime', 'custom'])
# py.arg('--experiment_name', required=True)
py.arg('--checkpoint_name', default="Epoch_inter(50).ckpt")
py.arg('--z_dim', type=int, default=128)
py.arg('--num_samples', type=int, required=True)
py.arg('--batch_size', type=int, default=1)
py.arg('--experiment_names', nargs='+', type=str)
py.arg('--output_dir', type=str, default='generated_imgs')
args = py.args()

print(args.experiment_names)
experiment_names = args.experiment_names

use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
torch.manual_seed(0)

if args.dataset in ['cifar10', 'fashion_mnist', 'mnist', 'imagenet']:  # 32x32
    output_channels = 3
import imlib as im
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--img_dir', default='./data/zappos_50k/images')
py.arg('--test_label_path', default='./data/zappos_50k/test_label.txt')
py.arg('--test_att_names', choices=data.ATT_ID.keys(), nargs='+', default=['Unisex', 'Athletics'])
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)
import pylib as py
import imlib as im
import torch
import numpy as np
import torchlib
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset, sampler
import torchvision.utils as vutils
import os

import module
import data

py.arg('--dataset', default='fashion_mnist',
       choices=['cifar10', 'fashion_mnist', 'mnist', 'celeba', 'anime', 'custom'])
py.arg('--out_dir', required=True)
py.arg('--num_samples_per_class', type=int, required=True)
py.arg('--batch_size', type=int, default=1)
py.arg('--num', type=int, default=-1)
py.arg('--output_dir', type=str, default='generated_imgs')
args = py.args()

use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")

py.mkdir(args.out_dir)

transform = transforms.Compose([
import pylib as py
import tensorflow as tf
import tflib as tl

import module

from tensorflow.python.framework import graph_util

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)

# others
n_atts = len(args.att_names)

sess = tl.session()
sess.__enter__()  # make this session the default

# ==============================================================================
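# A minimal sketch (assumed; the actual export code falls outside this excerpt)
# of the TF1 graph-freezing flow that the graph_util import above points to.
# The output node name is hypothetical:
#
#   frozen = graph_util.convert_variables_to_constants(
#       sess, sess.graph_def, output_node_names=['G/output'])
#   with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'wb') as f:
#       f.write(frozen.SerializeToString())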
import imlib as im
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--img_dir', default='./data/zappos_50k/images')
py.arg('--test_label_path', default='./data/zappos_50k/test_label.txt')
py.arg('--test_att_name', choices=data.ATT_ID.keys(), default='Unisex')
py.arg('--test_int_min', type=float, default=-2)
py.arg('--test_int_max', type=float, default=2)
py.arg('--test_int_step', type=float, default=0.5)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)
import numpy as np
import pylib as py
import tensorflow as tf
import tensorflow.keras as keras
import tf2lib as tl
import tf2gan as gan
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--datasets_dir', default='datasets')
py.arg('--output_dir', default='output')
py.arg('--load_size', type=int, default=286)  # load image to this size
py.arg('--crop_size', type=int, default=256)  # then crop to this size
py.arg('--batch_size', type=int, default=1)
py.arg('--epochs', type=int, default=200)
py.arg('--epoch_decay', type=int, default=100)  # epoch to start decaying learning rate
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--norm_type', default='instance_norm', choices=['none', 'batch_norm', 'instance_norm', 'layer_norm'])
py.arg('--adversarial_loss_mode', default='lsgan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
def weights_init(m):
    # DCGAN-style initialization: conv weights ~ N(0, 0.02), BatchNorm scale
    # ~ N(1, 0.02) with zero bias; typically applied via net.apply(weights_init)
    classname = m.__class__.__name__
    if classname.find('ConvTranspose') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('Conv2d') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


# ==============================================================================
# =                                   param                                    =
# ==============================================================================

# command line
py.arg('--dataset', default='fashion_mnist',
       choices=['cifar10', 'fashion_mnist', 'mnist', 'celeba', 'anime', 'custom', 'imagenet'])
py.arg('--batch_size', type=int, default=256)
py.arg('--epochs', type=int, default=100)
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--n_d', type=int, default=1)  # d updates per g update
py.arg('--z_dim', type=int, default=128)
py.arg('--adversarial_loss_mode', default='gan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', '1-gp', '0-gp', 'lp'])
py.arg('--gradient_penalty_sample_mode', default='line', choices=['line', 'real', 'fake', 'dragan'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='none')
py.arg('--gradient_penalty_d_norm', default='layer_norm',
# CUDA_VISIBLE_DEVICES=0 python3 train.py --dataset=custom --custom_dataroot ./data/car_renderings_baseline --updown_sampling 6 --im_size 256 --epoch=200 --adversarial_loss_mode=gan --batch_size 150 --experiment_name car_dcgan
# CUDA_VISIBLE_DEVICES=1 python3 train.py --dataset=custom --custom_dataroot ./data/car_renderings_baseline --updown_sampling 6 --im_size 256 --epoch=200 --adversarial_loss_mode=lsgan --batch_size 150 --experiment_name car_lsgan
# CUDA_VISIBLE_DEVICES=2 python3 train.py --dataset=custom --custom_dataroot ./data/car_renderings_baseline --updown_sampling 6 --im_size 256 --epoch=200 --adversarial_loss_mode=wgan --batch_size 110 --gradient_penalty_d_norm instance_norm --gradient_penalty_mode 1-gp --n_d 5 --experiment_name car_wgangp
# CUDA_VISIBLE_DEVICES=3 python3 train.py --dataset=custom --custom_dataroot ./data/chair_renderings_baseline --updown_sampling 6 --im_size 256 --epoch=400 --adversarial_loss_mode=gan --batch_size 150 --experiment_name chair_dcgan
# CUDA_VISIBLE_DEVICES=4 python3 train.py --dataset=custom --custom_dataroot ./data/chair_renderings_baseline --updown_sampling 6 --im_size 256 --epoch=400 --adversarial_loss_mode=lsgan --batch_size 150 --experiment_name chair_lsgan
# CUDA_VISIBLE_DEVICES=5 python3 train.py --dataset=custom --custom_dataroot ./data/chair_renderings_baseline --updown_sampling 6 --im_size 256 --epoch=400 --adversarial_loss_mode=wgan --batch_size 110 --gradient_penalty_d_norm instance_norm --gradient_penalty_mode 1-gp --n_d 5 --experiment_name chair_wgangp

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

# command line
py.arg('--dataset', default='fashion_mnist',
       choices=['cifar10', 'fashion_mnist', 'mnist', 'celeba', 'anime', 'custom'])
py.arg('--custom_dataroot', default='./data', help='the data root in custom dataset mode')
py.arg('--updown_sampling', type=int, default=6, help='3 for 32x32, 4 for 64x64, etc.')
py.arg('--im_size', type=int, default=256, help='image size')
py.arg('--batch_size', type=int, default=64)
py.arg('--epochs', type=int, default=25)
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--n_d', type=int, default=1)  # d updates per g update
py.arg('--z_dim', type=int, default=128)
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--img_dir',
       default='./data/img_celeba/aligned/align_size(572,572)_move(0.250,0.000)_face_factor(0.450)_jpg/data')
py.arg('--test_label_path', default='./data/img_celeba/test_label.txt')
py.arg('--test_att_name', choices=data.ATT_ID.keys(), default='Pale_Skin')
py.arg('--test_int_min', type=float, default=-2)
py.arg('--test_int_max', type=float, default=2)
py.arg('--test_int_step', type=float, default=0.5)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)

# load settings saved at training time
import model
import data_generator
import os
import pylib as py
import json

# TODO: add code for a last-entry data file (to number logs and checkpoints systematically).
# TODO: add code for parsing the JSON files to find already-trained models.

# py.arg('--prev_train_data', default="./Previous Train Data")
py.arg('--dataset', default="./Data")
py.arg('--batch_size', type=int, default=4)
py.arg('--epochs', type=int, default=100)
py.arg('--lr', type=float, default=0.002)
py.arg('--image_size', type=int, default=256)
py.arg('--n_channels', type=int, default=3)
py.arg('--shuffle_data', type=bool, default=True)
py.arg('--bottleneck_size', type=int, default=None)
py.arg('--loss_weight', type=float, default=0.8)
py.arg('--checkpoint_dir', default=None)
py.arg('--tensorboard_dir', default="./logs")
py.arg('--prev_checkpoint', default=None)
py.arg('--prev_tensorboard', default=None)
args = py.args()

# Finding a previously trained model. [CODE NOT COMPLETED]
# json_directory = args.prev_train_data
# json_list = os.listdir(json_directory)
# for json_file in json_list:
#     with open(os.path.join(json_directory, json_file)) as f:
#         metadata = json.load(f)
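# A minimal sketch (assumed; the loop above is explicitly unfinished) of how
# the metadata comparison could continue. The 'batch_size'/'epochs'/
# 'checkpoint_dir' keys are hypothetical, chosen to mirror the args above:
#
#         if (metadata.get('batch_size') == args.batch_size
#                 and metadata.get('epochs') == args.epochs):
#             args.prev_checkpoint = metadata.get('checkpoint_dir')
#             break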
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# os.environ["CUDA_VISIBLE_DEVICES"] = '2,3'
tf.config.experimental.set_lms_enabled(True)
# neptune.set_project('Serre-Lab/paleo-ai')

# GPUs to be used
GPU = [2, 3]

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--outdir',
       default='/users/irodri15/data/irodri15/Fossils/Experiments/cyclegan/checkpoints/')
py.arg('--train_datasetA',
       default='/users/irodri15/data/irodri15/Fossils/Experiments/datasets/gan_fossils_leaves_v1/fossils_train_oscar_processed.csv')
py.arg('--train_datasetB',
       default='/users/irodri15/data/irodri15/Fossils/Experiments/datasets/gan_fossils_leaves_v1/leaves_train_oscar_processed.csv')
py.arg('--test_datasetA',
       default='/users/irodri15/data/irodri15/Fossils/Experiments/datasets/gan_fossils_leaves_v1/fossils_test_oscar_processed.csv'
import pylib as py
import tensorflow as tf
import tensorflow.keras as keras
import tf2gan as gan
import tf2lib as tl
import tqdm
import matplotlib.pyplot as plt
import sys
import numpy as np

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

# command line
py.arg('--dataset', default='PETCT',
       choices=['PETCT', 'cifar10', 'fashion_mnist', 'mnist', 'celeba', 'anime', 'custom'])
py.arg('--batch_size', type=int, default=5)
py.arg('--epochs', type=int, default=25)
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--n_d', type=int, default=1)  # d updates per g update
py.arg('--PETCT_dim', type=int, default=128)
py.arg('--adversarial_loss_mode', default='wgan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='PETCT')
py.arg('--data_rate', type=float, default=0.0)
py.arg('--training_mode', type=int, default=0)
py.arg('--padding', default='same', choices=['same', 'valid', 'full'])
py.arg('--input_size', type=int, default=256)
args = py.args()
except RuntimeError as e:
    print(e)

import tensorflow.keras as keras
import tf2lib as tl
import tf2gan as gan
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

# python train.py --dataset horse2zebra --epochs 10 --batch_size 500
py.arg('--dataset', default='horse2zebra')
py.arg('--datasets_dir', default='datasets')
py.arg('--load_size', type=int, default=286)  # load image to this size
py.arg('--crop_size', type=int, default=256)  # then crop to this size
py.arg('--batch_size', type=int, default=1000)
py.arg('--epochs', type=int, default=2)
py.arg('--epoch_decay', type=int, default=100)  # epoch to start decaying learning rate
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--adversarial_loss_mode', default='lsgan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
import numpy as np
import pylib as py
import tensorflow as tf
import tensorflow.keras as keras
import tf2lib as tl
import tf2gan as gan
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg("--dataset", default="horse2zebra")
py.arg("--output_dir", default="horse2zebra")
py.arg("--datasets_dir", default="datasets")
py.arg("--load_size", type=int, default=286)  # load image to this size
py.arg("--crop_size", type=int, default=256)  # then crop to this size
py.arg("--channels", type=int, default=1)  # 3 for RGB, 1 for grayscale
py.arg("--batch_size", type=int, default=1)
py.arg("--epochs", type=int, default=100)
py.arg("--epoch_decay", type=int, default=50)  # epoch to start decaying learning rate
py.arg("--lr", type=float, default=0.0002)
py.arg("--beta_1", type=float, default=0.5)
py.arg(
    "--adversarial_loss_mode",
    default="lsgan",
    choices=["gan", "hinge_v1", "hinge_v2", "lsgan", "wgan"],
import imlib as im
import module
import pylib as py
import tensorflow as tf
import tensorflow.keras as keras
import tf2gan as gan
import tf2lib as tl
import tqdm
import matplotlib.pyplot as plt

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

# command line
py.arg('--dataset', default='fashion_mnist',
       choices=['cifar10', 'fashion_mnist', 'mnist', 'celeba', 'anime', 'custom'])
py.arg('--batch_size', type=int, default=64)
py.arg('--epochs', type=int, default=25)
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--n_d', type=int, default=1)  # d updates per g update
py.arg('--z_dim', type=int, default=128)
py.arg('--adversarial_loss_mode', default='gan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='none')
args = py.args()

# output_dir
if args.experiment_name == 'none':
    args.experiment_name = '%s_%s' % (args.dataset, args.adversarial_loss_mode)
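# A minimal sketch (assumed; the rest of the script falls outside this excerpt)
# of the output-dir setup that usually follows this naming step in these
# scripts, matching the 'output/<experiment_name>/settings.yml' layout the
# test scripts above reload from:
#
#   output_dir = py.join('output', args.experiment_name)
#   py.mkdir(output_dir)
#   py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)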
import os

import imlib as im
import numpy as np
import pylib as py
import scipy
import tensorflow as tf
import tflib as tl

import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--n_traversal', type=int, default=100)
py.arg('--n_traversal_point', type=int, default=17)
py.arg('--truncation_threshold', type=float, default=1.5)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)

sess = tl.session()
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--img_dir',
       default='./data/img_celeba/aligned/align_size(572,572)_move(0.250,0.000)_face_factor(0.450)_jpg/data')
py.arg('--test_label_path', default='./data/img_celeba/test_label.txt')
py.arg('--test_att_names', choices=data.ATT_ID.keys(), nargs='+', default=['Bangs', 'Mustache'])
py.arg('--test_ints', type=float, nargs='+', default=2)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)
import numpy as np
import pylib as py
import tensorflow as tf
import tflib as tl
import tqdm

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--img_dir',
       default='./data/img_celeba/aligned/align_size(572,572)_move(0.250,0.000)_face_factor(0.450)_jpg/data')
py.arg('--test_label_path', default='./data/img_celeba/test_label.txt')
py.arg('--with_mask', default=False)
py.arg('--test_int', type=float, default=2)
py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)

# load settings saved at training time and override with test-time args
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)