def build_and_train():
    # Load MNIST and scale pixel values to [0, 1].
    (x_train, _), (x_test, _) = mnist.load_data()
    image_size = x_train.shape[1]
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255

    # Network and training hyperparameters.
    input_shape = [image_size, image_size, 1]
    latent_size = 100
    batch_size = 64
    train_steps = 40000
    lr = 2e-4
    decay = 6e-8
    optimizer = RMSprop(lr=lr, decay=decay)

    # Build the discriminator. A linear output (activation=None) combined
    # with MSE loss gives the least-squares (LSGAN) objective.
    inputs = Input(shape=input_shape)
    discriminator = gan.discriminator(inputs, image_size, activation=None)
    discriminator.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
    discriminator.summary()

    # Build the generator from a latent vector.
    inputs = Input(shape=(latent_size,))
    generator = gan.generator(inputs, image_size)
    generator.summary()

    # Freeze the discriminator weights while training the adversarial model.
    discriminator.trainable = False
    adversarial = Model(inputs, discriminator(generator(inputs)))
    adversarial.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
    adversarial.summary()

    # Train the generator/discriminator pair.
    models = (generator, discriminator, adversarial)
    model_name = 'lsgan_mnist'
    params = (batch_size, latent_size, train_steps, model_name)
    gan.train(models, x_train, params)
def run_experiment(generator, discriminator, samplers, minibatch, sampler):
    # d_eta, g_eta, and epochs are assumed to be module-level hyperparameters.
    d_optimizer = optim.Adam(discriminator.parameters(), lr=d_eta, betas=(0.9, 0.999))
    g_optimizer = optim.Adam(generator.parameters(), lr=g_eta, betas=(0.9, 0.999))
    gan.train((generator, 1, g_optimizer),
              (discriminator, 2, d_optimizer),
              samplers, minibatch, epochs, sampler)
def build_and_train_models():
    # Load MNIST and scale pixel values to [0, 1].
    (x_train, _), (_, _) = mnist.load_data()
    image_size = x_train.shape[1]
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255

    # Network and training hyperparameters.
    model_name = "lsgan_mnist"
    latent_size = 100
    input_shape = (image_size, image_size, 1)
    batch_size = 64
    lr = 2e-4
    decay = 6e-8
    train_steps = 20000

    # Build the discriminator with a linear output for the LSGAN MSE loss.
    inputs = Input(shape=input_shape, name='discriminator_input')
    discriminator = gan.discriminator(inputs, activation=None)
    optimizer = RMSprop(lr=lr, decay=decay)
    discriminator.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
    discriminator.summary()

    # Build the generator from a latent vector.
    input_shape = (latent_size,)
    inputs = Input(shape=input_shape, name='z_input')
    generator = gan.generator(inputs, image_size)
    generator.summary()

    # Use half the learning rate and decay for the adversarial model, and
    # freeze the discriminator weights while it trains.
    optimizer = RMSprop(lr=lr * 0.5, decay=decay * 0.5)
    discriminator.trainable = False
    adversarial = Model(inputs, discriminator(generator(inputs)), name=model_name)
    adversarial.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
    adversarial.summary()

    models = (generator, discriminator, adversarial)
    params = (batch_size, latent_size, train_steps, model_name)
    gan.train(models, x_train, params)
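Both LSGAN scripts above delegate to gan.train, which is not shown here. Below is a minimal sketch of the loop such a helper presumably implements; the label convention (real images regressed to 1.0, fakes to 0.0), the uniform noise distribution, the logging interval, and the save step are all assumptions, not the authors' code.

def train(models, x_train, params):
    # Sketch of an LSGAN training loop matching the tuples built above.
    generator, discriminator, adversarial = models
    batch_size, latent_size, train_steps, model_name = params
    for step in range(train_steps):
        # Train the discriminator on a half-real, half-fake batch.
        idx = np.random.randint(0, x_train.shape[0], batch_size)
        real = x_train[idx]
        noise = np.random.uniform(-1.0, 1.0, size=(batch_size, latent_size))
        fake = generator.predict(noise)
        x = np.concatenate((real, fake))
        y = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))))
        d_loss = discriminator.train_on_batch(x, y)
        # Train the generator through the adversarial model (discriminator
        # frozen), pushing fake outputs toward the "real" target 1.0.
        noise = np.random.uniform(-1.0, 1.0, size=(batch_size, latent_size))
        a_loss = adversarial.train_on_batch(noise, np.ones((batch_size, 1)))
        if step % 500 == 0:
            print("step %d: d_loss=%s, a_loss=%s" % (step, d_loss, a_loss))
    generator.save(model_name + ".h5")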
# Read and normalize the dataset according to the model.
reader = readfile.read('telepathology1.csv')
trainX, trainY, testX, testY = reader.get_data()

# Define GAN parameters.
epochs = len(trainX)
latent_dim = 1
feature_num = 1
batch_size = 1

# Build the GAN (note: this rebinds the name gan from the module to the instance).
gan = gan.GAN(latent_dim, feature_num)
gan.load_data(trainX, trainY)

# Train the GAN and report the wall-clock training time.
start = time.time()
generator = gan.train(epochs, batch_size)
stop = time.time()
print("Training time: %.2f s" % (stop - start))

# Test the GAN + HMM model.
y_pred = []
for i in range(len(testX)):
    temp_X = np.array(testX[i]).reshape(batch_size, latent_dim, feature_num)
    predictions = generator.predict(temp_X)
    y_pred.append(predictions[0][0])
y_pred = util.convert(y_pred)
model = hmm.HmmScaled('init_model1.json')
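The test samples are independent, so the per-sample predict loop can be replaced by one batched call. A sketch, assuming every row of testX has the same shape so the whole test set reshapes cleanly:

# Batched alternative to the per-sample loop above (assumes testX is rectangular).
test_batch = np.array(testX).reshape(-1, latent_dim, feature_num)
predictions = generator.predict(test_batch)
y_pred = util.convert([p[0] for p in predictions])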
    help='After how many epochs to print results / save the model')
# Note: type=list would split a command-line string into characters, so the
# list-valued flags use nargs='+' with type=float instead.
parser.add_argument('--sustains', nargs='+', type=float,
                    default=[1.0, 0.7, 0.4], metavar='N',
                    help='sustain levels')
parser.add_argument('--attacks', nargs='+', type=float,
                    default=[0.0, 0.2, 0.4], metavar='N',
                    help='attack levels (fraction of the sample that should be rising)')
parser.add_argument('--releases', nargs='+', type=float,
                    default=[0.0, 0.2, 0.4], metavar='N',
                    help='release levels (fraction of the sample that should be falling)')
args = parser.parse_args()

# Convert the fractional attack/release levels into sample counts.
attacks = [int(a * args.seq_length) + 1 for a in args.attacks]
releases = [int(a * args.seq_length) + 1 for a in args.releases]
variables = [args.sustains, attacks, releases]

gan.train(num_epoch=args.num_epoch,
          dsize=args.dsize,
          batch_size=args.batch_size,
          print_interval=args.print_interval,
          variables=variables,
          cf=args.conv_dim,
          seq_length=args.seq_length)
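With nargs='+', the list-valued flags take space-separated floats on the command line. A hypothetical invocation (the script name train_gan.py is an assumption; the flag names come from the parser above):

    python train_gan.py --sustains 1.0 0.7 0.4 --attacks 0.0 0.2 0.4 --releases 0.0 0.2 0.4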
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train',
                    help='train, evaluate')
args = parser.parse_args()
tl.global_flag['mode'] = args.mode

if tl.global_flag['mode'] == 'train':
    gan.train()
elif tl.global_flag['mode'] == 'evaluate':
    # Run the SSD detector on each test image, then evaluate the GAN on the
    # detected regions and labels.
    img_path = './VOCROOT/VOC2007TEST/JPEGImages/'
    img_name = os.listdir(img_path)
    for name in img_name:
        path = os.path.join(img_path, name)
        label_list, img_list = simple_ssd_demo.ssd_res(path)
        gan.evaluate(img_list, label_list)
else:
    raise Exception("Unknown --mode")
""" Author : Byunghyun Ban Date : 2020.07.24. """ import data_reader import gan # 몇 에포크 만큼 학습을 시킬 것인지 결정합니다. EPOCHS = 100 # 예제 기본값은 100입니다. # 데이터를 읽어옵니다. dr = data_reader.DataReader() # GAN을 불러옵니다. # Generator generator = gan.make_generator() # Discriminator discriminator = gan.make_discriminator() # 인공신경망을 학습시킵니다. print("\n\n************ TRAINING START ************ ") gan.train(generator, discriminator, dr.train_dataset, EPOCHS) # GIF 애니메이션을 저장합니다. gan.gif_generation()
import gan
import os

# Default hyperparameters; the grid search below overrides most of them.
STEP_SIZE_CRITIC = 0.00005
STEP_SIZE_G_C = 0.00005
BATCH_SIZE = 64
TOTAL_ITERATIONS = 10000
NOISE_SIZE = 100
C_UPDATES_PER_G_UPDATE = 10
CLIP_THRESHOLD = 0.01

# Hyperparameter grid: critic step size, generator step size, batch size,
# noise size, and critic updates per generator update.
ssc_list = [0.0001, 0.00005, 0.00001]
ssgc_list = [0.0001, 0.00005, 0.00001]
bs_list = [16, 32, 64, 128]
ns_list = [10, 100, 1000]
cupgu_list = [1, 5, 10, 25, 50]

setting = 0
for ssc in ssc_list:
    for ssgc in ssgc_list:
        for bs in bs_list:
            for ns in ns_list:
                for cupgu in cupgu_list:
                    # Create a per-setting output directory if it is missing.
                    os.makedirs("gan-output-" + str(setting), exist_ok=True)
                    gan.train(ssc, ssgc, bs, TOTAL_ITERATIONS, ns, cupgu, setting)
                    setting += 1
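The five nested loops can be flattened with itertools.product, which enumerates the same combinations in the same order (rightmost list varying fastest, like the innermost loop). A minimal equivalent sketch, assuming the same gan.train signature:

import itertools

for setting, (ssc, ssgc, bs, ns, cupgu) in enumerate(
        itertools.product(ssc_list, ssgc_list, bs_list, ns_list, cupgu_list)):
    os.makedirs("gan-output-%d" % setting, exist_ok=True)
    gan.train(ssc, ssgc, bs, TOTAL_ITERATIONS, ns, cupgu, setting)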
import gan
import os

# Hyperparameters for a single default run.
STEP_SIZE_CRITIC = 0.000005
STEP_SIZE_G_C = 0.00002
BATCH_SIZE = 64
TOTAL_ITERATIONS = 10000
NOISE_SIZE = 1500
C_UPDATES_PER_G_UPDATE = 10

setting = "default"
# Create the output directory if it is missing.
os.makedirs("gan-output-" + setting, exist_ok=True)

gan.train(STEP_SIZE_CRITIC, STEP_SIZE_G_C, BATCH_SIZE, TOTAL_ITERATIONS,
          NOISE_SIZE, C_UPDATES_PER_G_UPDATE, setting)
# Keep only images whose background percentage lies strictly between
# min_val and max_val.
mask1 = df['Background_percentage'] < max_val
mask2 = df['Background_percentage'] > min_val
usable_images = df.loc[mask1 & mask2, 'Path']

image_processing.process_images(usable_images, images_for_training,
                                change_mode_to='RGB')
training_data = image_processing.get_data_from_images(images_for_training)

# Size of the latent space.
latent_dim = 100

# Create the discriminator.
d_model = gan.define_discriminator((output_shape[1], output_shape[0], 3))

# Create the generator.
g_model = gan.define_generator(latent_dim, output_shape=output_shape)
g_model.summary()

# Create the combined GAN and train it.
gan_model = gan.define_gan(g_model, d_model)
gan.train(g_model, d_model, gan_model, training_data, latent_dim,
          gan_results, n_epochs=50, n_batch=128)
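The composite model returned by gan.define_gan is not shown. A minimal sketch of the usual Keras pattern such a helper follows, stacking the generator on a frozen discriminator so that only generator weights update when the combined model trains; the loss and optimizer settings here are assumptions:

from keras.models import Sequential
from keras.optimizers import Adam

def define_gan(g_model, d_model):
    # Freeze discriminator weights inside the combined model.
    d_model.trainable = False
    model = Sequential([g_model, d_model])
    # Loss/optimizer choices are illustrative, not the authors' settings.
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))
    return model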