def my_train(args):
    # create output folder
    if not os.path.exists(os.path.expanduser(args.datasetpath1)):
        os.mkdir(os.path.expanduser(args.datasetpath1))
    # create figures folder
    if not os.path.exists('./figures'):
        os.mkdir('./figures')

    # load test data
    procImage_val, rawImage_val = my_load_data_test(args.datasetpath1)
    print('procImage_val : ', procImage_val.shape)
    print('rawImage_val : ', rawImage_val.shape)

    img_shape = rawImage_val.shape[-3:]  # rawImage.shape[-3:]
    print('img_shape : ', img_shape)
    disc_img_shape = (args.patch_size, args.patch_size, procImage_val.shape[-1])
    print('disc_img_shape : ', disc_img_shape)

    opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    # load generator model and pre-trained weights
    generator_model = models.my_load_generator(img_shape, disc_img_shape)
    generator_model.load_weights('params_generator_pix_epoch_160.hdf5')  # 160 - akb face
    generator_model.compile(loss='mae', optimizer=opt_discriminator)
    generator_model.trainable = False

    # plot five validation batches of five images each
    for j in range(0, 5):
        idx = [1 + 5 * j, 2 + 5 * j, 3 + 5 * j, 4 + 5 * j, 5 + 5 * j]
        # idx = np.random.choice(procImage_val.shape[0], args.batch_size)
        print("j=", j, "idx=", idx)
        X_gen_target, X_gen = procImage_val[idx], rawImage_val[idx]
        plot_generated_batch(X_gen_target, X_gen, generator_model,
                             args.batch_size, "validation" + str(j))
def my_train(args):
    # create output folder
    if not os.path.exists(os.path.expanduser(args.datasetpath00)):
        os.mkdir(os.path.expanduser(args.datasetpath00))
    # create figures folder
    if not os.path.exists('./figures'):
        os.mkdir('./figures')

    # load the six training sets and their matching validation sets
    train_paths = [args.datasetpath00, args.datasetpath01, args.datasetpath02,
                   args.datasetpath03, args.datasetpath04, args.datasetpath05]
    test_paths = [args.datasetpath10, args.datasetpath11, args.datasetpath12,
                  args.datasetpath13, args.datasetpath14, args.datasetpath15]
    procImages, rawImages, procImages_val, rawImages_val = [], [], [], []
    for train_path, test_path in zip(train_paths, test_paths):
        procImage, rawImage = my_load_data_train(train_path)
        procImage_val, rawImage_val = my_load_data_test(test_path)
        procImages.append(procImage)
        rawImages.append(rawImage)
        procImages_val.append(procImage_val)
        rawImages_val.append(rawImage_val)

    print('procImage.shape : ', procImages[0].shape)
    print('rawImage.shape : ', rawImages[0].shape)
    print('procImage_val : ', procImages_val[0].shape)
    print('rawImage_val : ', rawImages_val[0].shape)

    img_shape = rawImages[0].shape[-3:]
    print('img_shape : ', img_shape)
    patch_num = (img_shape[0] // args.patch_size) * (img_shape[1] // args.patch_size)
    disc_img_shape = (args.patch_size, args.patch_size, procImages[0].shape[-1])
    print('disc_img_shape : ', disc_img_shape)

    # optimizers
    opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    # load generator model
    generator_model = models.my_load_generator(img_shape, disc_img_shape)
    # generator_model.load_weights('params_generator6_pix_epoch_4800.hdf5')

    # load discriminator model
    discriminator_model = models.my_load_DCGAN_discriminator(
        img_shape, disc_img_shape, patch_num)
    # discriminator_model.load_weights('params_discriminator6_pix_epoch_4800.hdf5')

    generator_model.compile(loss='mae', optimizer=opt_discriminator)
    discriminator_model.trainable = False

    DCGAN_model = models.my_load_DCGAN(generator_model, discriminator_model,
                                       img_shape, args.patch_size)
    loss = [l1l2_loss, 'binary_crossentropy']
    loss_weights = [1E1, 1]
    DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)

    discriminator_model.trainable = True
    discriminator_model.compile(loss='binary_crossentropy',
                                optimizer=opt_discriminator)

    # start training
    j = 0
    print('start training')
    starttime = time.time()

    # shuffle every dataset with the same permutation and split into mini-batches
    perm = np.random.permutation(rawImages[0].shape[0])
    X_procImages, X_rawImages, X_procImageIters, X_rawImageIters = [], [], [], []
    for procImage, rawImage in zip(procImages, rawImages):
        X_procImage = procImage[perm]
        X_rawImage = rawImage[perm]
        X_procImages.append(X_procImage)
        X_rawImages.append(X_rawImage)
        X_procImageIters.append([X_procImage[i:i + args.batch_size]
                                 for i in range(0, rawImage.shape[0], args.batch_size)])
        X_rawImageIters.append([X_rawImage[i:i + args.batch_size]
                                for i in range(0, rawImage.shape[0], args.batch_size)])

    for e in range(args.epoch):
        # one training pass per dataset (the training() helper runs the inner batch loop)
        for d in range(len(train_paths)):
            training(procImages[d], rawImages[d], procImages_val[d], rawImages_val[d],
                     X_procImages[d], X_rawImages[d], X_procImageIters[d], X_rawImageIters[d],
                     j, d, args.batch_size, args.patch_size,
                     generator_model, discriminator_model, DCGAN_model)

        j += 1
        print("")
        print('j %d, Epoch1 %s/%s, Time: %s'
              % (j, e + 1, args.epoch, time.time() - starttime))

        # save the weights every 100 epochs
        if j % 100 == 0:
            generator_model.save_weights(
                'params_generator6_pix_epoch_{0:03d}.hdf5'.format(j), True)
            discriminator_model.save_weights(
                'params_discriminator6_pix_epoch_{0:03d}.hdf5'.format(j), True)
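# NOTE (editor's sketch, not part of the original script): DCGAN_model above is compiled
# with a custom l1l2_loss on the generator output, whose definition is not shown in this
# section. A minimal Keras-backend sketch of such a combined L1 + L2 reconstruction loss
# could look like the lines below; the equal weighting of the two terms and the function
# body are assumptions, only the name l1l2_loss comes from the code above.
#
#   from keras import backend as K
#
#   def l1l2_loss(y_true, y_pred):
#       # mean absolute error plus mean squared error over the generated image
#       return K.mean(K.abs(y_true - y_pred)) + K.mean(K.square(y_true - y_pred))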
def my_train(args):
    # create output folder
    if not os.path.exists(os.path.expanduser(args.datasetpath0)):
        os.mkdir(os.path.expanduser(args.datasetpath0))
    # create figures folder
    if not os.path.exists('./figures'):
        os.mkdir('./figures')

    # load data
    procImage, rawImage = my_load_data_train(args.datasetpath0)
    procImage_val, rawImage_val = my_load_data_test(args.datasetpath1)
    print('procImage.shape : ', procImage.shape)
    print('rawImage.shape : ', rawImage.shape)
    print('procImage_val : ', procImage_val.shape)
    print('rawImage_val : ', rawImage_val.shape)

    img_shape = rawImage.shape[-3:]
    print('img_shape : ', img_shape)
    patch_num = (img_shape[0] // args.patch_size) * (img_shape[1] // args.patch_size)
    disc_img_shape = (args.patch_size, args.patch_size, procImage.shape[-1])
    print('disc_img_shape : ', disc_img_shape)

    # optimizers
    opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    # load generator model
    generator_model = models.my_load_generator(img_shape, disc_img_shape)
    # generator_model.load_weights('params_generator_pix_epoch_1000.hdf5')

    # load discriminator model
    discriminator_model = models.my_load_DCGAN_discriminator(
        img_shape, disc_img_shape, patch_num)
    # discriminator_model.load_weights('params_discriminator_pix_epoch_1000.hdf5')

    generator_model.compile(loss='mae', optimizer=opt_discriminator)
    discriminator_model.trainable = False

    DCGAN_model = models.my_load_DCGAN(generator_model, discriminator_model,
                                       img_shape, args.patch_size)
    loss = [l1l2_loss, 'binary_crossentropy']
    loss_weights = [1E1, 1]
    DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)

    discriminator_model.trainable = True
    discriminator_model.compile(loss='binary_crossentropy',
                                optimizer=opt_discriminator)

    # start training
    j = 0
    print('start training')
    for e in range(args.epoch):
        starttime = time.time()

        # shuffle the training data and split it into mini-batches
        perm = np.random.permutation(rawImage.shape[0])
        X_procImage = procImage[perm]
        X_rawImage = rawImage[perm]
        X_procImageIter = [X_procImage[i:i + args.batch_size]
                           for i in range(0, rawImage.shape[0], args.batch_size)]
        X_rawImageIter = [X_rawImage[i:i + args.batch_size]
                          for i in range(0, rawImage.shape[0], args.batch_size)]

        b_it = 0
        progbar = generic_utils.Progbar(len(X_procImageIter) * args.batch_size)
        for (X_proc_batch, X_raw_batch) in zip(X_procImageIter, X_rawImageIter):
            b_it += 1

            # build a discriminator batch from generated and real patches
            X_disc, y_disc = get_disc_batch(X_proc_batch, X_raw_batch,
                                            generator_model, b_it, args.patch_size)
            raw_disc, _ = get_disc_batch(X_raw_batch, X_raw_batch,
                                         generator_model, 1, args.patch_size)
            x_disc = X_disc + raw_disc

            # update the discriminator
            disc_loss = discriminator_model.train_on_batch(x_disc, y_disc)

            # create a batch to feed the generator model
            idx = np.random.choice(procImage.shape[0], args.batch_size)
            X_gen_target, X_gen = procImage[idx], rawImage[idx]
            y_gen = np.zeros((X_gen.shape[0], 2), dtype=np.uint8)
            y_gen[:, 1] = 1

            # freeze the discriminator while the generator is updated through the GAN
            discriminator_model.trainable = False
            gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])
            # unfreeze the discriminator
            discriminator_model.trainable = True

            progbar.add(args.batch_size,
                        values=[("D logloss", disc_loss),
                                ("G tot", gen_loss[0]),
                                ("G L1", gen_loss[1]),
                                ("G logloss", gen_loss[2])])

        print("")
        print('j %d, Epoch %s/%s, Time: %s'
              % (j, e + 1, args.epoch, time.time() - starttime))

        # every 100 epochs, plot sample batches and save the model weights
        if j % 100 == 0:
            plot_generated_batch(X_proc_batch, X_raw_batch, generator_model,
                                 args.batch_size, "training" + str(j))
            idx = np.random.choice(procImage_val.shape[0], args.batch_size)
            X_gen_target, X_gen = procImage_val[idx], rawImage_val[idx]
            plot_generated_batch(X_gen_target, X_gen, generator_model,
                                 args.batch_size, "validation" + str(j))
            generator_model.save_weights(
                'params_generator_pix_epoch_{0:03d}.hdf5'.format(j), True)
            discriminator_model.save_weights(
                'params_discriminator_pix_epoch_{0:03d}.hdf5'.format(j), True)
        j += 1
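# NOTE (editor's sketch, not part of the original script): the my_train variant above
# reads args.datasetpath0, args.datasetpath1, args.patch_size, args.batch_size and
# args.epoch. The argument parser itself is outside this section; a hypothetical
# invocation consistent with those attributes (option names and defaults are assumed)
# might look like:
#
#   import argparse
#
#   parser = argparse.ArgumentParser(description='pix2pix-style GAN training')
#   parser.add_argument('--datasetpath0', type=str, help='training dataset directory')
#   parser.add_argument('--datasetpath1', type=str, help='validation dataset directory')
#   parser.add_argument('--patch_size', type=int, default=32, help='discriminator patch size')
#   parser.add_argument('--batch_size', type=int, default=5, help='mini-batch size')
#   parser.add_argument('--epoch', type=int, default=1000, help='number of training epochs')
#   my_train(parser.parse_args())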
def my_train(args):
    # create output folder
    if not os.path.exists(os.path.expanduser(args.datasetpath1)):
        os.mkdir(os.path.expanduser(args.datasetpath1))
    # create figures folder
    if not os.path.exists('./figures'):
        os.mkdir('./figures')

    # load test data for the six validation sets
    procImage_val0, rawImage_val0 = my_load_data_test(args.datasetpath0)
    procImage_val1, rawImage_val1 = my_load_data_test(args.datasetpath1)
    procImage_val2, rawImage_val2 = my_load_data_test(args.datasetpath2)
    procImage_val3, rawImage_val3 = my_load_data_test(args.datasetpath3)
    procImage_val4, rawImage_val4 = my_load_data_test(args.datasetpath4)
    procImage_val5, rawImage_val5 = my_load_data_test(args.datasetpath5)
    print('procImage_val : ', procImage_val0.shape)
    print('rawImage_val : ', rawImage_val0.shape)

    img_shape = rawImage_val0.shape[-3:]  # rawImage.shape[-3:]
    print('img_shape : ', img_shape)
    disc_img_shape = (args.patch_size, args.patch_size, procImage_val0.shape[-1])
    print('disc_img_shape : ', disc_img_shape)

    opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    # load generator model and pre-trained weights
    generator_model = models.my_load_generator(img_shape, disc_img_shape)
    generator_model.load_weights('params_generator6u_pix_epoch_8000.hdf5')  # 160 - akb face
    generator_model.compile(loss='mae', optimizer=opt_discriminator)
    generator_model.trainable = False

    imgs = []
    for j in range(0, 1):
        idx = [0 + 10 * j, 1 + 10 * j, 2 + 10 * j, 3 + 10 * j, 4 + 10 * j,
               5 + 10 * j, 6 + 10 * j, 7 + 10 * j, 8 + 10 * j, 9 + 10 * j]
        # idx = np.random.choice(procImage_val.shape[0], args.batch_size)
        print("j=", j, "idx=", idx)

        # plot one batch per validation set and collect each figure as an array
        for procImage_val, rawImage_val in [(procImage_val0, rawImage_val0),
                                            (procImage_val1, rawImage_val1),
                                            (procImage_val2, rawImage_val2),
                                            (procImage_val3, rawImage_val3),
                                            (procImage_val4, rawImage_val4),
                                            (procImage_val5, rawImage_val5)]:
            X_gen_target, X_gen = procImage_val[idx], rawImage_val[idx]
            XX = plot_generated_batch(X_gen_target, X_gen, generator_model,
                                      args.batch_size, "validation" + str(0) + ".png")
            imgs.append(img_to_array(XX))

        # plt.imshow(imgs[j])
        # plt.axis('off')
        # plt.savefig("./figures/current_batch_" + "validation" + str(j) + ".png")

    # stack the six figures vertically and save one combined image
    YY = np.concatenate((imgs[0], imgs[1], imgs[2], imgs[3], imgs[4], imgs[5]), axis=0)
    plt.imshow(YY)
    plt.axis('off')
    plt.savefig("./figures/current_batch_" + "mayu2kitaRaw_test1" + ".png")
    plt.clf()
    plt.close()
"""
def my_train(args):
    # create output folder
    if not os.path.exists(os.path.expanduser(args.datasetpath00)):
        os.mkdir(os.path.expanduser(args.datasetpath00))
    # create figures folder
    if not os.path.exists('./figures'):
        os.mkdir('./figures')

    # load data for the three datasets
    procImage, rawImage = my_load_data_train(args.datasetpath00)
    procImage_val, rawImage_val = my_load_data_test(args.datasetpath01)
    print('procImage.shape : ', procImage.shape)
    print('rawImage.shape : ', rawImage.shape)
    print('procImage_val : ', procImage_val.shape)
    print('rawImage_val : ', rawImage_val.shape)

    procImage2, rawImage2 = my_load_data_train(args.datasetpath10)
    procImage_val2, rawImage_val2 = my_load_data_test(args.datasetpath11)
    print('procImage.shape : ', procImage2.shape)
    print('rawImage.shape : ', rawImage2.shape)
    print('procImage_val : ', procImage_val2.shape)
    print('rawImage_val : ', rawImage_val2.shape)

    procImage1, rawImage1 = my_load_data_train(args.datasetpath20)
    procImage_val1, rawImage_val1 = my_load_data_test(args.datasetpath21)
    print('procImage.shape : ', procImage1.shape)
    print('rawImage.shape : ', rawImage1.shape)
    print('procImage_val : ', procImage_val1.shape)
    print('rawImage_val : ', rawImage_val1.shape)

    img_shape = rawImage.shape[-3:]
    print('img_shape : ', img_shape)
    patch_num = (img_shape[0] // args.patch_size) * (img_shape[1] // args.patch_size)
    disc_img_shape = (args.patch_size, args.patch_size, procImage.shape[-1])
    print('disc_img_shape : ', disc_img_shape)

    # optimizers
    opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    # load generator model
    generator_model = models.my_load_generator(img_shape, disc_img_shape)
    # generator_model.load_weights('params_generator1_pix_epoch_2000.hdf5')

    # load discriminator model
    discriminator_model = models.my_load_DCGAN_discriminator(
        img_shape, disc_img_shape, patch_num)
    # discriminator_model.load_weights('params_discriminator1_pix_epoch_2000.hdf5')

    generator_model.compile(loss='mae', optimizer=opt_discriminator)
    discriminator_model.trainable = False

    DCGAN_model = models.my_load_DCGAN(generator_model, discriminator_model,
                                       img_shape, args.patch_size)
    loss = [l1_loss, 'binary_crossentropy']
    loss_weights = [1E1, 1]
    DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)

    discriminator_model.trainable = True
    discriminator_model.compile(loss='binary_crossentropy',
                                optimizer=opt_discriminator)

    # start training
    j = 0
    print('start training')
    starttime = time.time()

    # shuffle every dataset with the same permutation and split into mini-batches
    perm = np.random.permutation(rawImage.shape[0])
    X_procImage = procImage[perm]
    X_rawImage = rawImage[perm]
    X_procImageIter = [X_procImage[i:i + args.batch_size]
                       for i in range(0, rawImage.shape[0], args.batch_size)]
    X_rawImageIter = [X_rawImage[i:i + args.batch_size]
                      for i in range(0, rawImage.shape[0], args.batch_size)]

    X_procImage2 = procImage2[perm]
    X_rawImage2 = rawImage2[perm]
    X_procImageIter2 = [X_procImage2[i:i + args.batch_size]
                        for i in range(0, rawImage2.shape[0], args.batch_size)]
    X_rawImageIter2 = [X_rawImage2[i:i + args.batch_size]
                       for i in range(0, rawImage2.shape[0], args.batch_size)]

    X_procImage1 = procImage1[perm]
    X_rawImage1 = rawImage1[perm]
    X_procImageIter1 = [X_procImage1[i:i + args.batch_size]
                        for i in range(0, rawImage1.shape[0], args.batch_size)]
    X_rawImageIter1 = [X_rawImage1[i:i + args.batch_size]
                       for i in range(0, rawImage1.shape[0], args.batch_size)]

    # datasets are visited in the original order 0, 2, 1; the suffix is used in figure names
    datasets = [
        (procImage, rawImage, X_procImageIter, X_rawImageIter, ""),
        (procImage2, rawImage2, X_procImageIter2, X_rawImageIter2, "2_"),
        (procImage1, rawImage1, X_procImageIter1, X_rawImageIter1, "1_"),
    ]
    val_sets = [(procImage_val, rawImage_val, ""),
                (procImage_val2, rawImage_val2, "2_"),
                (procImage_val1, rawImage_val1, "1_")]

    for e in range(args.epoch):
        last_batches = []
        for (procI, rawI, procIter, rawIter, suffix) in datasets:
            b_it = 0
            progbar = generic_utils.Progbar(len(procIter) * args.batch_size)
            for (X_proc_batch, X_raw_batch) in zip(procIter, rawIter):
                b_it += 1

                # build a discriminator batch from generated and real patches
                X_disc, y_disc = get_disc_batch(X_proc_batch, X_raw_batch,
                                                generator_model, b_it, args.patch_size)
                raw_disc, _ = get_disc_batch(X_raw_batch, X_raw_batch,
                                             generator_model, 1, args.patch_size)
                x_disc = X_disc + raw_disc

                # update the discriminator
                disc_loss = discriminator_model.train_on_batch(x_disc, y_disc)

                # create a batch to feed the generator model
                idx = np.random.choice(procI.shape[0], args.batch_size)
                X_gen_target, X_gen = procI[idx], rawI[idx]
                y_gen = np.zeros((X_gen.shape[0], 2), dtype=np.uint8)
                y_gen[:, 1] = 1

                # freeze the discriminator while the generator is updated through the GAN
                discriminator_model.trainable = False
                gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])
                # unfreeze the discriminator
                discriminator_model.trainable = True

                progbar.add(args.batch_size,
                            values=[("D logloss", disc_loss),
                                    ("G tot", gen_loss[0]),
                                    ("G L1", gen_loss[1]),
                                    ("G logloss", gen_loss[2])])
            last_batches.append((X_proc_batch, X_raw_batch))

        # save images for visualization (the running index j is written into the file name)
        # if b_it % (procImage.shape[0] // args.batch_size // 2) == 0:
        if j % 100 == 0:
            for (X_proc_batch, X_raw_batch), (procV, rawV, suffix) in zip(last_batches, val_sets):
                plot_generated_batch(X_proc_batch, X_raw_batch, generator_model,
                                     args.batch_size, "training" + suffix + str(j))
                idx = np.random.choice(procV.shape[0], args.batch_size)
                X_gen_target, X_gen = procV[idx], rawV[idx]
                plot_generated_batch(X_gen_target, X_gen, generator_model,
                                     args.batch_size, "validation" + suffix + str(j))

        j += 1
        print("")
        print('j %d, Epoch1 %s/%s, Time: %s'
              % (j, e + 1, args.epoch, time.time() - starttime))

        # save the weights every 100 epochs
        if j % 100 == 0:
            generator_model.save_weights(
                'params_generator1_pix_epoch_{0:03d}.hdf5'.format(j), True)
            discriminator_model.save_weights(
                'params_discriminator1_pix_epoch_{0:03d}.hdf5'.format(j), True)
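# NOTE (editor's sketch, not part of the original script): this last variant compiles
# DCGAN_model with l1_loss instead of l1l2_loss; its definition is also outside this
# section. A minimal Keras-backend sketch of a plain L1 reconstruction loss (the body is
# an assumption, only the name l1_loss comes from the code above) would be:
#
#   from keras import backend as K
#
#   def l1_loss(y_true, y_pred):
#       # mean absolute error over the generated image
#       return K.mean(K.abs(y_true - y_pred))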