def train(model_name, gpu_id):
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'], 'train')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('posewarp-cvpr2018/data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    model.load_weights(network_dir + '/' + 'weights_gan_model.h5')
    model.summary()

    n_iters = params['n_training_iter']

    for step in range(0, n_iters):
        x, y = next(train_feed)
        train_loss = model.train_on_batch(x, y)
        util.printProgress('Iteration: ', step, ' Train_loss: ', train_loss)

        if step > 0 and step % params['model_save_interval'] == 0:
            model.save_weights(network_dir + '/' + 'weights_model_gan_improved' + '.h5')
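# A hypothetical command-line entry point for the training function above; the
# argument order (model name, then GPU id) mirrors the train() signature. This
# block is a sketch and not part of the original file.
if __name__ == "__main__":
    import sys
    if len(sys.argv) != 3:
        print("Usage: python train.py <model_name> <gpu_id>")
        sys.exit(1)
    train(sys.argv[1], sys.argv[2])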
def evaluate(model_name, gpu_id):
    params = param.get_general_params()

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(params['data_dir'] + '/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.compile(optimizer=Adam(),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    iterations = range(1000, 185001, 1000)
    n_batches = 25
    losses = []
    for i in iterations:
        print(i)
        model.load_weights('../models/' + model_name + '/' + str(i) + '.h5')
        np.random.seed(11)
        feed = data_generation.create_feed(params, params['data_dir'], 'train')

        loss = 0
        for batch in range(n_batches):
            x, y = next(feed)
            loss += model.evaluate(x, y)
        loss /= (n_batches * 1.0)
        losses.append(loss)
        sio.savemat('losses_by_iter.mat', {'losses': losses, 'iterations': iterations})
def train(dataset, gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    np.random.seed(17)
    feed = datageneration.createFeed(params, 'test_vids.txt', 5000, False, False, False, True)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    with tf.device(gpu):
        vgg_model = myVGG.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('mean_response.mat')
        fgbg = networks.network_fgbg(params, vgg_model, response_weights)
        fgbg.load_weights('../results/networks/fgbg_vgg/184000.h5')
        # disc = networks.discriminator(params)
        # gan = networks.gan(fgbg, disc, params, vgg_model, response_weights, 0.01, 1e-4)
        # gan.load_weights('../results/networks/fgbg_gan/7000.h5')

    n_batches = 200
    for j in range(n_batches):
        print(j)
        X, Y = next(feed)
        pred = fgbg.predict(X)
        sio.savemat('results/transfer_vgg/' + str(j) + '.mat',
                    {'X': X[0], 'Y': Y, 'pred': pred})
def train(model_name, gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    network_dir = params['project_dir'] + '/results/networks/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = datageneration.createFeed(params, "train_vids.txt")
    # test_feed = datageneration.createFeed(params, "test_vids.txt")

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    with tf.device(gpu):
        vgg_model = myVGG.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('mean_response.mat')
        model = networks.network_fgbg(params)
        # model.load_weights('../results/networks/fgbg_vgg/60000.h5')
        model.compile(optimizer=Adam(lr=1e-4),
                      loss=[networks.vggLoss(vgg_model, response_weights)])
        # model.summary()

    for step in range(0, 250000):
        start = time.time()
        # X, Y = next(test_feed)
        # sio.savemat('data/data' + str(step) + '.mat',
        #             {'X': X[0], 'Y': Y, 'ps': X[1], 'pt': X[2], 'mask': X[3]})
        # return

        X, Y = next(train_feed)
        with tf.device(gpu):
            train_loss = model.train_on_batch(X, Y)

        end = time.time()
        util.printProgress(step, 0, train_loss, end - start)

        '''
        if step % params['test_interval'] == 0:
            n_batches = 8
            test_loss = 0
            for j in range(n_batches):
                X, Y = next(test_feed)
                test_loss += np.array(model.test_on_batch(X, Y))
            test_loss /= n_batches
            util.printProgress(step, 1, test_loss, 0)
        '''

        if step > 0 and step % params['model_save_interval'] == 0:
            model.save(network_dir + '/' + str(step) + '.h5')
def test(model_name, gpu_id):
    params = param.get_general_params()

    TEST_PATH = params['data_dir'] + "/exam/test_golf/"
    SRC_IMG = TEST_PATH + "ref_img/"
    TGT_POS = TEST_PATH + "ref_pose/"

    with tf.Session() as sess:
        network_dir = params['model_save_dir'] + '/' + model_name

        # Fail early if the requested model directory does not exist.
        if not os.path.isdir(network_dir):
            raise IOError("No model named '" + model_name + "' found!")

        img_feed = data_generation.create_feed(params, SRC_IMG, mode="test", do_augment=False)
        pos_feed = data_generation.create_feed(params, TGT_POS, mode="test", do_augment=False)

        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

        ckp_name = [f for f in listdir(network_dir) if isfile(join(network_dir, f))][-1]

        model = networks.network_posewarp(params)
        model.compile(optimizer=Adam(lr=1e-4),
                      loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
        model.load_weights(network_dir + "/" + ckp_name)

        n_iters = 1
        summary_writer = tf.summary.FileWriter(
            r"D:\Proyectos\JEJU2018\Code\posewarp-cvpr2018\code\logs", graph=sess.graph)

        for step in range(0, n_iters):
            x_img = next(img_feed)
            x_pos = next(pos_feed)

            # out = sess.run(conv, feed_dict={"input_1:0": x[0]})
            # plt.matshow(out[0, :, :, 0])
            # plt.show()

            gen = tf.get_default_graph().get_tensor_by_name("loss/add_2_loss/lambda_5/add:0")
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
            # out = tf.get_default_graph().get_tensor_by_name("in_img1:0")

            image_summary_op = tf.summary.image('images',
                                                [inp[0, :, :, :], gen[0, :, :, :]],
                                                max_outputs=100)
            image_summary = sess.run(image_summary_op,
                                     feed_dict={"in_img0:0": x_img[0],
                                                "in_pose0:0": x_img[1],
                                                "in_pose1:0": x_pos[2],
                                                "mask_prior:0": x_img[3],
                                                "trans_in:0": x_img[4]})
            summary_writer.add_summary(image_summary)
            print(image_summary_op)
def train(dataset, gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    with tf.device(gpu):
        vgg_model = myVGG.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('mean_response.mat')
        fgbg = networks.network_fgbg(params, vgg_model, response_weights)
        # fgbg.load_weights('../results/networks/fgbg_vgg/140000.h5')
        disc = networks.discriminator(params)
        gan = networks.gan(fgbg, disc, params, vgg_model, response_weights, 0.01, 1e-4)
        gan.load_weights('../results/networks/fgbg_gan/2000.h5')

    outputs = [fgbg.outputs[0]]
    # outputs.append(fgbg.get_layer('mask_src').output)
    # outputs.append(fgbg.get_layer('fg_stack').output)
    # outputs.append(fgbg.get_layer('bg_src').output)
    # outputs.append(fgbg.get_layer('bg_tgt').output)
    # outputs.append(fgbg.get_layer('fg_tgt').output)
    outputs.append(fgbg.get_layer('fg_mask_tgt').output)
    model = Model(fgbg.inputs, outputs)

    test = datareader.makeActionExampleList('test_vids.txt', 1)
    feed = datageneration.warpExampleGenerator(test, params, do_augment=False,
                                               return_pose_vectors=True)

    n_frames = len(test)
    true_action = np.zeros((256, 256, 3, n_frames))
    pred_action = np.zeros((256, 256, 3, n_frames))
    mask = np.zeros((256, 256, 1, n_frames))

    for i in range(n_frames):
        print(i)
        X, Y = next(feed)
        pred = model.predict(X[:-2])
        true_action[:, :, :, i] = convert(np.reshape(Y, (256, 256, 3)))
        pred_action[:, :, :, i] = convert(np.reshape(pred[0], (256, 256, 3)))
        mask[:, :, :, i] = pred[1]

    sio.savemat('results/action/1_gan.mat',
                {'true': true_action, 'pred': pred_action, 'mask': mask})
def test(model_name, save_dir, gpu_id, vid_i, iter_num=9999, dbg=False):
    params = param.get_general_params()
    img_width = params['IMG_WIDTH']
    img_height = params['IMG_HEIGHT']

    test_feed, dir_len = data_generation.create_test_feed(
        params, 5, vid_i=vid_i,
        txtfile=f'../testset_5_v3/test_{vid_i}_img.txt',
        k_txtfile=f'../testset_5_v3/train_{vid_i}_img.txt')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    weight_path = str(os.path.join(params['model_save_dir'],
                                   os.path.join(f"{model_name}", f'{iter_num}.h5')))  # model name doesn't super work
    model.load_weights(weight_path)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    model.summary()

    n_iters = params['n_training_iter']

    gen = np.zeros((dir_len, 3, 256, 256))
    scores = np.zeros((dir_len, 3))

    for j in range(1 if dbg else dir_len):
        try:
            x, y, scale, pos, img_num, src = next(test_feed)
            arr_loss = model.predict_on_batch(x)
        except cv2.error as e:
            print("OpenCV Error, gonna ignore")
            continue

        i = 0
        generated = (arr_loss[i] + 1) * 128
        gen_resized = data_generation.reverse_center_and_scale_image(generated, img_width, img_height, pos, scale)
        target = (y[i] + 1) * 128
        target_resized = data_generation.reverse_center_and_scale_image(target, img_width, img_height, pos, scale)
        source = (x[0][i] + 1) * 128
        # resized_source = cv2.resize(source, (0, 0), fx=2, fy=2)
        # source_resized = data_generation.reverse_center_and_scale_image(source, img_width, img_height, pos, scale)
        modified_img = data_generation.add_source_to_image(gen_resized, src)
        cv2.imwrite(save_dir + f'/{img_num:08d}.png', modified_img)

        gen[j] = np.transpose(generated, (2, 0, 1))
        scores[j][0] = compare_ssim(generated, target, multichannel=True, data_range=256)
        scores[j][1] = compare_psnr(generated, target, data_range=256)
        scores[j][2] = compare_mse(generated, target)

    mean_scores = scores.mean(axis=0)
    std_scores = scores.std(axis=0)
    print(mean_scores)
    print(std_scores)

    save_dict = os.path.join(save_dir, f"saved_scores_{vid_i}.pkl")
    pickle.dump(scores, open(save_dict, "wb"))
def train(dataset, gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    np.random.seed(17)
    feed = datageneration.createFeed(params, 'test_vids.txt', False, True)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    with tf.device(gpu):
        vgg_model = myVGG.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('mean_response.mat')
        fgbg = networks.network_fgbg(params)
        fgbg.load_weights('../results/networks/fgbg_vgg/45000.h5')
        # disc = networks.discriminator(params)
        # gan = networks.gan(fgbg, disc, params, vgg_model, response_weights, 0.01, 1e-4)
        # gan.load_weights('../results/networks/fgbg_gan/7000.h5')

    outputs = [fgbg.outputs[0]]
    outputs.append(fgbg.get_layer('mask_src').output)
    outputs.append(fgbg.get_layer('fg_stack').output)
    outputs.append(fgbg.get_layer('bg_src').output)
    outputs.append(fgbg.get_layer('bg_tgt').output)
    outputs.append(fgbg.get_layer('fg_tgt').output)
    outputs.append(fgbg.get_layer('fg_mask_tgt').output)
    model = Model(fgbg.inputs, outputs)

    n_batches = 10
    for j in range(n_batches):
        print(j)
        X, Y = next(feed)
        pred = model.predict(X[:-2])
        sio.savemat('results/fgbg_vgg/' + str(j) + '.mat',
                    {'X': X[0], 'Y': Y, 'pred': pred[0], 'mask_src': pred[1],
                     'fg_stack': pred[2], 'bg_src': pred[3], 'bg_tgt': pred[4],
                     'fg_tgt': pred[5], 'fg_mask_tgt': pred[6], 'prior': X[3],
                     'pose_src': X[-2], 'pose_tgt': X[-1]})
def test(model_name, gpu_id):
    with tf.Session() as sess:
        params = param.get_general_params()
        network_dir = params['model_save_dir'] + '/' + model_name

        # Creates the model directory if it does not exist.
        if not os.path.isdir(network_dir):
            print(network_dir)
            os.mkdir(network_dir)

        test_feed = data_generation.create_feed(params, params['data_dir'], 'test')

        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

        ckp_name = [f for f in listdir(network_dir) if isfile(join(network_dir, f))][-1]

        model = networks.network_posewarp(params)
        model.compile(optimizer=Adam(lr=1e-4),
                      loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
        model.load_weights(network_dir + "/" + ckp_name)

        n_iters = 100
        summary_writer = tf.summary.FileWriter(
            r"D:\Proyectos\JEJU2018\Code\posewarp-cvpr2018\code\logs", graph=sess.graph)

        for step in range(0, n_iters):
            x, y = next(test_feed)
            test_loss = model.test_on_batch(x, y)
            util.printProgress(step, 0, test_loss)

            # out = sess.run(conv, feed_dict={"input_1:0": x[0]})
            # plt.matshow(out[0, :, :, 0])
            # plt.show()

            gen = tf.get_default_graph().get_tensor_by_name("loss/add_2_loss/lambda_5/add:0")
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
            out = tf.get_default_graph().get_tensor_by_name("in_img1:0")

            image_summary_op = tf.summary.image('images',
                                                [inp[0, :, :, :], out[0, :, :, :], gen[0, :, :, :]],
                                                max_outputs=100)
            image_summary = sess.run(image_summary_op,
                                     feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1],
                                                "in_pose1:0": x[2], "mask_prior:0": x[3],
                                                "trans_in:0": x[4], "in_img1:0": y})
            summary_writer.add_summary(image_summary)
def train(model_name, gpu_id):
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'], 'train')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(params['data_dir'] + '/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    if params['load_weights'] is not None:
        model.load_weights(params['load_weights'])

    # Blend the VGG perceptual loss with a pixel-wise L2 term.
    alpha = 0.4
    # model.compile(optimizer=Adam(lr=1e-4), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    model.compile(
        optimizer=Adam(lr=1e-4),
        loss=lambda y_true, y_pred:
            (1 - alpha) * networks.vgg_loss(vgg_model, response_weights, 12)(y_true, y_pred)
            + alpha * tf.reduce_mean(tf.square(y_pred - y_true)))
    # model.summary()

    n_iters = params['n_training_iter']
    loss_note = []

    if params['load_weights'] is None:
        start = 0
    else:
        start = int(params['load_weights'].split("/")[-1][:-3])

    for step in range(start, n_iters):
        x, y = next(train_feed)
        train_loss = model.train_on_batch(x, y)
        loss_note.append([str(step), str(train_loss)])
        util.printProgress(step, 0, train_loss)

        if step > 0 and step % params['model_save_interval'] == 0:
            model.save(network_dir + '/' + str(step) + '.h5')
            pd.DataFrame(loss_note).to_csv(network_dir + f"/{step}.csv", header=None, index=None)
            loss_note = []
def train(gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    test_params = param.getDatasetParams('test-aux')
    _, test = datareader.makeWarpExampleList(test_params, 0, 200, 2, 0)

    feed = datageneration.warpExampleGenerator(test, params, do_augment=False,
                                               draw_skeleton=False, skel_color=(0, 0, 255))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        with tf.device(gpu):
            vgg_model = myVGG.vgg_norm()
            networks.make_trainable(vgg_model, False)
            response_weights = sio.loadmat('mean_response.mat')
            fgbg = networks.network_fgbg(params, vgg_model, response_weights, True)

        np.random.seed(17)
        n_batches = 50
        for i in range(146000, 1452000, 2000):
            fgbg.load_weights('../results/networks/fgbg_boundary/' + str(i) + '.h5')
            loss = 0
            for j in range(n_batches):
                X, Y = next(feed)
                loss += fgbg.test_on_batch(X, Y)
            loss /= n_batches
            print(loss)
            sys.stdout.flush()
def train(model_name, gpu_id, start_iter=0):
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    tf_writer = tf.summary.FileWriter(network_dir + "/log/")

    train_feed = data_generation.create_feed(params, None, ModelMode.train)

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.load_weights('../models/vgg_100000.h5')
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    # model.summary()

    n_iters = params['n_training_iter']

    for step in range(start_iter, n_iters + 1):
        x, y = next(train_feed)
        train_loss = model.train_on_batch(x, y)
        util.printProgress(step, 0, train_loss)

        summary = tf.Summary(value=[tf.Summary.Value(tag="train_loss", simple_value=train_loss)])
        tf_writer.add_summary(summary, step)
        if step > 0 and step % 100 == 0:
            tf_writer.flush()

        if step > 0 and step % params['model_save_interval'] == 0:
            model.save(network_dir + '/' + str(step) + '.h5')

    model.save(network_dir + '/' + str(step) + '.h5')
def predict(model_name, gpu_id, save_file_name):
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    save_dir = params['model_save_dir'] + '/' + model_name + '/result'
    params['batch_size'] = 1

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(params['data_dir'] + '/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    # model.compile(optimizer=Adam(), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    model.load_weights(network_dir + save_file_name)  # TODO not sure the final ckpt name

    np.random.seed(112)
    feed = data_generation.create_feed(params, params['data_dir'], 'train', do_augment=False)

    cnt = 8
    while True:
        try:
            x, y = next(feed)

            inp = recover2img(x[0])
            cv2.imwrite(os.path.join(save_dir, str(cnt) + "inp.jpg"), inp[0])
            # cv2.imwrite(os.path.join(save_dir, str(cnt) + "map.jpg", x[2][0][:, :, 0]))

            out = model.predict(x)
            out = recover2img(out[0])
            cv2.imwrite(os.path.join(save_dir, str(cnt) + ".jpg"), out)

            gt = recover2img(y[0])
            cv2.imwrite(os.path.join(save_dir, str(cnt) + "gt.jpg"), gt)
            cnt += 1
            break
        except:
            break
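# recover2img is a helper defined elsewhere in this codebase. A minimal sketch of
# what it presumably does, based on the [-1, 1] image normalization used by the
# other scripts here (e.g. the (y / 2 + 0.5) * 255 mapping); this is an
# assumption, not the repo's exact implementation.
def recover2img(img):
    # Map network inputs/outputs from [-1, 1] back to [0, 255] for writing with OpenCV.
    return (img / 2.0 + 0.5) * 255.0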
def train(model_name, gpu_id):
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'], 'train')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    # model.summary()

    n_iters = params['n_training_iter']

    log_dir = '../log/{:s}'.format(model_name)
    callback = TensorBoard(log_dir, write_graph=True)
    callback.set_model(model)
    train_names = ['train_loss']

    for step in range(0, n_iters):
        x, y = next(train_feed)
        train_loss = model.train_on_batch(x, y)
        util.printProgress(step, 0, train_loss)
        write_log(callback, train_names, [train_loss], step)

        if step > 0 and step % params['model_save_interval'] == 0:
            model.save(network_dir + '/' + str(step) + '.h5')
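# write_log is not defined in this file. A minimal sketch, assuming the common
# Keras TensorBoard-callback logging pattern from this TF 1.x era; the name and
# argument order simply mirror the call site above.
def write_log(callback, names, logs, step):
    # Write each named scalar to the callback's underlying FileWriter.
    for name, value in zip(names, logs):
        summary = tf.Summary(value=[tf.Summary.Value(tag=name, simple_value=value)])
        callback.writer.add_summary(summary, step)
        callback.writer.flush()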
def reptile_outer_loop(model_name, gpu_id, dbg=False, k=5, T=20):
    network_dir = f'/home/jl5/data/data-posewarp/models/{model_name}'
    os.makedirs(network_dir)

    params = param.get_general_params()
    img_width = params['IMG_WIDTH']
    img_height = params['IMG_HEIGHT']

    # load the original pretrained weights
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    weight_path = '../models/vgg_100000.h5'
    model.load_weights(weight_path)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    for i in range(params['meta_iterations'] + 1):
        print(i)

        # select k images
        data = data_generation.create_feed(params, k, ModelMode.metatrain)
        old_weights = deepcopy(extract_weights(model))

        # train on batch for T iterations starting from init/old weights
        model = train_T_iter(model, T, data)
        new_weights = extract_weights(model)

        updated_weights = compute_reptile(new_weights, old_weights, params['epsilon'])
        model = set_weights(model, updated_weights)

        # test every like 300 iterations?
        if i % params['metamodel_save_interval'] == 0:
            model.save(network_dir + '/' + str(i) + '.h5')

    return model
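# extract_weights, train_T_iter, set_weights and compute_reptile are helpers that
# live elsewhere in this codebase. A minimal sketch of the Reptile meta-update
# that compute_reptile appears to implement (the name and epsilon semantics are
# assumptions, not the repo's exact code):
def compute_reptile(new_weights, old_weights, epsilon):
    # Move the initialization a fraction epsilon along the task-adapted direction:
    # theta <- theta + epsilon * (theta_adapted - theta)
    return [old + epsilon * (new - old) for new, old in zip(new_weights, old_weights)]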
def train(model_name, gpu_id):
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'], 'train')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    gan_lr = 1e-4
    disc_lr = 1e-4
    disc_loss = 0.1

    generator = networks.network_posewarp(params)
    generator.load_weights('../models/vgg_100000.h5')

    discriminator = networks.discriminator(params)
    discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=disc_lr))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

    gan = networks.gan(generator, discriminator, params)
    gan.compile(optimizer=Adam(lr=gan_lr),
                loss=[networks.vgg_loss(vgg_model, response_weights, 12), 'binary_crossentropy'],
                loss_weights=[1.0, disc_loss])

    n_iters = 10000
    batch_size = params['batch_size']

    for step in range(n_iters):
        x, y = next(train_feed)
        gen = generator.predict(x)

        # Train discriminator
        x_tgt_img_disc = np.concatenate((y, gen))
        x_src_pose_disc = np.concatenate((x[1], x[1]))
        x_tgt_pose_disc = np.concatenate((x[2], x[2]))

        L = np.zeros([2 * batch_size])
        L[0:batch_size] = 1

        inputs = [x_tgt_img_disc, x_src_pose_disc, x_tgt_pose_disc]
        d_loss = discriminator.train_on_batch(inputs, L)

        # Train the discriminator a couple of iterations before starting the gan
        if step < 5:
            util.printProgress(step, 0, [0, d_loss])
            step += 1
            continue

        # TRAIN GAN
        L = np.ones([batch_size])
        x, y = next(train_feed)
        g_loss = gan.train_on_batch(x, [y, L])
        util.printProgress(step, 0, [g_loss[1], d_loss])

        if step % params['model_save_interval'] == 0 and step > 0:
            gan.save(network_dir + '/' + str(step) + '.h5')
def train(dataset, gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    np.random.seed(17)
    feed = datageneration.createFeed(params, 'test_vids.txt', 5000, False, True, True)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    with tf.device(gpu):
        vgg_model = myVGG.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('mean_response.mat')

        # fgbg_vgg = networks.network_fgbg(params, vgg_model, response_weights)
        # fgbg_vgg.load_weights('../results/networks/fgbg_vgg/184000.h5')
        # gen = networks.network_fgbg(params, vgg_model, response_weights)
        # disc = networks.discriminator(params)
        # gan = networks.gan(gen, disc, params, vgg_model, response_weights, 0.1, 1e-4)
        # gan.load_weights('../results/networks/fgbg_gan/7000.h5')
        # fgbg_l1 = networks.network_fgbg(params, vgg_model, response_weights, loss='l1')
        # fgbg_l1.load_weights('../results/networks/fgbg_l1/100000.h5')
        # mask_model = Model(fgbg_vgg.inputs, fgbg_vgg.get_layer('fg_mask_tgt').output)

        ed_vgg = networks.network_pix2pix(params, vgg_model, response_weights)
        ed_vgg.load_weights('../results/networks/ed_vgg/135000.h5')

        gen = networks.network_pix2pix(params, vgg_model, response_weights)
        disc = networks.discriminator(params)
        gan = networks.gan(gen, disc, params, vgg_model, response_weights, 0.1, 1e-4)
        gan.load_weights('../results/networks/ed_gan/2000.h5')

        ed_l1 = networks.network_pix2pix(params, vgg_model, response_weights, loss='l1')
        ed_l1.load_weights('../results/networks/ed_l1/80000.h5')

    n_examples = 500
    metrics = np.zeros((n_examples, 9))
    poses = np.zeros((n_examples, 28 * 2))
    classes = np.zeros(n_examples)

    for j in range(n_examples):
        print(j)
        X, Y = next(feed)
        pred_l1 = ed_l1.predict(X[:3])    # X[:-3])
        pred_vgg = ed_vgg.predict(X[:3])  # X[:-3])
        pred_gan = gen.predict(X[:3])     # X[:-3])

        '''
        # mask = mask_model.predict(X[:-3])
        pred_l1_fg = pred_l1 * mask
        pred_vgg_fg = pred_vgg * mask
        pred_gan_fg = pred_gan * mask
        pred_l1_bg = pred_l1 * (1 - mask)
        pred_vgg_bg = pred_vgg * (1 - mask)
        pred_gan_bg = pred_gan * (1 - mask)
        Y_fg = Y * mask
        Y_bg = Y * (1 - mask)
        '''

        # , pred_l1_fg, pred_vgg_fg, pred_gan_fg, pred_l1_bg, pred_vgg_bg, pred_gan_bg]
        # , Y_fg, Y_fg, Y_fg, Y_bg, Y_bg, Y_bg]
        preds = [pred_l1, pred_vgg, pred_gan]
        targets = [Y, Y, Y]

        metrics[j, 0:3] = [l1Error(preds[i], targets[i]) for i in range(len(preds))]
        metrics[j, 3:6] = [vggError(vgg_model.predict(util.vgg_preprocess(preds[i])),
                                    vgg_model.predict(util.vgg_preprocess(targets[i])),
                                    response_weights) for i in range(len(preds))]
        metrics[j, 6:] = [ssimError(preds[i], targets[i]) for i in range(len(preds))]

        poses[j, 0:28] = X[-3]
        poses[j, 28:] = X[-2]
        classes[j] = int(X[-1])

        sio.savemat('results/comparison/pix2pix/' + str(j) + '.mat',
                    {'X': X[0], 'Y': Y, 'pred_l1': pred_l1, 'pred_vgg': pred_vgg, 'pred_gan': pred_gan})

    sio.savemat('results/comparison_pix2pix.mat',
                {'metrics': metrics, 'poses': poses, 'classes': classes})
def train(model_name, gpu_id):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    with tf.Session() as sess:
        params = param.get_general_params()
        network_dir = params['model_save_dir'] + '/' + model_name

        # Creates models directory if not exist.
        if not os.path.isdir(network_dir):
            os.mkdir(network_dir)

        train_feed = data_generation.create_feed(params, params['data_dir'], 'train')
        # test_feed = data_generation.create_feed(params, params['data_dir'], 'test')

        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        tf_config.allow_soft_placement = True

        with tf.Session(config=tf_config) as sess:
            # Load VGG truncated model.
            vgg_model = truncated_vgg.vgg_norm()
            networks.make_trainable(vgg_model, False)

            # Load VGG pretrained weights.
            response_weights = sio.loadmat('../Models/vgg_activation_distribution_train.mat')

            # Create graph and compile keras model.
            model = networks.network_posewarp(params)
            tloss = networks.vgg_loss(vgg_model, response_weights, 12)
            model.compile(optimizer=Adam(lr=1e-4), loss=[tloss])

            # Get number of training steps.
            n_iters = params['n_training_iter']

            # Create a tensorboard writer.
            summary_writer = tf.summary.FileWriter("./logs/run2/", graph=sess.graph)

            tr_x, tr_y = next(train_feed)
            # te_x, te_y = next(test_feed)

            # Prepare output directories if they don't exist.
            output_dir = '../Output/' + model_name + '/'
            if not os.path.isdir(output_dir):
                os.mkdir(output_dir)

            scipy.misc.imsave('../Output/' + model_name + '/tr_orig_image.png', tr_x[0][0, :, :, :])
            scipy.misc.imsave('../Output/' + model_name + '/tr_targ_image.png', tr_y[0, :, :, :])
            # scipy.misc.imsave('../Output/' + model_name + '/te_orig_image.png', te_x[0][0, :, :, :])
            # scipy.misc.imsave('../Output/' + model_name + '/te_targ_image.png', te_y[0, :, :, :])

            # Tensorboard logged tensors.
            gen = tf.get_default_graph().get_tensor_by_name("loss/add_2_loss/lambda_5/add:0")[0, :, :, :]
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")[0, :, :, :]
            msk = tf.get_default_graph().get_tensor_by_name("mask_prior:0")[0, :, :, 0:1]
            msk = tf.tile(msk, [1, 1, 3])
            out = tf.get_default_graph().get_tensor_by_name("in_img1:0")[0, :, :, :]

            for step in range(0, n_iters):
                # Train with next batch.
                x, y = next(train_feed)

                # plt.imshow(x[0][0, :, :, 0] * 3 - cv2.resize(x[3][0, :, :, 0], (256, 256)))
                # plt.show()
                # plt.imshow(x[0][1, :, :, 0] * 3 - cv2.resize(x[3][1, :, :, 0], (256, 256)))
                # plt.show()

                train_loss = model.train_on_batch(x, y)

                # Print training loss progress.
                util.printProgress(step, 0, train_loss)

                # Add training loss to tensorboard.
                summary = tf.Summary()
                summary.value.add(tag='loss', simple_value=train_loss)
                summary_writer.add_summary(summary, step)

                # plt.imshow(np.round(((((x[3][1]) / 2.0) + 0.5) * 255.0)).astype(np.uint8))
                # plt.show()
                #
                # plt.imshow(x[3][1, :, :, 0])
                # plt.show()

                if step % params['test_interval'] == 0:
                    # Set up tensorboard image summary.
                    image_summary_1 = tf.summary.image('images', [inp, msk, out, gen], max_outputs=100)

                    # Compute summary.
                    image_summary_1_run = sess.run(image_summary_1,
                                                   feed_dict={"in_img0:0": x[0],
                                                              "in_pose0:0": x[1],
                                                              "in_pose1:0": x[2],
                                                              "mask_prior:0": x[3],
                                                              "trans_in:0": x[4],
                                                              "in_img1:0": y})

                    # Register summary in tensorboard.
                    summary_writer.add_summary(image_summary_1_run)

                    # Compute training sample images.
                    train_image = sess.run(gen, feed_dict={"in_img0:0": tr_x[0],
                                                           "in_pose0:0": tr_x[1],
                                                           "in_pose1:0": tr_x[2],
                                                           "mask_prior:0": tr_x[3],
                                                           "trans_in:0": tr_x[4],
                                                           "in_img1:0": tr_y})

                    # Save in disk computed sample images.
                    scipy.misc.imsave(output_dir + 'tr' + str(step) + ".png", train_image)

                # Save model checkpoints.
                if step > 0 and step % params['model_save_interval'] == 0:
                    model.save_weights(network_dir + '/' + str(step) + '.h5')
def train(dataset, gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    lift_params = param.getDatasetParams('weightlifting')
    golf_params = param.getDatasetParams('golfswinghd')
    workout_params = param.getDatasetParams('workout')
    tennis_params = param.getDatasetParams('tennis')
    aux_params = param.getDatasetParams('test-aux')

    _, lift_test = datareader.makeWarpExampleList(lift_params, 0, 2000, 2, 1)
    _, golf_test = datareader.makeWarpExampleList(golf_params, 0, 5000, 2, 2)
    _, workout_test = datareader.makeWarpExampleList(workout_params, 0, 2000, 2, 3)
    _, tennis_test = datareader.makeWarpExampleList(tennis_params, 0, 2000, 2, 4)
    _, aux_test = datareader.makeWarpExampleList(aux_params, 0, 2000, 2, 5)

    test = lift_test + golf_test + workout_test + tennis_test + aux_test
    feed = datageneration.warpExampleGenerator(test, params, do_augment=False,
                                               draw_skeleton=False, skel_color=(0, 0, 255),
                                               return_pose_vectors=True)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        with tf.device(gpu):
            vgg_model = myVGG.vgg_norm()
            networks.make_trainable(vgg_model, False)
            response_weights = sio.loadmat('mean_response.mat')
            gen = networks.network_fgbg(params, vgg_model, response_weights, True, loss='vgg')
            disc = networks.discriminator(params)
            gan = networks.gan(gen, disc, params, vgg_model, response_weights, 0.01, 1e-4)
            gan.load_weights('../results/networks/gan/10000.h5')

        np.random.seed(17)
        n_batches = 25
        for j in range(n_batches):
            print(j)
            X, Y = next(feed)
            loss = gen.evaluate(X[0:-2], Y)
            pred = gen.predict(X[0:-2])
            sio.savemat('results/outputs/' + str(j) + '.mat',
                        {'X': X[0], 'Y': Y, 'pred': pred, 'loss': loss,
                         'src_pose': X[-2], 'tgt_pose': X[-1]})
def test(model_name, gpu_id):
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    # if not os.path.isdir(network_dir):
    #     os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'], 'test', do_augment=False)

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    gan_lr = 1e-4
    disc_lr = 1e-4
    disc_loss = 0.1

    generator = networks.network_posewarp(params)
    # generator.load_weights('../models/vgg_100000.h5')
    generator.load_weights('/versa/kangliwei/motion_transfer/posewarp-cvpr2018/models/0301_fullfinetune/9000.h5')

    mask_delta_model = Model(input=generator.input, output=generator.get_layer('mask_delta').output)
    src_mask_model = Model(input=generator.input, output=generator.get_layer('mask_src').output)

    discriminator = networks.discriminator(params)
    discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=disc_lr))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

    gan = networks.gan(generator, discriminator, params)
    gan.compile(optimizer=Adam(lr=gan_lr),
                loss=[networks.vgg_loss(vgg_model, response_weights, 12), 'binary_crossentropy'],
                loss_weights=[1.0, disc_loss])

    n_iters = 10000
    batch_size = params['batch_size']

    for step in range(n_iters):
        x, y = next(train_feed)
        gen = generator.predict(x)

        src_mask_delta = mask_delta_model.predict(x)
        print('delta_max', src_mask_delta.max())
        src_mask_delta = src_mask_delta * 255

        src_mask = src_mask_model.predict(x)
        print('mask_max', src_mask.max())
        src_mask = src_mask * 255
        # print('src_mask_delta', type(src_mask_delta), src_mask_delta.shape)

        y = (y / 2 + 0.5) * 255.0
        gen = (gen / 2 + 0.5) * 255.0

        for i in range(gen.shape[0]):  # iterate in batch
            cv2.imwrite('pics/src' + str(i) + '.jpg', x[0][i] * 255)
            cv2.imwrite('pics/gen' + str(i) + '.jpg', gen[i])
            cv2.imwrite('pics/y' + str(i) + '.jpg', y[i])
            for j in range(11):
                cv2.imwrite('pics/seg_delta_' + str(i) + '_' + str(j) + '.jpg', src_mask_delta[i][:, :, j])
            for j in range(11):
                cv2.imwrite('pics/seg_' + str(i) + '_' + str(j) + '.jpg', src_mask[i][:, :, j])
        break

        # Train discriminator
        x_tgt_img_disc = np.concatenate((y, gen))
        x_src_pose_disc = np.concatenate((x[1], x[1]))
        x_tgt_pose_disc = np.concatenate((x[2], x[2]))

        L = np.zeros([2 * batch_size])
        L[0:batch_size] = 1

        inputs = [x_tgt_img_disc, x_src_pose_disc, x_tgt_pose_disc]
        d_loss = discriminator.train_on_batch(inputs, L)

        # Train the discriminator a couple of iterations before starting the gan
        if step < 5:
            util.printProgress(step, 0, [0, d_loss])
            step += 1
            continue

        # TRAIN GAN
        L = np.ones([batch_size])
        x, y = next(train_feed)
        g_loss = gan.train_on_batch(x, [y, L])
        util.printProgress(step, 0, [g_loss[1], d_loss])

        if step % params['model_save_interval'] == 0 and step > 0:
            generator.save(network_dir + '/' + str(step) + '.h5')
def finetune(model_name, exp_name, save_dir, gpu_id, vid_i, T, iter_num, rdm=False):
    params = param.get_general_params()
    img_width = params['IMG_WIDTH']
    img_height = params['IMG_HEIGHT']
    # params['batch_size'] = 1

    network_dir = params['model_save_dir'] + '/' + exp_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    if not rdm:
        weight_path = str(os.path.join(params['model_save_dir'],
                                       os.path.join(f"{model_name}", f'{iter_num}.h5')))  # model name doesn't super work
        model.load_weights(weight_path)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    # train for T iterations
    train_feed = data_generation.create_feed(
        params, None, ModelMode.finetune, vid_i,
        txtfile=f'../testset_split_85_v3/train_{vid_i}_img.txt',
        do_augment=True)

    startTime = datetime.now()
    for step in range(T):
        x, y = next(train_feed)
        train_loss = model.train_on_batch(x, y)
        util.printProgress(step, 0, train_loss)
        if step % 1000 == 0:
            print_viz(train_feed, model)

    print(datetime.now() - startTime)
    model.save(network_dir + '/' + str(step) + '.h5')

    # test on all items
    test_feed, dir_len = data_generation.create_test_feed(
        params, None, vid_i=vid_i,
        txtfile=f'../testset_split_85_v3/test_{vid_i}_img.txt',
        k_txtfile=f'../testset_split_85_v3/train_{vid_i}_img.txt')

    scores = np.zeros((dir_len, 3))
    for j in range(dir_len):
        try:
            x, y, scale, pos, img_num = next(test_feed)
            arr_loss = model.predict_on_batch(x)
        except cv2.error as e:
            print("OpenCV Error, gonna ignore")
            continue

        i = 0
        generated = (arr_loss[i] + 1) * 128
        gen_resized = data_generation.reverse_center_and_scale_image(generated, img_width, img_height, pos, scale)
        target = (y[i] + 1) * 128
        target_resized = data_generation.reverse_center_and_scale_image(target, img_width, img_height, pos, scale)
        source = (x[0][i] + 1) * 128
        resized_source = cv2.resize(source, (0, 0), fx=2, fy=2)
        source_resized = data_generation.reverse_center_and_scale_image(source, img_width, img_height, pos, scale)
        modified_img = data_generation.add_source_to_image(gen_resized, resized_source)

        cv2.imwrite(save_dir + f'/{img_num:08d}.png', gen_resized)
        scores[j][0] = compare_ssim(gen_resized, target_resized, multichannel=True, data_range=256)
        scores[j][1] = compare_psnr(gen_resized, target_resized, data_range=256)
        scores[j][2] = compare_mse(gen_resized, target_resized)

    mean_scores = scores.mean(axis=0)
    std_scores = scores.std(axis=0)
    print(mean_scores)
    print(std_scores)

    save_dict = os.path.join(save_dir, f"saved_scores_{vid_i}.pkl")
    pickle.dump(scores, open(save_dict, "wb"))
def train(model_name, gpu_id):
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_config.allow_soft_placement = True

    with tf.Session(config=tf_config) as sess:
        params = param.get_general_params()
        network_dir = params['model_save_dir'] + '/' + model_name
        if not os.path.isdir(network_dir):
            os.mkdir(network_dir)

        train_feed = data_generation.create_feed(params, params['data_dir'], "train")
        # test_feed = data_generation.create_feed(params, params['data_dir'], "test")

        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        gan_lr = 1e-3
        disc_lr = 1e-3
        disc_loss = 0.1

        generator = networks.network_posewarp(params)
        # generator.load_weights('../models/posewarp_vgg/100000.h5')

        discriminator = networks.discriminator(params)
        discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=disc_lr))

        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../Models/vgg_activation_distribution_train.mat')

        gan = networks.gan(generator, discriminator, params)
        gan.compile(optimizer=Adam(lr=gan_lr),
                    loss=[networks.vgg_loss(vgg_model, response_weights, 12), 'binary_crossentropy'],
                    loss_weights=[1.0, disc_loss])

        n_iters = params['n_training_iter']
        batch_size = params['batch_size']

        summary_writer = tf.summary.FileWriter("./logs", graph=sess.graph)

        tr_x, tr_y = next(train_feed)
        # te_x, te_y = next(test_feed)

        # Prepare output directories if they don't exist.
        output_dir = '../Output/' + model_name + '/'
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        scipy.misc.imsave(output_dir + 'tr_orig_image.png', tr_x[0][0, :, :, :])
        scipy.misc.imsave(output_dir + 'tr_targ_image.png', tr_y[0, :, :, :])
        # scipy.misc.imsave(output_dir + 'te_orig_image.png', te_x[0][0, :, :, :])
        # scipy.misc.imsave(output_dir + 'te_targ_image.png', te_y[0, :, :, :])

        print("Batch size: " + str(batch_size))

        for step in range(n_iters):
            x, y = next(train_feed)
            gen = generator.predict(x)

            # Train discriminator
            x_tgt_img_disc = np.concatenate((y, gen))
            x_src_pose_disc = np.concatenate((x[1], x[1]))
            x_tgt_pose_disc = np.concatenate((x[2], x[2]))

            L = np.zeros([2 * batch_size])
            L[0:batch_size] = 1

            inputs = [x_tgt_img_disc, x_src_pose_disc, x_tgt_pose_disc]
            d_loss = discriminator.train_on_batch(inputs, L)

            # Train the discriminator a couple of iterations before starting the gan
            if step < 5:
                util.printProgress(step, 0, [0, d_loss])
                step += 1
                continue

            # TRAIN GAN
            L = np.ones([batch_size])
            x, y = next(train_feed)
            g_loss = gan.train_on_batch(x, [y, L])
            util.printProgress(step, 0, [g_loss[1], d_loss])

            if step % params['test_interval'] == 0:
                print(gen[0])
                gen = tf.get_default_graph().get_tensor_by_name("model_1/add_2_1/add:0")
                inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
                out = tf.get_default_graph().get_tensor_by_name("in_img1:0")
                p_s = tf.get_default_graph().get_tensor_by_name("mask_src/truediv:0")
                # p_t = tf.get_default_graph().get_tensor_by_name("in_pose1:0")

                image_summary_1 = tf.summary.image('images',
                                                   [inp[0, :, :, :], out[0, :, :, :], gen[0, :, :, :]],
                                                   max_outputs=100)
                # image_summary_2 = tf.summary.image('pose', [tf.reduce_sum(p_s[0, :, :, :], 2, keepdims=True)], max_outputs=100)

                image_summary_1 = sess.run(image_summary_1,
                                           feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1],
                                                      "in_pose1:0": x[2], "mask_prior:0": x[3],
                                                      "trans_in:0": x[4], "in_img1:0": y,
                                                      "input_3:0": x[0], "input_4:0": x[1],
                                                      "input_5:0": x[2], "input_6:0": x[3],
                                                      "input_7:0": x[4]})
                # img_gen = sess.run(image_summary_1, feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1], "in_pose1:0": x[2],
                #                                                "mask_prior:0": x[3], "trans_in:0": x[4], "in_img1:0": y,
                #                                                "input_3:0": x[0], "input_4:0": x[1], "input_5:0": x[2],
                #                                                "input_6:0": x[3], "input_7:0": x[4]})
                # image_summary_2 = sess.run(image_summary_2, feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1], "in_pose1:0": x[2],
                #                                                        "mask_prior:0": x[3], "trans_in:0": x[4], "in_img1:0": y})

                summary_writer.add_summary(image_summary_1)
                # summary_writer.add_summary(image_summary_2)

                train_image = sess.run(gen, feed_dict={"in_img0:0": tr_x[0], "in_pose0:0": tr_x[1],
                                                       "in_pose1:0": tr_x[2], "mask_prior:0": tr_x[3],
                                                       "trans_in:0": tr_x[4], "in_img1:0": tr_y,
                                                       "input_3:0": tr_x[0], "input_4:0": tr_x[1],
                                                       "input_5:0": tr_x[2], "input_6:0": tr_x[3],
                                                       "input_7:0": tr_x[4]})
                # test_image = sess.run(gen, feed_dict={"in_img0:0": te_x[0], "in_pose0:0": te_x[1], "in_pose1:0": te_x[2],
                #                                       "mask_prior:0": te_x[3], "trans_in:0": te_x[4], "in_img1:0": te_y,
                #                                       "input_3:0": te_x[0], "input_4:0": te_x[1], "input_5:0": te_x[2],
                #                                       "input_6:0": te_x[3], "input_7:0": te_x[4]})

                scipy.misc.imsave(output_dir + 'tr' + str(step) + ".png", train_image[0, :, :, :])
                # scipy.misc.imsave(output_dir + 'te' + str(step) + ".png", test_image[0, :, :, :])

            if step % params['model_save_interval'] == 0 and step > 0:
                gan.save(network_dir + '/' + str(step) + '.h5')
def test(model_name, gpu_id):
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    work_product_dir = params['project_dir'] + '/' + 'work_product'

    test_feed = data_generation.create_feed(params, params['data_dir'], 'test')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('posewarp-cvpr2018/data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.summary()
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    model.load_weights(network_dir + '/' + 'weights_model_gan_improved.h5')

    n_iters = params['n_training_iter']

    x, y = next(test_feed)

    # src_pose = x[1][0][:, :, 0]
    # trgt_pose = x[2][0][:, :, 0]

    src_limb_masks = np.amax(np.asarray(x[3][0]), axis=2)
    src_limb_mask_1 = np.asarray(x[3][0][:, :, 0])
    src_limb_mask_2 = np.asarray(x[3][0][:, :, 1])
    src_limb_mask_3 = np.asarray(x[3][0][:, :, 2])
    src_limb_mask_4 = np.asarray(x[3][0][:, :, 3])
    src_limb_mask_5 = np.asarray(x[3][0][:, :, 4])

    src_pose = np.amax(np.asarray(x[1][0]), axis=2)
    trgt_pose = np.amax(np.asarray(x[2][0]), axis=2)
    # for i in range(1, 7):
    #     src_pose = src_pose + x[1][0][:, :, i]
    #     trgt_pose = trgt_pose + x[2][0][:, :, i]

    scipy.misc.imsave(work_product_dir + '/' + 'source_pose.jpg', src_pose)
    scipy.misc.imsave(work_product_dir + '/' + 'target_pose.jpg', trgt_pose)
    scipy.misc.imsave(work_product_dir + '/' + 'source_limb_mask.jpg', src_limb_masks)
    # scipy.misc.imsave(network_dir + '/' + 'source_limb_mask_1.jpg', src_limb_mask_1)
    # scipy.misc.imsave(network_dir + '/' + 'source_limb_mask_2.jpg', src_limb_mask_2)
    # scipy.misc.imsave(network_dir + '/' + 'source_limb_mask_3.jpg', src_limb_mask_3)
    # scipy.misc.imsave(network_dir + '/' + 'source_limb_mask_4.jpg', src_limb_mask_4)
    # scipy.misc.imsave(network_dir + '/' + 'source_limb_mask_5.jpg', src_limb_mask_5)
    # scipy.misc.imsave(network_dir + '/' + 'source_pose_new.jpg', src_pose_n)

    target_img = np.asarray(y[0])
    src_img = np.asarray(x[0][0])

    yimg = model.predict(x, 1)
    gen_img = np.asarray(yimg[0])

    constarr = 255 * 0.5 * np.ones((256, 256, 3))
    # scipy.misc.imsave(network_dir + '/' + 'source_image.jpg', src_img)
    # scipy.misc.imsave(network_dir + '/' + 'target_image.jpg', target_img)
    # scipy.misc.imsave(network_dir + '/' + 'generated_target_image.jpg', gen_img)

    cv2.imwrite(work_product_dir + '/' + 'target.jpg', constarr + np.multiply(target_img, 0.5 * 255))
    cv2.imwrite(work_product_dir + '/' + 'gen_target.jpg', constarr + np.multiply(gen_img, 0.5 * 255))
    cv2.imwrite(work_product_dir + '/' + 'source.jpg', constarr + np.multiply(src_img, 0.5 * 255))
def train(model_name, gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    network_dir = params['project_dir'] + '/results/networks/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = datageneration.createFeed(params, "train_vids.txt", 50000)
    test_feed = datageneration.createFeed(params, "test_vids.txt", 5000)

    batch_size = params['batch_size']

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    gan_lr = 5e-5
    disc_lr = 5e-5
    disc_loss = 0.1
    vgg_model_num = 184000

    with tf.device(gpu):
        vgg_model = myVGG.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('mean_response.mat')

        generator = networks.network_fgbg(params, vgg_model, response_weights)
        generator.load_weights('../results/networks/fgbg_vgg_new/' + str(vgg_model_num) + '.h5')

        discriminator = networks.discriminator(params)
        discriminator.compile(loss=networks.wass, optimizer=RMSprop(disc_lr))

        gan = networks.gan(generator, discriminator, params, vgg_model, response_weights,
                           disc_loss, gan_lr)

    for step in range(vgg_model_num + 1, vgg_model_num + 5001):
        for j in range(2):
            # Clip discriminator weights (WGAN-style weight clipping).
            for l in discriminator.layers:
                weights = l.get_weights()
                weights = [np.clip(w, -0.01, 0.01) for w in weights]
                l.set_weights(weights)

            X, Y = next(train_feed)
            with tf.device(gpu):
                gen = generator.predict(X)

            # Train discriminator
            networks.make_trainable(discriminator, True)

            X_tgt_img_disc = np.concatenate((Y, gen))
            X_src_pose_disc = np.concatenate((X[1], X[1]))
            X_tgt_pose_disc = np.concatenate((X[2], X[2]))

            L = np.ones(2 * batch_size)
            L[0:batch_size] = -1

            inputs = [X_tgt_img_disc, X_src_pose_disc, X_tgt_pose_disc]
            d_loss = discriminator.train_on_batch(inputs, L)
            networks.make_trainable(discriminator, False)

        # TRAIN GAN
        L = -1 * np.ones(batch_size)
        X, Y = next(train_feed)
        g_loss = gan.train_on_batch(X, [Y, L])
        util.printProgress(step, 0, [g_loss[1], d_loss])

        if step % params['model_save_interval'] == 0:
            gan.save(network_dir + '/' + str(step) + '.h5')
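# networks.wass is defined elsewhere in this codebase. A minimal sketch of the
# usual Keras Wasserstein critic loss it appears to refer to (an assumption, not
# the repo's exact definition): with labels in {-1, +1} as set up above, the
# critic minimizes the mean of label * score.
from keras import backend as K

def wass(y_true, y_pred):
    return K.mean(y_true * y_pred)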
def train(model_name, gpu_id):
    with tf.Session() as sess:
        params = param.get_general_params()
        network_dir = params['model_save_dir'] + '/' + model_name

        # Creates the model directory if it does not exist.
        if not os.path.isdir(network_dir):
            os.mkdir(network_dir)

        train_feed = data_generation.create_feed(params, params['data_dir'], 'train')
        test_feed = data_generation.create_feed(params, params['data_dir'], 'test')

        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

        model = networks.network_posewarp(params)
        model.compile(optimizer=Adam(lr=1e-4),
                      loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

        n_iters = params['n_training_iter']

        summary_writer = tf.summary.FileWriter(
            r"D:\Proyectos\JEJU2018\Code\posewarp-cvpr2018\code\logs", graph=sess.graph)

        tr_x, tr_y = next(train_feed)
        te_x, te_y = next(test_feed)

        # Prepare output directories if they don't exist.
        output_dir = '../output/' + model_name + '/'
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        scipy.misc.imsave('../output/tr_orig_image.png', tr_x[0][0, :, :, :])
        scipy.misc.imsave('../output/tr_targ_image.png', tr_y[0, :, :, :])
        scipy.misc.imsave('../output/te_orig_image.png', te_x[0][0, :, :, :])
        scipy.misc.imsave('../output/te_targ_image.png', te_y[0, :, :, :])

        for step in range(0, n_iters):
            x, y = next(train_feed)
            train_loss = model.train_on_batch(x, y)
            util.printProgress(step, 0, train_loss)

            # out = sess.run(conv, feed_dict={"input_1:0": x[0]})
            # plt.matshow(out[0, :, :, 0])
            # plt.show()

            gen = tf.get_default_graph().get_tensor_by_name("loss/add_2_loss/lambda_5/add:0")
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
            out = tf.get_default_graph().get_tensor_by_name("in_img1:0")
            p_s = tf.get_default_graph().get_tensor_by_name("mask_src/truediv:0")
            # p_t = tf.get_default_graph().get_tensor_by_name("in_pose1:0")

            image_summary_1 = tf.summary.image('images',
                                               [inp[0, :, :, :], out[0, :, :, :], gen[0, :, :, :]],
                                               max_outputs=100)
            # image_summary_2 = tf.summary.image('pose', [tf.reduce_sum(p_s[0, :, :, :], 2, keepdims=True)], max_outputs=100)

            image_summary_1 = sess.run(image_summary_1,
                                       feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1],
                                                  "in_pose1:0": x[2], "mask_prior:0": x[3],
                                                  "trans_in:0": x[4], "in_img1:0": y})
            # image_summary_2 = sess.run(image_summary_2, feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1], "in_pose1:0": x[2],
            #                                                        "mask_prior:0": x[3], "trans_in:0": x[4], "in_img1:0": y})

            summary_writer.add_summary(image_summary_1)
            # summary_writer.add_summary(image_summary_2)

            train_image = sess.run(gen, feed_dict={"in_img0:0": tr_x[0], "in_pose0:0": tr_x[1],
                                                   "in_pose1:0": tr_x[2], "mask_prior:0": tr_x[3],
                                                   "trans_in:0": tr_x[4], "in_img1:0": tr_y})
            test_image = sess.run(gen, feed_dict={"in_img0:0": te_x[0], "in_pose0:0": te_x[1],
                                                  "in_pose1:0": te_x[2], "mask_prior:0": te_x[3],
                                                  "trans_in:0": te_x[4], "in_img1:0": te_y})

            if step > 0 and step % params['model_save_interval'] == 0:
                model.save_weights(network_dir + '/' + str(step) + '.h5')
def train(model_name, gpu_id):
    params = param.getGeneralParams()
    gpu = '/gpu:' + str(gpu_id)

    network_dir = params['project_dir'] + '/results/networks/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = datageneration.createFeed(params, "train_vids.txt")
    test_feed = datageneration.createFeed(params, "test_vids.txt")

    batch_size = params['batch_size']

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        gan_lr = 1e-4
        disc_lr = 1e-4
        disc_loss = 0.1

        with tf.device(gpu):
            vgg_model = myVGG.vgg_norm()
            networks.make_trainable(vgg_model, False)
            response_weights = sio.loadmat('mean_response.mat')

            # generator = networks.network_pix2pix(params, vgg_model, response_weights)
            generator = networks.network_fgbg(params)
            generator.load_weights('../results/networks/fgbg_vgg/100000.h5')

            discriminator = networks.discriminator(params)
            discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=disc_lr))

            gan = networks.gan(generator, discriminator, params, vgg_model, response_weights,
                               disc_loss, gan_lr)
            gan.compile(optimizer=Adam(lr=gan_lr),
                        loss=[networks.vggLoss(vgg_model, response_weights), 'binary_crossentropy'],
                        loss_weights=[1.0, disc_loss])

        for step in range(10001):
            X, Y = next(train_feed)

            with tf.device(gpu):
                gen = generator.predict(X)  # [0:3])

            # Train discriminator
            X_tgt_img_disc = np.concatenate((Y, gen))
            X_src_pose_disc = np.concatenate((X[1], X[1]))
            X_tgt_pose_disc = np.concatenate((X[2], X[2]))

            L = np.zeros([2 * batch_size])
            L[0:batch_size] = 1

            inputs = [X_tgt_img_disc, X_src_pose_disc, X_tgt_pose_disc]
            d_loss = discriminator.train_on_batch(inputs, L)

            # Train the discriminator a couple of iterations before starting the gan
            if step < 5:
                util.printProgress(step, 0, [0, d_loss])
                step += 1
                continue

            # TRAIN GAN
            L = np.ones([batch_size])
            X, Y = next(train_feed)
            g_loss = gan.train_on_batch(X, [Y, L])
            util.printProgress(step, 0, [g_loss[1], d_loss])

            '''
            # Test
            if step % params['test_interval'] == 0:
                n_batches = 8
                test_loss = np.zeros(2)
                for j in range(n_batches):
                    X, Y = next(warp_test_feed)
                    # test_loss += np.array(generator.test_on_batch(X_warp, Y_warp))
                    L = np.zeros([batch_size, 2])
                    L[:, 1] = 1  # Fake images

                    test_loss_j = gan_warp.test_on_batch(X_warp, [Y_warp, L])
                    test_loss += np.array(test_loss_j[1:3])

                test_loss /= n_batches
                util.printProgress(step, 1, test_loss)
            '''

            if step % params['model_save_interval'] == 0 and step > 0:
                gan.save(network_dir + '/' + str(step) + '.h5')