def evaluate(model_name, gpu_id):
    """Sweep saved checkpoints of `model_name` and record the mean VGG loss.

    For every checkpoint from 1000 to 185000 (step 1000) the model is
    evaluated on 25 fixed-seed training batches; results accumulate in
    losses_by_iter.mat, rewritten after each checkpoint.
    """
    params = param.get_general_params()

    # Pin the job to one GPU and let TensorFlow grow memory on demand.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True
    set_session(tf.Session(config=gpu_config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(params['data_dir'] +
                                   '/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.compile(optimizer=Adam(),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    iterations = range(1000, 185001, 1000)
    n_batches = 25
    losses = []

    for it in iterations:
        print(it)
        model.load_weights('../models/' + model_name + '/' + str(it) + '.h5')
        # Fixed seed so every checkpoint sees the same batches.
        np.random.seed(11)
        feed = data_generation.create_feed(params, params['data_dir'], 'train')

        total = 0
        for _ in range(n_batches):
            x, y = next(feed)
            total += model.evaluate(x, y)
        losses.append(total / (n_batches * 1.0))

        # Rewrite the results file each time so partial runs are preserved.
        sio.savemat('losses_by_iter.mat',
                    {'losses': losses, 'iterations': iterations})
def train(model_name, gpu_id):
    """Fine-tune the posewarp generator with the VGG perceptual loss.

    Resumes from `weights_gan_model.h5` in the model directory and
    periodically overwrites `weights_model_gan_improved.h5` there.
    """
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'],
                                             'train')

    # One visible GPU; grow memory instead of grabbing it all up front.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    set_session(tf.Session(config=sess_config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(
        'posewarp-cvpr2018/data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    model.load_weights(network_dir + '/' + 'weights_gan_model.h5')
    model.summary()

    for step in range(params['n_training_iter']):
        batch_x, batch_y = next(train_feed)
        train_loss = model.train_on_batch(batch_x, batch_y)
        util.printProgress('Iteration: ', step, ' Train_loss: ', train_loss)

        # Periodic snapshot (always to the same file).
        if step > 0 and step % params['model_save_interval'] == 0:
            model.save_weights(
                (network_dir + '/' + 'weights_model_gan_improved' + '.h5'))
# --- Example #3 ---
    def __init__(self, model_path='../models/vgg+gan_5000.h5'):
        """Build the posewarp generator and load pretrained weights.

        Args:
            model_path: path to an .h5 checkpoint for the generator.
        """
        params = param.get_general_params()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        # NOTE(review): `config` is constructed but never passed to a session
        # here (no set_session call) — confirm whether it is applied elsewhere.

        self.generator = networks.network_posewarp(params)
        self.generator.load_weights(model_path)
def test(model_name, gpu_id):
    """Run one inference pass over the golf test references and write
    source/generated image pairs as TensorBoard image summaries.

    Args:
        model_name: checkpoint directory name under model_save_dir.
        gpu_id: CUDA device index.

    Raises:
        FileNotFoundError: if the model directory does not exist.
    """
    params = param.get_general_params()

    TEST_PATH = params['data_dir'] + "/exam/test_golf/"

    SRC_IMG = TEST_PATH + "ref_img/"
    TGT_POS = TEST_PATH + "ref_pose/"

    with tf.Session() as sess:

        network_dir = params['model_save_dir'] + '/' + model_name

        # Fail fast with a real exception. The original bare `raise` had no
        # active exception and itself raised RuntimeError.
        if not os.path.isdir(network_dir):
            raise FileNotFoundError("No model named ´" + model_name + "´ found!")

        img_feed = data_generation.create_feed(params, SRC_IMG, mode="test", do_augment=False)
        pos_feed = data_generation.create_feed(params, TGT_POS, mode="test", do_augment=False)

        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

        # Use the last checkpoint file found in the model directory.
        ckp_name = [f for f in listdir(network_dir) if isfile(join(network_dir, f))][-1]

        model = networks.network_posewarp(params)
        model.compile(optimizer=Adam(lr=1e-4), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

        model.load_weights(network_dir + "/"  + ckp_name)

        n_iters = 1

        # Raw string: same byte value as before, but avoids invalid-escape
        # warnings for the backslashes in the Windows path.
        summary_writer = tf.summary.FileWriter(r"D:\Proyectos\JEJU2018\Code\posewarp-cvpr2018\code\logs", graph=sess.graph)

        for step in range(0, n_iters):

            x_img = next(img_feed)
            x_pos = next(pos_feed)

            # out = sess.run(conv, feed_dict={"input_1:0" : x[0]})
            # plt.matshow(out[0, :, :, 0])
            # plt.show()

            # Look up the generated image and source image tensors by name.
            gen = tf.get_default_graph().get_tensor_by_name("loss/add_2_loss/lambda_5/add:0")
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
            # out = tf.get_default_graph().get_tensor_by_name("in_img1:0")
            image_summary_op = tf.summary.image('images', [inp[0, :, :, :], gen[0, :, :, :]], max_outputs=100)
            # NOTE(review): target pose is taken from x_pos[2] while the other
            # inputs come from x_img — confirm the two feed layouts agree.
            image_summary = sess.run(image_summary_op, feed_dict={"in_img0:0" : x_img[0], "in_pose0:0" : x_img[1], "in_pose1:0" : x_pos[2], "mask_prior:0" : x_img[3], "trans_in:0" : x_img[4]})
            summary_writer.add_summary(image_summary)

            print(image_summary_op)
# --- Example #5 ---
def run_posewarp(pw, source_image, target_image, source_joints, target_joints):
    """Re-render `source_image` in the target pose.

    Formats the raw images/joints into network inputs, runs the wrapped
    generator, and returns (generated image, network input x, target y).
    """
    params = param.get_general_params()
    net_in, net_gt = data_generation.format_network_input(
        params, source_image, target_image, source_joints, target_joints)
    generated = pw.gen(net_in)
    return generated, net_in, net_gt
# --- Example #6 ---
def vid_to_seq(vid_path, mode="train", N_FRMS=10, resize=True):
    """Sample roughly ``N_FRMS`` evenly spaced frames from a video as PNGs.

    Frames are converted BGR -> RGB and written (1-indexed) under
    ``<data_dir>/<mode>/<video name>/frames/``; with ``resize`` they are
    scaled to width 500 keeping aspect ratio.

    Returns:
        (buf[:-1], save_path): the sampled frame buffer minus its last slot,
        and the directory the PNGs were written to.
    """

    params = param.get_general_params()
    # Create corresponding directory where to save files. Directory shares same name as input video.
    save_path = params['data_dir'] + "/" + mode + "/" + vid_path.split(
        "/")[-1].split(".")[0] + "/frames/"

    print(save_path)

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    cap = cv2.VideoCapture(vid_path)

    N_FRMS = int(N_FRMS)

    frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    print(frameCount)

    # Keep every SKIP-th frame so roughly N_FRMS frames survive.
    SKIP = frameCount // N_FRMS + 1

    buf = np.empty((frameCount // SKIP, frameHeight, frameWidth, 3),
                   np.dtype('uint8'))

    fc = 0  # frames read so far
    f = 0  # frames kept so far
    ret = True

    while (fc < frameCount and ret):

        r, b = cap.read()
        fc += 1

        if fc % SKIP == 0:

            # NOTE(review): if cap.read() failed here, b is None and this
            # assignment raises — relies on the reported frame count being
            # accurate. Confirm for the input videos used.
            ret, buf[f] = r, b
            buf[f] = buf[f][:, :, ::-1]  # Inverts channels from BGR to RGB

            h, w, _ = buf[f].shape

            if resize:
                imsave(
                    save_path + str(f + 1) + ".png",
                    np.array(cv2.resize(np.array(buf[f], dtype=np.uint8),
                                        (500, int(h / w * 500))),
                             dtype=np.uint8))
            else:
                imsave(save_path + str(f + 1) + ".png",
                       np.array(buf[f], dtype=np.uint8))

            f += 1

    cap.release()
    # The final buffer slot may never be filled (integer division), so drop it.
    return buf[:-1], save_path
# --- Example #7 ---
def test(model_name, save_dir, gpu_id, vid_i, iter_num=9999, dbg=False):
    """Evaluate one checkpoint of `model_name` on test video `vid_i`.

    Generates one image per test example, writes it to `save_dir`, and
    records per-example SSIM / PSNR / MSE against the ground truth, finally
    pickling the score array to saved_scores_<vid_i>.pkl.

    Args:
        model_name: checkpoint directory name under model_save_dir.
        save_dir: output directory for generated PNGs and scores.
        gpu_id: CUDA device index.
        vid_i: test video index used to pick the test/train file lists.
        iter_num: checkpoint iteration to load (<iter_num>.h5).
        dbg: if True, process only the first example.
    """
    params = param.get_general_params()
    img_width = params['IMG_WIDTH']
    img_height = params['IMG_HEIGHT']

    test_feed, dir_len = data_generation.create_test_feed(params, 5, vid_i=vid_i, txtfile=f'../testset_5_v3/test_{vid_i}_img.txt', k_txtfile=f'../testset_5_v3/train_{vid_i}_img.txt')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')
    model = networks.network_posewarp(params)
    weight_path = str(os.path.join(params['model_save_dir'], os.path.join(f"{model_name}", f'{iter_num}.h5'))) # model name doesn't super work
    model.load_weights(weight_path)
    model.compile(optimizer=Adam(lr=1e-4), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    model.summary()
    n_iters = params['n_training_iter']
    # Generated images (channels-first) and per-example [SSIM, PSNR, MSE] rows.
    gen = np.zeros((dir_len, 3, 256, 256))
    scores = np.zeros((dir_len, 3))

    for j in range(1 if dbg else dir_len):
        try:
          x, y, scale, pos, img_num, src = next(test_feed)
          arr_loss = model.predict_on_batch(x)
        except cv2.error as e:
          print("OpenCV Error, gonna ignore")
          continue
        i = 0  # only the first element of the batch is scored
        # Map network output from [-1, 1] back to pixel range [0, 256).
        generated = (arr_loss[i] + 1) * 128
        gen_resized = data_generation.reverse_center_and_scale_image(generated, img_width, img_height, pos, scale)
        target = (y[i] + 1) * 128
        target_resized = data_generation.reverse_center_and_scale_image(target, img_width, img_height, pos, scale)
        source = (x[0][i] + 1) * 128
        # resized_source = cv2.resize(source, (0, 0), fx=2, fy=2)
        # source_resized = data_generation.reverse_center_and_scale_image(source, img_width, img_height, pos, scale)
        modified_img = data_generation.add_source_to_image(gen_resized, src)
        cv2.imwrite(save_dir + f'/{img_num:08d}.png', modified_img)
        gen[j] = np.transpose(generated, (2, 0, 1))
        scores[j][0] = compare_ssim(generated, target, multichannel=True, data_range=256)
        scores[j][1] = compare_psnr(generated, target, data_range=256)
        scores[j][2] = compare_mse(generated, target)

    mean_scores = scores.mean(axis=0)
    std_scores = scores.std(axis=0)

    print(mean_scores)
    print(std_scores)
    save_dict = os.path.join(save_dir, f"saved_scores_{vid_i}.pkl")
    pickle.dump( scores, open( save_dict, "wb" ) )
# --- Example #8 ---
def test(model_name, gpu_id):
    """Run the latest checkpoint over the test feed, printing batch losses
    and logging source / target / generated image triplets to TensorBoard.
    """

    with tf.Session() as sess:

        params = param.get_general_params()

        network_dir = params['model_save_dir'] + '/' + model_name

        # Creates models directory if not exist.
        if not os.path.isdir(network_dir):
            print(network_dir)
            os.mkdir(network_dir)

        test_feed = data_generation.create_feed(params, params['data_dir'], 'test')

        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

        # Use the last checkpoint file found in the model directory.
        ckp_name = [f for f in listdir(network_dir) if isfile(join(network_dir, f))][-1]

        model = networks.network_posewarp(params)
        model.compile(optimizer=Adam(lr=1e-4), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

        model.load_weights(network_dir + "/"  + ckp_name)

        n_iters = 100

        # NOTE(review): hard-coded Windows log path; the backslash escapes are
        # preserved literally here but a raw string would be safer.
        summary_writer = tf.summary.FileWriter("D:\Proyectos\JEJU2018\Code\posewarp-cvpr2018\code\logs", graph=sess.graph)

        for step in range(0, n_iters):

            x, y = next(test_feed)

            test_loss = model.test_on_batch(x, y)
            util.printProgress(step, 0, test_loss)

            # out = sess.run(conv, feed_dict={"input_1:0" : x[0]})
            # plt.matshow(out[0, :, :, 0])
            # plt.show()

            # Graph tensors looked up by name: generated image, source, target.
            gen = tf.get_default_graph().get_tensor_by_name("loss/add_2_loss/lambda_5/add:0")
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
            out = tf.get_default_graph().get_tensor_by_name("in_img1:0")
            image_summary_op = tf.summary.image('images', [inp[0, :, :, :], out[0, :, :, :], gen[0, :, :, :]], max_outputs=100)
            image_summary = sess.run(image_summary_op, feed_dict={"in_img0:0" : x[0], "in_pose0:0" : x[1], "in_pose1:0" : x[2], "mask_prior:0" : x[3], "trans_in:0" : x[4], "in_img1:0" : y})
            summary_writer.add_summary(image_summary)
def train(model_name, gpu_id):
    """Train posewarp with a blended loss: (1 - alpha) * VGG perceptual loss
    plus alpha * pixel MSE.

    Optionally resumes from params['load_weights'] (the start iteration is
    parsed from the checkpoint filename). Saves a checkpoint and a CSV of
    per-step losses every model_save_interval steps.

    Args:
        model_name: checkpoint directory name under model_save_dir.
        gpu_id: CUDA device index.
    """
    params = param.get_general_params()

    network_dir = params['model_save_dir'] + '/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'],
                                             'train')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(params['data_dir'] +
                                   '/vgg_activation_distribution_train.mat')
    model = networks.network_posewarp(params)
    # Idiom fix: `is not None` instead of `not ... == None`.
    if params['load_weights'] is not None:
        model.load_weights(params['load_weights'])

    alpha = 0.4  # weight of the MSE term vs. the perceptual term
    # Build the VGG perceptual loss closure ONCE; the original rebuilt it
    # inside the lambda on every batch.
    vgg_loss_fn = networks.vgg_loss(vgg_model, response_weights, 12)
    model.compile(
        optimizer=Adam(lr=1e-4),
        loss=lambda y_true, y_pred: (
            (1 - alpha) * vgg_loss_fn(y_true, y_pred) +
            alpha * tf.reduce_mean(tf.square(y_pred - y_true))))

    n_iters = params['n_training_iter']
    loss_note = []
    if params['load_weights'] is None:
        start = 0
    else:
        # Resume from the iteration encoded in the checkpoint filename
        # (e.g. ".../12000.h5" -> 12000).
        start = int(params['load_weights'].split("/")[-1][:-3])

    for step in range(start, n_iters):
        x, y = next(train_feed)

        train_loss = model.train_on_batch(x, y)
        loss_note.append([str(step), str(train_loss)])
        util.printProgress(step, 0, train_loss)
        if step > 0 and step % params['model_save_interval'] == 0:
            model.save(network_dir + '/' + str(step) + '.h5')
            # Flush accumulated losses to CSV alongside the checkpoint.
            pd.DataFrame(loss_note).to_csv(network_dir + f"/{step}.csv",
                                           header=None,
                                           index=None)
            loss_note = []
def seq_to_inf(frames, save_path=None):
    """Run 2D pose estimation over a frame sequence.

    Returns {"data": {"X": joints, "bbox": boxes}} where joints has shape
    (n_joints, 2, n_frames) in pixel coordinates and boxes has shape
    (n_frames, 4) as [x, y, w, h]. Undetected entries remain -1.
    Optionally writes the dict to `<save_path>.mat`.
    """
    estimator = tf_pose.get_estimator(model="mobilenet_thin")

    n_joints = param.get_general_params()['n_joints']
    n_frames = len(frames)

    joints = np.zeros([n_joints, 2, n_frames]) - 1
    boxes = np.zeros([n_frames, 4]) - 1

    for idx, frame in enumerate(frames):

        body_parts = estimator.inference(
            frame,
            resize_to_default=(432 > 0 and 368 > 0),
            upsample_size=4.0)[0].body_parts

        x_min = y_min = +np.inf
        x_max = y_max = -np.inf

        # Frame is (H, W, 3); estimator coordinates are normalized to [0, 1].
        img_h, img_w = frame[:, :, 0].shape

        for key in body_parts:
            if key < n_joints:
                # Scale normalized body-part coordinates to pixels.
                px = body_parts[key].x * img_w
                py = body_parts[key].y * img_h

                x_min = np.minimum(x_min, px)
                y_min = np.minimum(y_min, py)
                x_max = np.maximum(x_max, px)
                y_max = np.maximum(y_max, py)

                joints[key, 0, idx] = px
                joints[key, 1, idx] = py

        # Bounding box [x, y, w, h] derived from the detected joints.
        boxes[idx, :] = [x_min, y_min, x_max - x_min, y_max - y_min]

    info = {"data": {"X": joints[:n_joints], "bbox": boxes}}

    if save_path:
        sio.savemat(save_path + ".mat", info)

    return info
# --- Example #11 ---
def train(model_name, gpu_id, start_iter=0):
    """Train posewarp starting from the vgg_100000 checkpoint, writing
    scalar loss summaries for TensorBoard.

    Saves a checkpoint every model_save_interval steps and a final one after
    the loop.

    Args:
        model_name: checkpoint/log directory name under model_save_dir.
        gpu_id: CUDA device index.
        start_iter: iteration to resume from.
    """
    params = param.get_general_params()

    network_dir = params['model_save_dir'] + '/' + model_name

    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    tf_writer = tf.summary.FileWriter(network_dir + "/log/")

    train_feed = data_generation.create_feed(params, None, ModelMode.train)

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(
        '../data/vgg_activation_distribution_train.mat')
    model = networks.network_posewarp(params)
    model.load_weights('../models/vgg_100000.h5')
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    n_iters = params['n_training_iter']

    # Bug fix: the final save below used the loop variable, which raised
    # NameError whenever start_iter > n_iters (loop body never ran).
    step = start_iter
    for step in range(start_iter, n_iters + 1):
        x, y = next(train_feed)

        train_loss = model.train_on_batch(x, y)

        util.printProgress(step, 0, train_loss)
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="train_loss", simple_value=train_loss)
        ])
        tf_writer.add_summary(summary, step)

        # Flush periodically so progress shows up in TensorBoard promptly.
        if step > 0 and step % 100 == 0:
            tf_writer.flush()

        if step > 0 and step % params['model_save_interval'] == 0:
            model.save(network_dir + '/' + str(step) + '.h5')
    # Final checkpoint at the last completed step.
    model.save(network_dir + '/' + str(step) + '.h5')
def predict(model_name, gpu_id, save_file_name):
    """Generate images with a trained model and write input / output /
    ground-truth JPEGs to <model_save_dir>/<model_name>/result.

    Args:
        model_name: checkpoint directory name under model_save_dir.
        gpu_id: CUDA device index.
        save_file_name: checkpoint filename (appended to the model dir).
    """
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    save_dir = params['model_save_dir'] + '/' + model_name + '/result'
    params['batch_size'] = 1
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(params['data_dir'] +
                                   '/vgg_activation_distribution_train.mat')
    model = networks.network_posewarp(params)

    # model.compile(optimizer=Adam(), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    # NOTE(review): no '/' between network_dir and save_file_name — this only
    # works if save_file_name starts with a separator; confirm with callers.
    model.load_weights(network_dir +
                       save_file_name)  # TODO not sure the final ckpt name
    np.random.seed(112)
    feed = data_generation.create_feed(params,
                                       params['data_dir'],
                                       'train',
                                       do_augment=False)
    cnt = 8
    # NOTE(review): the unconditional `break` means this loop handles exactly
    # one batch, and the bare `except` hides any failure — presumably leftover
    # debugging; confirm before relying on this for full-set prediction.
    while True:
        try:
            x, y = next(feed)
            inp = recover2img(x[0])
            cv2.imwrite(os.path.join(save_dir, str(cnt) + "inp.jpg"), inp[0])
            # cv2.imwrite(os.path.join(save_dir, str(cnt) + "map.jpg",x[2][0][:,:,0]))

            out = model.predict(x)
            out = recover2img(out[0])
            cv2.imwrite(os.path.join(save_dir, str(cnt) + ".jpg"), out)
            gt = recover2img(y[0])
            cv2.imwrite(os.path.join(save_dir, str(cnt) + "gt.jpg"), gt)
            cnt += 1
            break
        except:
            break
# --- Example #13 ---
def train(model_name, gpu_id):
    """Train posewarp with the VGG perceptual loss, logging the training
    loss to TensorBoard and checkpointing periodically.
    """
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'],
                                             'train')

    # Single visible GPU with on-demand memory growth.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    set_session(tf.Session(config=sess_config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(
        '../data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    # TensorBoard logging via a manually driven Keras callback.
    callback = TensorBoard('../log/{:s}'.format(model_name), write_graph=True)
    callback.set_model(model)
    train_names = ['train_loss']

    for step in range(params['n_training_iter']):
        batch_x, batch_y = next(train_feed)
        train_loss = model.train_on_batch(batch_x, batch_y)

        util.printProgress(step, 0, train_loss)
        write_log(callback, train_names, [train_loss], step)

        if step > 0 and step % params['model_save_interval'] == 0:
            model.save(network_dir + '/' + str(step) + '.h5')
def reptile_outer_loop(model_name, gpu_id, dbg=False, k=5, T=20):
    """Reptile meta-learning outer loop for posewarp.

    Each meta-iteration samples a k-image task, adapts the model for T inner
    steps, then moves the initialization toward the adapted weights by
    params['epsilon'] (the Reptile update).

    Args:
        model_name: checkpoint directory name.
        gpu_id: CUDA device index.
        dbg: unused debug flag (kept for interface compatibility).
        k: images per sampled task.
        T: inner-loop training iterations per task.

    Returns:
        The meta-trained Keras model.
    """
    network_dir = f'/home/jl5/data/data-posewarp/models/{model_name}'
    # Robustness fix: exist_ok=True so resuming into an existing model
    # directory no longer raises FileExistsError.
    os.makedirs(network_dir, exist_ok=True)
    params = param.get_general_params()

    # Load the original pretrained weights on the chosen GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(
        '../data/vgg_activation_distribution_train.mat')
    model = networks.network_posewarp(params)
    weight_path = '../models/vgg_100000.h5'
    model.load_weights(weight_path)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    for i in range(params['meta_iterations'] + 1):
        print(i)
        # Sample a k-image task for this meta-iteration.
        data = data_generation.create_feed(params, k, ModelMode.metatrain)

        # Snapshot the initialization before inner-loop adaptation.
        old_weights = deepcopy(extract_weights(model))
        model = train_T_iter(model, T, data)
        new_weights = extract_weights(model)
        updated_weights = compute_reptile(new_weights, old_weights,
                                          params['epsilon'])
        model = set_weights(model, updated_weights)

        # Periodic meta-checkpoint.
        if i % params['metamodel_save_interval'] == 0:
            model.save(network_dir + '/' + str(i) + '.h5')

    return model
# --- Example #15 ---
def test(gpu_id):
    """Visually compare source, ground-truth target and generated target
    images for successive test batches, one matplotlib window per batch.
    """
    params = param.get_general_params()

    test_feed = data_generation.create_feed(params, params['data_dir'], 'test')

    pw = posewarp_wrapper.wrapper(gpu_id=gpu_id)

    for _ in range(10000):
        x, y = next(test_feed)

        disp_data(x[0])
        plt.title('source')

        disp_data(y)
        plt.title('target gt')

        generated = pw.gen(x)
        disp_data(generated)
        plt.title('target gan')

        plt.show()
def train(model_name, gpu_id):
    """Train posewarp with the VGG loss while logging image summaries of
    source / target / generated triplets, plus generated images for one
    fixed train batch and one fixed test batch, to TensorBoard.
    """

    with tf.Session() as sess:

        params = param.get_general_params()

        network_dir = params['model_save_dir'] + '/' + model_name

        # Creates models directory if not exist.
        if not os.path.isdir(network_dir):
            os.mkdir(network_dir)

        train_feed = data_generation.create_feed(params, params['data_dir'],
                                                 'train')
        test_feed = data_generation.create_feed(params, params['data_dir'],
                                                'test')

        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat(
            '../data/vgg_activation_distribution_train.mat')
        model = networks.network_posewarp(params)
        model.compile(
            optimizer=Adam(lr=1e-4),
            loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

        n_iters = params['n_training_iter']

        # NOTE(review): hard-coded Windows log path with literal backslashes.
        summary_writer = tf.summary.FileWriter(
            "D:\Proyectos\JEJU2018\Code\posewarp-cvpr2018\code\logs",
            graph=sess.graph)

        # Fixed train/test batches reused every step to track progress.
        tr_x, tr_y = next(train_feed)
        te_x, te_y = next(test_feed)

        # Prepare output directories if they don't exist.
        output_dir = '../output/' + model_name + '/'

        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        scipy.misc.imsave('../output/tr_orig_image.png', tr_x[0][0, :, :, :])
        scipy.misc.imsave('../output/tr_targ_image.png', tr_y[0, :, :, :])
        scipy.misc.imsave('../output/te_orig_image.png', te_x[0][0, :, :, :])
        scipy.misc.imsave('../output/te_targ_image.png', te_y[0, :, :, :])

        for step in range(0, n_iters):
            x, y = next(train_feed)

            train_loss = model.train_on_batch(x, y)

            util.printProgress(step, 0, train_loss)

            # out = sess.run(conv, feed_dict={"input_1:0" : x[0]})
            # plt.matshow(out[0, :, :, 0])
            # plt.show()

            # Graph tensors fetched by name: generated image, source, target,
            # and the source pose mask.
            gen = tf.get_default_graph().get_tensor_by_name(
                "loss/add_2_loss/lambda_5/add:0")
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
            out = tf.get_default_graph().get_tensor_by_name("in_img1:0")
            p_s = tf.get_default_graph().get_tensor_by_name(
                "mask_src/truediv:0")
            # p_t = tf.get_default_graph().get_tensor_by_name("in_pose1:0")

            image_summary_1 = tf.summary.image(
                'images', [inp[0, :, :, :], out[0, :, :, :], gen[0, :, :, :]],
                max_outputs=100)
            # image_summary_2 = tf.summary.image('pose', [tf.reduce_sum(p_s[0, :, :, :], 2, keepdims=True)], max_outputs=100)

            image_summary_1 = sess.run(image_summary_1,
                                       feed_dict={
                                           "in_img0:0": x[0],
                                           "in_pose0:0": x[1],
                                           "in_pose1:0": x[2],
                                           "mask_prior:0": x[3],
                                           "trans_in:0": x[4],
                                           "in_img1:0": y
                                       })

            # image_summary_2 = sess.run(image_summary_2, feed_dict={"in_img0:0" : x[0], "in_pose0:0" : x[1], "in_pose1:0" : x[2],
            #                                                     "mask_prior:0" : x[3], "trans_in:0" : x[4], "in_img1:0" : y})

            summary_writer.add_summary(image_summary_1)
            # summary_writer.add_summary(image_summary_2)

            # Generate images for the fixed train/test batches each step.
            train_image = sess.run(gen,
                                   feed_dict={
                                       "in_img0:0": tr_x[0],
                                       "in_pose0:0": tr_x[1],
                                       "in_pose1:0": tr_x[2],
                                       "mask_prior:0": tr_x[3],
                                       "trans_in:0": tr_x[4],
                                       "in_img1:0": tr_y
                                   })

            test_image = sess.run(gen,
                                  feed_dict={
                                      "in_img0:0": te_x[0],
                                      "in_pose0:0": te_x[1],
                                      "in_pose1:0": te_x[2],
                                      "mask_prior:0": te_x[3],
                                      "trans_in:0": te_x[4],
                                      "in_img1:0": te_y
                                  })
            if step > 0 and step % params['model_save_interval'] == 0:
                model.save_weights(network_dir + '/' + str(step) + '.h5')
        # NOTE(review): the block below looks like a fragment of a different
        # function pasted here by accident — `path` is never defined in this
        # scope, so reaching it raises NameError. Confirm and remove/relocate.
        ret = []
        for root, dirs, files in os.walk(path):
            for file in files:
                if 'DS_Store' not in file and file[0] != '.':
                    ret += [os.path.join(root, file)]
        return ret


if __name__ == '__main__':
    # Smoke-test entry point: load a pretrained PyTorch generator and build a
    # DataLoader over one motion-transfer dataset pair.
    from net import MModel
    from torch.utils.data import DataLoader
    from torchvision import transforms
    from torch.utils.tensorboard import SummaryWriter
    import time

    params = get_general_params()
    # Override image geometry / pose-map resolution for this model.
    params['IMG_HEIGHT'] = 256
    params['IMG_WIDTH'] = 256
    params['posemap_downsample'] = 2
    net = MModel(params, use_cuda=True)
    net.load_state_dict(torch.load(
        '/versa/kangliwei/motion_transfer/0424-gan/g_epoch_2000.pth'),
                        strict=True)
    net = net.cuda()
    net.eval()
    # NOTE(review): `mtdataset` is not defined/imported in this file — confirm
    # where it comes from before running this entry point.
    # ds = mtdataset(params, 'Standing Yoga Poses for Hips - Day 10 - The 30 Days of Yoga Challenge', 1, 'CHARLEY HULL 4K UHD SLOW MOTION FACE ON DRIVER GOLF SWING_1')
    ds = mtdataset(
        params,
        'Standing Yoga Poses for Hips - Day 10 - The 30 Days of Yoga Challenge',
        1, 'Tennis Tip_ Proper Weight Transfer On Topspin Groundstrokes')
    dl = DataLoader(ds, 1, False)
# --- Example #18 ---
def train(model_name, gpu_id):
    """Adversarially fine-tune the posewarp generator.

    Alternates discriminator updates (real vs. generated target images) with
    GAN updates combining the VGG perceptual loss and a binary-crossentropy
    adversarial term. Checkpoints the GAN every model_save_interval steps.

    Args:
        model_name: checkpoint directory name under model_save_dir.
        gpu_id: CUDA device index.
    """
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name

    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'], 'train')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    gan_lr = 1e-4
    disc_lr = 1e-4
    disc_loss = 0.1  # weight of the adversarial term in the GAN loss

    generator = networks.network_posewarp(params)
    generator.load_weights('../models/vgg_100000.h5')

    discriminator = networks.discriminator(params)
    discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=disc_lr))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')

    gan = networks.gan(generator, discriminator, params)
    gan.compile(optimizer=Adam(lr=gan_lr),
                loss=[networks.vgg_loss(vgg_model, response_weights, 12), 'binary_crossentropy'],
                loss_weights=[1.0, disc_loss])

    n_iters = 10000
    batch_size = params['batch_size']

    for step in range(n_iters):

        x, y = next(train_feed)

        gen = generator.predict(x)

        # Train discriminator: first half real targets (label 1), second half
        # generated fakes (label 0).
        x_tgt_img_disc = np.concatenate((y, gen))
        x_src_pose_disc = np.concatenate((x[1], x[1]))
        x_tgt_pose_disc = np.concatenate((x[2], x[2]))

        L = np.zeros([2 * batch_size])
        L[0:batch_size] = 1

        inputs = [x_tgt_img_disc, x_src_pose_disc, x_tgt_pose_disc]
        d_loss = discriminator.train_on_batch(inputs, L)

        # Warm up the discriminator a few iterations before starting the GAN.
        # (Dead-code fix: removed the no-op `step += 1` — the `for` loop
        # reassigns `step` each iteration anyway.)
        if step < 5:
            util.printProgress(step, 0, [0, d_loss])
            continue

        # Train GAN: the generator tries to make the discriminator output 1.
        L = np.ones([batch_size])
        x, y = next(train_feed)
        g_loss = gan.train_on_batch(x, [y, L])
        util.printProgress(step, 0, [g_loss[1], d_loss])

        if step % params['model_save_interval'] == 0 and step > 0:
            gan.save(network_dir + '/' + str(step) + '.h5')
# --- Example #19 ---
def seq_to_inf(seq_path, save_path=None):
    """Run 2-D pose estimation over a frame sequence and save joint/bbox info.

    Reads every image in ``seq_path`` (sorted by filename), estimates body
    joints with a tf-pose mobilenet model, and stores per-frame joint
    coordinates and bounding boxes in a MATLAB ``.mat`` file.

    Args:
        seq_path: Directory containing the frame images.
        save_path: Output directory for the ``.mat`` file. Defaults to
            ``<seq_path>/../info/``.

    Returns:
        dict: ``{"data": {"X": joints, "bbox": boxes}}`` where ``joints`` is
        (n_joints, 2, n_frames) and ``boxes`` is (n_frames, 4) as
        [x, y, width, height]; entries stay -1 for frames where inference
        failed or a joint was not detected.
    """
    if not save_path:
        save_path = seq_path + "/../info/"

    # Sequence name is the third-from-last path component (dataset layout
    # .../<name>/<split>/<frames dir> — TODO confirm against callers).
    fname = seq_path.split("/")[-3:-2][0]

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # Load frames in filename order; [:, :, ::-1] converts BGR -> RGB.
    frames = np.array([
        cv2.imread(seq_path + "/" + file)[:, :, ::-1]
        for file in sorted(os.listdir(seq_path))
    ])

    estimator = tf_pose.get_estimator(model="mobilenet_thin")

    J = param.get_general_params()['n_joints']

    # -1 marks "not detected" for both joints and boxes.
    skl = np.zeros([J, 2, len(frames)]) - 1
    bbx = np.zeros([len(frames), 4]) - 1

    for f, fr in enumerate(frames):

        # Inference occasionally raises on bad frames; skip those frames and
        # keep their -1 placeholders instead of aborting the whole sequence.
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit — narrowed to Exception.
        try:
            points = estimator.inference(fr,
                                         resize_to_default=(432 > 0
                                                            and 368 > 0),
                                         upsample_size=4.0)[0].body_parts

            min_x, min_y = (+np.inf, +np.inf)
            max_x, max_y = (-np.inf, -np.inf)

            imw, imh = fr[:, :, 0].shape

            for key in points:
                if key < J:
                    # Body-part coordinates are normalized; scale to pixels.
                    x, y = ((points[key].x) * imh, (points[key].y) * imw)

                    min_x = np.minimum(min_x, x)
                    min_y = np.minimum(min_y, y)
                    max_x = np.maximum(max_x, x)
                    max_y = np.maximum(max_y, y)

                    skl[key, 0, f] = x
                    skl[key, 1, f] = y

            # Bounding box tightly encloses the detected joints.
            bbx[f, :] = [min_x, min_y, max_x - min_x, max_y - min_y]

        except Exception:
            continue

    info = {"data": {"X": skl[:J], "bbox": bbx}}

    sio.savemat(save_path + fname + ".mat", info)

    return info
def train(model_name, gpu_id):
    """Train the posewarp generator with a VGG perceptual loss.

    Builds the model, streams training batches, logs the loss and sample
    images to TensorBoard (``./logs/run2/``), writes sample outputs under
    ``../Output/<model_name>/``, and checkpoints weights under
    ``params['model_save_dir']/<model_name>``.

    Args:
        model_name: Run name used for the checkpoint and output directories.
        gpu_id: CUDA device index exported via CUDA_VISIBLE_DEVICES.
    """

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    # NOTE(review): three sessions are created in this function (set_session
    # above plus the two nested `with tf.Session(...)` blocks below). They
    # share the default graph, but the redundancy looks accidental — confirm
    # which session owns the variables before simplifying.
    with tf.Session() as sess:

        params = param.get_general_params()

        network_dir = params['model_save_dir'] + '/' + model_name

        # Creates models directory if not exist.
        if not os.path.isdir(network_dir):
            os.mkdir(network_dir)

        train_feed = data_generation.create_feed(params, params['data_dir'],
                                                 'train')
        # test_feed  = data_generation.create_feed(params, params['data_dir'], 'test')

        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        tf_config.allow_soft_placement = True

        with tf.Session(config=tf_config) as sess:

            # Load VGG truncated model.
            vgg_model = truncated_vgg.vgg_norm()
            networks.make_trainable(vgg_model, False)

            # Load VGG pretrained weights.
            response_weights = sio.loadmat(
                '../Models/vgg_activation_distribution_train.mat')

            # Create graph and compile keras model.
            model = networks.network_posewarp(params)
            tloss = networks.vgg_loss(vgg_model, response_weights, 12)
            model.compile(optimizer=Adam(lr=1e-4), loss=[tloss])

            # Get number of trainig steps.
            n_iters = params['n_training_iter']

            # Create a tensorboard writer.
            summary_writer = tf.summary.FileWriter("./logs/run2/",
                                                   graph=sess.graph)

            # Hold one fixed training batch to visualize progress over time.
            tr_x, tr_y = next(train_feed)
            # te_x, te_y = next(test_feed)

            # Prepare output directories if they don't exist.
            output_dir = '../Output/' + model_name + '/'

            if not os.path.isdir(output_dir):
                os.mkdir(output_dir)

            scipy.misc.imsave('../Output/' + model_name + '/tr_orig_image.png',
                              tr_x[0][0, :, :, :])
            scipy.misc.imsave('../Output/' + model_name + '/tr_targ_image.png',
                              tr_y[0, :, :, :])

            # scipy.misc.imsave('../Output/' + model_name + '/te_orig_image.png', te_x[0][0, :, :, :])
            # scipy.misc.imsave('../Output/' + model_name + '/te_targ_image.png', te_y[0, :, :, :])

            # Tensorboard logged tensors.
            # NOTE(review): fetching tensors by hard-coded graph names is
            # brittle across Keras/TF versions — these names must match the
            # compiled graph exactly.
            gen = tf.get_default_graph().get_tensor_by_name(
                "loss/add_2_loss/lambda_5/add:0")[0, :, :, :]
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")[
                0, :, :, :]
            msk = tf.get_default_graph().get_tensor_by_name("mask_prior:0")[
                0, :, :, 0:1]
            msk = tf.tile(msk, [1, 1, 3])
            out = tf.get_default_graph().get_tensor_by_name("in_img1:0")[
                0, :, :, :]

            for step in range(0, n_iters):

                # Train with next batch.
                x, y = next(train_feed)

                # plt.imshow(x[0][0, :, :, 0] * 3 - cv2.resize(x[3][0, :, :, 0], (256, 256)))
                # plt.show()
                # plt.imshow(x[0][1, :, :, 0] * 3 - cv2.resize(x[3][1, :, :, 0], (256, 256)))
                # plt.show()
                train_loss = model.train_on_batch(x, y)

                # Print training loss progress.
                util.printProgress(step, 0, train_loss)

                # Add training loss to tensorboard.
                summary = tf.Summary()
                summary.value.add(tag='loss', simple_value=train_loss)
                summary_writer.add_summary(summary, step)

                # plt.imshow(np.round(((((x[3][1]) / 2.0) + 0.5) * 255.0)).astype(np.uint8))
                # plt.show()
                #
                # plt.imshow(x[3][1, :, :, 0])
                # plt.show()

                if step % params['test_interval'] == 0:

                    # Set up tensorboard image summary.
                    # NOTE(review): this summary op is re-created every test
                    # interval, growing the graph over time.
                    image_summary_1 = tf.summary.image('images',
                                                       [inp, msk, out, gen],
                                                       max_outputs=100)

                    # Compute summary.
                    image_summary_1_run = sess.run(image_summary_1,
                                                   feed_dict={
                                                       "in_img0:0": x[0],
                                                       "in_pose0:0": x[1],
                                                       "in_pose1:0": x[2],
                                                       "mask_prior:0": x[3],
                                                       "trans_in:0": x[4],
                                                       "in_img1:0": y
                                                   })
                    # Register summary in tensorboard.
                    summary_writer.add_summary(image_summary_1_run)

                    # Compute training sample images.
                    train_image = sess.run(gen,
                                           feed_dict={
                                               "in_img0:0": tr_x[0],
                                               "in_pose0:0": tr_x[1],
                                               "in_pose1:0": tr_x[2],
                                               "mask_prior:0": tr_x[3],
                                               "trans_in:0": tr_x[4],
                                               "in_img1:0": tr_y
                                           })

                    # Save in disk computed sample images.
                    scipy.misc.imsave(output_dir + 'tr' + str(step) + ".png",
                                      train_image)

                # Save model checkpoints.
                if step > 0 and step % params['model_save_interval'] == 0:
                    model.save_weights(network_dir + '/' + str(step) + '.h5')
Exemple #21
0
            pretrained_states = torch.load(pretrained_network_path)
            utils_train.transfer_partial_weights(pretrained_states, network_single.to_pose, submodule=0) # last argument is to remove "network.single" prefix in saved network
            print("Done loading weights from config_dict['pretrained_posenet_network_path']")
        return network_single


def predict():
    """Run the model on the module-level ``input_dict``/``label_dict``.

    Moves the inputs to ``device``, runs a no-grad forward pass through the
    module-level ``model``, and moves the outputs back to the CPU.

    Returns:
        The model's output dict with all tensors on the CPU.
    """
    with torch.no_grad():
        input_dict_cuda, label_dict_cuda = utils_data.nestedDictToDevice(
            (input_dict, label_dict), device=device)
        output_dict_cuda = model(input_dict_cuda)
        output_dict = utils_data.nestedDictToDevice(output_dict_cuda,
                                                    device='cpu')
    # Bug fix: the result was computed but discarded (the original
    # `#nonlocal output_dict` comment shows it was meant to escape this
    # scope). Returning it is backward-compatible with callers that ignored
    # the previous implicit None.
    return output_dict
    
if __name__ == "__main__":
    params = param.get_general_params()
    # Load the experiment configuration module and override test-time options.
    config_dict_module = utils_io.loadModule("configs/config_test_encodeDecode.py")
    config_dict = config_dict_module.config_dict
    config_dict['batch_size_test'] = 2
    # Keep an existing override if present; otherwise default to 2 layers.
    config_dict['n_hidden_to3Dpose'] = config_dict.get('n_hidden_to3Dpose', 2)

    # load data
    device='cuda'

    # NOTE(review): pickle.load on a repository file; only safe for trusted
    # example data — never point this at untrusted input. The file handle is
    # also never closed explicitly.
    data_loader = pickle.load(open('examples/test_set.pickl',"rb"))
    ex = data_loader[0][0]
    

    # load model
    model = load_network(config_dict)
    model = model.to(device)
Exemple #22
0
def test(model_name, gpu_id):
    """Dump generator outputs and learned masks for one test batch.

    Loads a finetuned posewarp generator, predicts a single batch, and writes
    source / generated / target images plus per-channel mask visualizations
    into ``./pics``. Execution stops at the ``break`` below after the first
    batch; the GAN-training code that follows it is unreachable.

    Args:
        model_name: Checkpoint directory name (only used for network_dir).
        gpu_id: CUDA device index exported via CUDA_VISIBLE_DEVICES.
    """
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name

    # if not os.path.isdir(network_dir):
    #     os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params,
                                             params['data_dir'],
                                             'test',
                                             do_augment=False)

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    gan_lr = 1e-4
    disc_lr = 1e-4
    disc_loss = 0.1

    generator = networks.network_posewarp(params)
    # generator.load_weights('../models/vgg_100000.h5')
    generator.load_weights(
        '/versa/kangliwei/motion_transfer/posewarp-cvpr2018/models/0301_fullfinetune/9000.h5'
    )

    # Auxiliary models exposing the generator's intermediate mask layers.
    mask_delta_model = Model(input=generator.input,
                             output=generator.get_layer('mask_delta').output)
    src_mask_model = Model(input=generator.input,
                           output=generator.get_layer('mask_src').output)

    discriminator = networks.discriminator(params)
    discriminator.compile(loss='binary_crossentropy',
                          optimizer=Adam(lr=disc_lr))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(
        '../data/vgg_activation_distribution_train.mat')

    gan = networks.gan(generator, discriminator, params)
    gan.compile(optimizer=Adam(lr=gan_lr),
                loss=[
                    networks.vgg_loss(vgg_model, response_weights, 12),
                    'binary_crossentropy'
                ],
                loss_weights=[1.0, disc_loss])

    n_iters = 10000
    batch_size = params['batch_size']

    for step in range(n_iters):

        x, y = next(train_feed)

        gen = generator.predict(x)

        # Scale mask activations to [0, 255] for image dumps.
        src_mask_delta = mask_delta_model.predict(x)
        print('delta_max', src_mask_delta.max())
        src_mask_delta = src_mask_delta * 255
        src_mask = src_mask_model.predict(x)
        print('mask_max', src_mask.max())
        src_mask = src_mask * 255
        # print('src_mask_delta', type(src_mask_delta), src_mask_delta.shape)

        # Map [-1, 1] images to [0, 255] for cv2.imwrite.
        y = (y / 2 + 0.5) * 255.0
        gen = (gen / 2 + 0.5) * 255.0
        for i in range(gen.shape[0]):  # iterate in batch
            cv2.imwrite('pics/src' + str(i) + '.jpg', x[0][i] * 255)
            cv2.imwrite('pics/gen' + str(i) + '.jpg', gen[i])
            cv2.imwrite('pics/y' + str(i) + '.jpg', y[i])
            for j in range(11):
                cv2.imwrite('pics/seg_delta_' + str(i) + '_' + str(j) + '.jpg',
                            src_mask_delta[i][:, :, j])
            for j in range(11):
                cv2.imwrite('pics/seg_' + str(i) + '_' + str(j) + '.jpg',
                            src_mask[i][:, :, j])
        # Debug stop after the first batch: everything below this break is
        # unreachable dead code (leftover GAN training loop).
        break

        # Train discriminator
        x_tgt_img_disc = np.concatenate((y, gen))
        x_src_pose_disc = np.concatenate((x[1], x[1]))
        x_tgt_pose_disc = np.concatenate((x[2], x[2]))

        L = np.zeros([2 * batch_size])
        L[0:batch_size] = 1

        inputs = [x_tgt_img_disc, x_src_pose_disc, x_tgt_pose_disc]
        d_loss = discriminator.train_on_batch(inputs, L)

        # Train the discriminator a couple of iterations before starting the gan
        if step < 5:
            util.printProgress(step, 0, [0, d_loss])
            step += 1
            continue

        # TRAIN GAN
        L = np.ones([batch_size])
        x, y = next(train_feed)
        g_loss = gan.train_on_batch(x, [y, L])
        util.printProgress(step, 0, [g_loss[1], d_loss])

        if step % params['model_save_interval'] == 0 and step > 0:
            generator.save(network_dir + '/' + str(step) + '.h5')
        bbx[f, :] = [min_x, min_y, max_x - min_x, max_y - min_y]

    info = {"data": {"X": skl[:J], "bbox": bbx}}

    if save_path:
        sio.savemat(save_path + ".mat", info)

    return info


# Split vid_path in directory and name.
# NOTE(review): `vid_path` is not defined in this visible fragment — it must
# be assigned earlier in the file for these two lines to run.
vid_dir = '/'.join(vid_path.split('/')[:-3]) + "/"
vid_name = '_'.join(vid_path.split('/')[-3:-1])

data_dir = param.get_general_params()['data_dir']

# frms = vid_to_seq(vid_path=vid_path, save_path=data_dir + "/train/frames/" + vid_name + "/")
# info = seq_to_inf(frms, save_path=data_dir + "/train/info/" + vid_name)
# Load the two test frames; [:, :, ::-1] converts cv2's BGR to RGB.
img = [
    cv2.imread(
        "D:/Proyectos/JEJU2018/Code/posewarp-cvpr2018/data/test/frames/Carlos/1.png"
    )[:, :, ::-1],
    cv2.imread(
        "D:/Proyectos/JEJU2018/Code/posewarp-cvpr2018/data/test/frames/Carlos/2.png"
    )[:, :, ::-1]
]

# NOTE(review): seq_to_inf is called here with a list of images, while other
# variants of it in this file expect a directory path — confirm which
# signature this script targets before running.
info = seq_to_inf(img, save_path=data_dir + "/test/info/Carlos")

info_name = 'D:/Proyectos/JEJU2018/Code/posewarp-cvpr2018/data/train/info/Golf-Swing-Front_003.mat'
Exemple #24
0
def test(model_name, gpu_id):
    """Visualize one test batch: poses, limb masks, source/target/generated.

    Restores pretrained posewarp weights, predicts the target image for a
    single test batch, and writes all visualizations into the project's
    ``work_product`` directory.

    Args:
        model_name: Checkpoint directory name under model_save_dir.
        gpu_id: CUDA device index exported via CUDA_VISIBLE_DEVICES.
    """
    params = param.get_general_params()
    network_dir = params['model_save_dir'] + '/' + model_name
    work_product_dir = params['project_dir'] + '/' + 'work_product'

    test_feed = data_generation.create_feed(params, params['data_dir'], 'test')

    # Pin the job to the requested GPU and let TF grow memory on demand.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    # Frozen VGG supplies the perceptual-loss feature statistics.
    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(
        'posewarp-cvpr2018/data/vgg_activation_distribution_train.mat')

    model = networks.network_posewarp(params)
    model.summary()
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    model.load_weights(network_dir + '/' + 'weights_model_gan_improved.h5')

    n_iters = params['n_training_iter']  # kept for parity; unused below
    x, y = next(test_feed)

    # Collapse channel stacks into single-channel images for visualization.
    src_limb_masks = np.amax(np.asarray(x[3][0]), axis=2)
    # Individual limb masks, kept around for ad-hoc debugging.
    src_limb_mask_1, src_limb_mask_2, src_limb_mask_3, src_limb_mask_4, \
        src_limb_mask_5 = (np.asarray(x[3][0][:, :, c]) for c in range(5))
    src_pose = np.amax(np.asarray(x[1][0]), axis=2)
    trgt_pose = np.amax(np.asarray(x[2][0]), axis=2)

    scipy.misc.imsave(work_product_dir + '/source_pose.jpg', src_pose)
    scipy.misc.imsave(work_product_dir + '/target_pose.jpg', trgt_pose)
    scipy.misc.imsave(work_product_dir + '/source_limb_mask.jpg',
                      src_limb_masks)

    target_img = np.asarray(y[0])
    src_img = np.asarray(x[0][0])

    # Single-sample forward pass; outputs are in [-1, 1].
    gen_img = np.asarray(model.predict(x, 1)[0])

    # Map [-1, 1] images to [0, 255]: pixel = 127.5 + 127.5 * value.
    constarr = 255 * 0.5 * np.ones((256, 256, 3))

    cv2.imwrite(work_product_dir + '/target.jpg',
                constarr + target_img * (0.5 * 255))
    cv2.imwrite(work_product_dir + '/gen_target.jpg',
                constarr + gen_img * (0.5 * 255))
    cv2.imwrite(work_product_dir + '/source.jpg',
                constarr + src_img * (0.5 * 255))
def train(model_name, gpu_id):
    """Adversarially train the posewarp generator with a discriminator.

    Warms up the discriminator for 5 steps, then alternates discriminator and
    GAN updates, logging image summaries to ``./logs`` and checkpointing the
    combined GAN under ``params['model_save_dir']/<model_name>``.

    Args:
        model_name: Run name used for the checkpoint and output directories.
        gpu_id: CUDA device index exported via CUDA_VISIBLE_DEVICES.
    """

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_config.allow_soft_placement = True

    with tf.Session(config=tf_config) as sess:

        params = param.get_general_params()
        network_dir = params['model_save_dir'] + '/' + model_name

        if not os.path.isdir(network_dir):
            os.mkdir(network_dir)

        train_feed = data_generation.create_feed(params, params['data_dir'], "train")
        # test_feed = data_generation.create_feed(params,  params['data_dir'], "test")

        # NOTE(review): setting CUDA_VISIBLE_DEVICES after the session above
        # is already created likely has no effect on device selection.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        gan_lr  = 1e-3
        disc_lr = 1e-3
        disc_loss = 0.1

        generator = networks.network_posewarp(params)
        # generator.load_weights('../models/posewarp_vgg/100000.h5')

        discriminator = networks.discriminator(params)
        discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=disc_lr))

        # Frozen VGG supplies the perceptual-loss feature statistics.
        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../Models/vgg_activation_distribution_train.mat')

        gan = networks.gan(generator, discriminator, params)

        gan.compile(optimizer=Adam(lr=gan_lr),
                    loss=[networks.vgg_loss(vgg_model, response_weights, 12), 'binary_crossentropy'],
                    loss_weights=[1.0, disc_loss])

        n_iters = params['n_training_iter']
        batch_size = params['batch_size']

        summary_writer = tf.summary.FileWriter("./logs", graph=sess.graph)

        # Hold one fixed training batch to visualize progress over time.
        tr_x, tr_y = next(train_feed)
        # te_x, te_y = next(test_feed)

        # Prepare output directories if they don't exist.
        output_dir = '../Output/' + model_name + '/'

        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        scipy.misc.imsave(output_dir + 'tr_orig_image.png', tr_x[0][0, :, :, :])
        scipy.misc.imsave(output_dir + 'tr_targ_image.png', tr_y[0, :, :, :])
        # scipy.misc.imsave(output_dir + 'te_orig_image.png', te_x[0][0, :, :, :])
        # scipy.misc.imsave(output_dir + 'te_targ_image.png', te_y[0, :, :, :])

        print("Batch size: " + str(batch_size))

        for step in range(n_iters):

            x, y = next(train_feed)

            gen = generator.predict(x)

            # Train discriminator
            # Real targets are labeled 1, generated images 0.
            x_tgt_img_disc  = np.concatenate((y, gen))
            x_src_pose_disc = np.concatenate((x[1], x[1]))
            x_tgt_pose_disc = np.concatenate((x[2], x[2]))

            L = np.zeros([2 * batch_size])
            L[0:batch_size] = 1

            inputs = [x_tgt_img_disc, x_src_pose_disc, x_tgt_pose_disc]
            d_loss = discriminator.train_on_batch(inputs, L)

            # Train the discriminator a couple of iterations before starting the gan
            if step < 5:
                util.printProgress(step, 0, [0, d_loss])
                step += 1
                continue

            # TRAIN GAN
            # Generator is trained to make the discriminator output 1.
            L = np.ones([batch_size])
            x, y = next(train_feed)
            g_loss = gan.train_on_batch(x, [y, L])
            util.printProgress(step, 0, [g_loss[1], d_loss])

            if step % params['test_interval'] == 0:

                print(gen[0])

                # NOTE(review): fetching tensors by hard-coded graph names is
                # brittle across Keras/TF versions — these names must match
                # the compiled graph exactly.
                gen = tf.get_default_graph().get_tensor_by_name("model_1/add_2_1/add:0")
                inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
                out = tf.get_default_graph().get_tensor_by_name("in_img1:0")
                p_s = tf.get_default_graph().get_tensor_by_name("mask_src/truediv:0")
                # p_t = tf.get_default_graph().get_tensor_by_name("in_pose1:0")


                image_summary_1 = tf.summary.image('images', [inp[0, :, :, :], out[0, :, :, :], gen[0, :, :, :]], max_outputs=100)
                # image_summary_2 = tf.summary.image('pose', [tf.reduce_sum(p_s[0, :, :, :], 2, keepdims=True)], max_outputs=100)

                image_summary_1 = sess.run(image_summary_1,feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1], "in_pose1:0": x[2],
                                                                      "mask_prior:0": x[3], "trans_in:0": x[4], "in_img1:0": y,
                                                                      "input_3:0": x[0], "input_4:0": x[1], "input_5:0": x[2],
                                                                      "input_6:0": x[3], "input_7:0": x[4]})
                #
                # img_gen =  sess.run(image_summary_1,feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1], "in_pose1:0": x[2],
                #                                                "mask_prior:0": x[3], "trans_in:0": x[4], "in_img1:0": y,
                #                                                "input_3:0": x[0], "input_4:0": x[1], "input_5:0": x[2],
                #                                                "input_6:0": x[3], "input_7:0": x[4]})


                # image_summary_2 = sess.run(image_summary_2, feed_dict={"in_img0:0" : x[0], "in_pose0:0" : x[1], "in_pose1:0" : x[2],
                #                                                     "mask_prior:0" : x[3], "trans_in:0" : x[4], "in_img1:0"  : y})

                summary_writer.add_summary(image_summary_1)
                # summary_writer.add_summary(image_summary_2)

                # Render the fixed training batch with the current generator.
                train_image = sess.run(gen, feed_dict={"in_img0:0": tr_x[0], "in_pose0:0": tr_x[1], "in_pose1:0": tr_x[2],
                                                       "mask_prior:0": tr_x[3], "trans_in:0": tr_x[4], "in_img1:0": tr_y,
                                                       "input_3:0": tr_x[0], "input_4:0": tr_x[1], "input_5:0": tr_x[2],
                                                       "input_6:0": tr_x[3], "input_7:0": tr_x[4]})
                #
                # test_image = sess.run(gen, feed_dict={"in_img0:0": te_x[0], "in_pose0:0": te_x[1], "in_pose1:0": te_x[2],
                #                                       "mask_prior:0": te_x[3], "trans_in:0": te_x[4], "in_img1:0": te_y,
                #                                       "input_3:0": te_x[0], "input_4:0": te_x[1], "input_5:0": te_x[2],
                #                                       "input_6:0": te_x[3], "input_7:0": te_x[4]})


                scipy.misc.imsave(output_dir + 'tr' + str(step) + ".png", train_image[0, :, :, :])
                # scipy.misc.imsave(output_dir + 'te' + str(step) + ".png", test_image[0, :, :, :])

            if step % params['model_save_interval'] == 0 and step > 0:
                gan.save(network_dir + '/' + str(step) + '.h5')
def finetune(model_name,
             exp_name,
             save_dir,
             gpu_id,
             vid_i,
             T,
             iter_num,
             rdm=False):
    """Finetune a posewarp model on one test video, then score it.

    Trains for ``T`` iterations on the video's train split (checkpointing
    every 1000 steps), then generates every frame of the test split and
    scores it against the ground truth with SSIM / PSNR / MSE. Generated
    frames and a pickled score array are written to ``save_dir``.

    Args:
        model_name: Name of the pretrained model directory to start from.
        exp_name: Name for this run's checkpoint directory.
        save_dir: Directory for generated images and the score pickle.
        gpu_id: CUDA device index exported via CUDA_VISIBLE_DEVICES.
        vid_i: Index of the test video to finetune/evaluate on.
        T: Number of finetuning iterations (must be > 0).
        iter_num: Checkpoint iteration of the pretrained model to load.
        rdm: If True, skip loading pretrained weights (random init).
    """
    params = param.get_general_params()
    img_width = params['IMG_WIDTH']
    img_height = params['IMG_HEIGHT']

    network_dir = params['model_save_dir'] + '/' + exp_name

    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    # Frozen VGG supplies the perceptual-loss feature statistics.
    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(
        '../data/vgg_activation_distribution_train.mat')
    model = networks.network_posewarp(params)
    if not rdm:
        weight_path = str(
            os.path.join(params['model_save_dir'], model_name,
                         f'{iter_num}.h5'))
        model.load_weights(weight_path)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    # Finetune for T iterations on this video's training frames.
    train_feed = data_generation.create_feed(
        params,
        None,
        ModelMode.finetune,
        vid_i,
        txtfile=f'../testset_split_85_v3/train_{vid_i}_img.txt',
        do_augment=True)
    startTime = datetime.now()
    for step in range(T):
        x, y = next(train_feed)
        train_loss = model.train_on_batch(x, y)
        util.printProgress(step, 0, train_loss)

        if step % 1000 == 0:
            print_viz(train_feed, model)
            print(datetime.now() - startTime)
            model.save(network_dir + '/' + str(step) + '.h5')
    model.save(network_dir + '/' + str(step) + '.h5')

    # Evaluate on every frame of the test split.
    test_feed, dir_len = data_generation.create_test_feed(
        params,
        None,
        vid_i=vid_i,
        txtfile=f'../testset_split_85_v3/test_{vid_i}_img.txt',
        k_txtfile=f'../testset_split_85_v3/train_{vid_i}_img.txt')
    scores = np.zeros((dir_len, 3))
    # Bug fix: frames skipped on cv2 errors used to remain all-zero rows and
    # silently bias the mean/std; track which rows were actually scored.
    valid = np.zeros(dir_len, dtype=bool)
    for j in range(dir_len):
        try:
            x, y, scale, pos, img_num = next(test_feed)
            arr_loss = model.predict_on_batch(x)
        except cv2.error:
            print("OpenCV Error, gonna ignore")
            continue
        i = 0
        # Network outputs are in [-1, 1]; map to [0, 256) pixel values.
        generated = (arr_loss[i] + 1) * 128
        gen_resized = data_generation.reverse_center_and_scale_image(
            generated, img_width, img_height, pos, scale)
        target = (y[i] + 1) * 128
        target_resized = data_generation.reverse_center_and_scale_image(
            target, img_width, img_height, pos, scale)
        source = (x[0][i] + 1) * 128
        resized_source = cv2.resize(source, (0, 0), fx=2, fy=2)
        source_resized = data_generation.reverse_center_and_scale_image(
            source, img_width, img_height, pos, scale)
        modified_img = data_generation.add_source_to_image(
            gen_resized, resized_source)
        cv2.imwrite(save_dir + f'/{img_num:08d}.png', gen_resized)
        scores[j][0] = compare_ssim(gen_resized,
                                    target_resized,
                                    multichannel=True,
                                    data_range=256)
        scores[j][1] = compare_psnr(gen_resized,
                                    target_resized,
                                    data_range=256)
        scores[j][2] = compare_mse(gen_resized, target_resized)
        valid[j] = True

    # Aggregate only over frames that were actually scored.
    mean_scores = scores[valid].mean(axis=0)
    std_scores = scores[valid].std(axis=0)

    print(mean_scores)
    print(std_scores)
    save_dict = os.path.join(save_dir, f"saved_scores_{vid_i}.pkl")
    # Bug fix: close the file deterministically (was an unclosed open()).
    with open(save_dict, "wb") as fh:
        pickle.dump(scores, fh)
def main(gpu_id):
    """Compute per-channel mean/std of VGG activations over training data.

    Streams 2000 batches through the truncated VGG twice — one pass for the
    per-channel means, a second for the standard deviations — and saves the
    per-layer statistics to ``vgg_train_statistics.mat``.

    Args:
        gpu_id: CUDA device index exported via CUDA_VISIBLE_DEVICES.
    """
    params = param.get_general_params()

    train_feed = data_generation.create_feed(params, params['data_dir'])

    # Pin to the requested GPU; let TF grow memory on demand.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()

    n_layers = len(vgg_model.outputs)
    n_batches = 2000

    # Pass 1: accumulate per-channel sums and element counts for each layer.
    mean_response = []
    num_elements = []
    for batch_idx in range(n_batches):
        print(batch_idx)
        x, y = next(train_feed)
        activations = vgg_model.predict(util.vgg_preprocess(x[0]))

        for layer in range(n_layers):
            channel_sum = np.sum(activations[layer], axis=(0, 1, 2))
            count = np.prod(activations[layer].shape[0:3])

            if batch_idx == 0:
                mean_response.append(channel_sum)
                num_elements.append(count)
            else:
                mean_response[layer] += channel_sum
                num_elements[layer] += count

    for layer in range(n_layers):
        mean_response[layer] /= (1.0 * num_elements[layer])

    # Pass 2: accumulate squared deviations from the per-channel means.
    std_response = []
    for batch_idx in range(n_batches):
        print(batch_idx)
        x, y = next(train_feed)
        activations = vgg_model.predict(util.vgg_preprocess(x[0]))

        for layer in range(len(activations)):
            mu = np.reshape(mean_response[layer], (1, 1, 1, -1))
            mu = np.tile(mu, (activations[layer].shape[0:3]) + (1, ))

            sq_dev = np.sum((activations[layer] - mu)**2, axis=(0, 1, 2))
            if batch_idx == 0:
                std_response.append(sq_dev)
            else:
                std_response[layer] += sq_dev

    # Unbiased (n - 1) standard deviation per channel.
    for layer in range(n_layers):
        std_response[layer] = np.sqrt(std_response[layer] /
                                      (num_elements[layer] - 1.0))

    responses = {
        str(layer): (mean_response[layer], std_response[layer])
        for layer in range(n_layers)
    }

    sio.savemat('vgg_train_statistics.mat', responses)