def test():
    """Build a re-id pair file from the training annotations, generate images
    for those pairs with a pretrained generator, and write a train file.
    """
    args = cmd.args()

    # Re-id evaluation runs on the *training* images: point the test split
    # at them and at the pair file we are about to create.
    args.images_dir_test = args.images_dir_train
    args.pairs_file_test = 'data/market-re-id-pairs.csv'
    pairs_for_each = 2
    train_file_name = 'train.txt'
    store_train_images = True
    generated_as_separate = False

    df_keypoints = pd.read_csv(args.annotations_file_train, sep=':')
    df = filter_not_valid(df_keypoints)

    print('Compute pair for train re-id...')
    pairs_df_train = make_pairs(df, pairs_for_each)
    print('Number of pairs: %s' % len(pairs_df_train))
    # Fix: reuse the configured path instead of repeating the literal, so the
    # dataset below is guaranteed to read the exact file just written.
    pairs_df_train.to_csv(args.pairs_file_test, index=False)

    dataset = PoseHMDataset(test_phase=True, **vars(args))
    generator = make_generator(args.image_size, args.use_input_pose,
                               args.warp_skip, args.disc_type, args.warp_agg,
                               args.use_bg, args.pose_rep_type)
    # A pretrained generator is mandatory here; nothing is trained.
    assert (args.generator_checkpoint is not None)
    generator.load_weights(args.generator_checkpoint)

    print("Generate images...")
    generate_images(dataset,
                    generator,
                    args.use_input_pose,
                    args.generated_images_dir,
                    store_train_images=store_train_images)

    print("Creating train file...")
    create_train_file(args.generated_images_dir, train_file_name,
                      generated_as_separate)
Example #2
0
def main():
    """Train the pose-transfer CGAN: tee stdout to a log file, snapshot the
    run options as JSON, build the networks, and launch the trainer."""
    args = cmd.args()
    opts = vars(args)

    # Timestamp used to tag both the log file and the saved options.
    run_tag = datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S')
    sys.stdout = Logger(
        osp.join(args.output_dir, 'log_{}.txt'.format(run_tag)))
    # Persist the full option set next to the log for reproducibility.
    with open(osp.join(args.output_dir, 'args_{}.json'.format(run_tag)),
              'w') as fp:
        json.dump(opts, fp, indent=1)

    generator = make_generator(args.image_size, args.use_input_pose,
                               args.warp_skip, args.disc_type, args.warp_agg,
                               args.debug)
    discriminator = make_discriminator(args.image_size, args.use_input_pose,
                                       args.warp_skip, args.disc_type,
                                       args.warp_agg)
    # Resume either network from a checkpoint when one is provided.
    if args.generator_checkpoint is not None:
        generator.load_weights(args.generator_checkpoint)
    if args.discriminator_checkpoint is not None:
        discriminator.load_weights(args.discriminator_checkpoint)

    dataset = PoseHMDataset(test_phase=False, **opts)
    gan = CGAN(generator, discriminator, **opts)
    trainer = Trainer(dataset, gan, **opts)
    trainer.train()
Example #3
0
def test():
    """Load previously generated images, report and return their inception score."""
    args = cmd.args()

    print("Loading images...")
    images = load_generated_images(args.generated_images_dir)

    print("Compute inception score...")
    score = get_inception_score(images)
    # First element is the mean score; the rest is auxiliary statistics.
    print("Inception score %s" % score[0])

    return score
Example #4
0
def test():
    """Generate re-id images for the training split with a pretrained generator."""
    args = cmd.args()

    # Evaluate on the training images, paired via the precomputed CSV.
    args.images_dir_test = args.images_dir_train
    args.pairs_file_test = 'data/market-re-id-pairs.csv'

    dataset = PoseHMDataset(test_phase=True, **vars(args))
    model = make_generator(args.image_size, args.use_input_pose,
                           args.warp_skip, args.disc_type, args.warp_agg)
    # A pretrained checkpoint is required; nothing is trained here.
    assert (args.generator_checkpoint is not None)
    model.load_weights(args.generator_checkpoint)

    print("Generate images...")
    generate_images(dataset, model, args.use_input_pose,
                    args.generated_images_dir)
Example #5
0
def main():
    """Build the CGAN (generator + discriminator) from the command-line
    options and run training."""
    args = cmd.args()
    opts = vars(args)

    generator = make_generator(args.image_size, args.use_input_pose,
                               args.warp_skip, args.disc_type, args.warp_agg)
    if args.generator_checkpoint is not None:
        generator.load_weights(args.generator_checkpoint)

    discriminator = make_discriminator(args.image_size, args.use_input_pose,
                                       args.warp_skip, args.disc_type,
                                       args.warp_agg)
    if args.discriminator_checkpoint is not None:
        discriminator.load_weights(args.discriminator_checkpoint)

    dataset = PoseHMDataset(test_phase=False, **opts)
    gan = CGAN(generator, discriminator, **opts)
    trainer = Trainer(dataset, gan, **opts)
    trainer.train()
Example #6
0
File: test.py  Project: zjuming/pose-gan
def test():
    """Evaluate a pretrained generator: inception score, SSIM and L1,
    plus masked variants of inception score and SSIM."""
    args = cmd.args()

    if args.load_generated_images:
        # Reuse images written by a previous generation run.
        print("Loading images...")
        (input_images, target_images,
         generated_images, names) = load_generated_images(args.generated_images_dir)
    else:
        print("Generate images...")
        from keras import backend as K
        if args.use_dropout_test:
            # Learning phase 1 keeps dropout active at test time.
            K.set_learning_phase(1)
        dataset = PoseHMDataset(test_phase=True, **vars(args))
        generator = make_generator(args.image_size, args.use_input_pose,
                                   args.warp_skip, args.disc_type,
                                   args.warp_agg, args.use_bg,
                                   args.pose_rep_type)
        assert (args.generator_checkpoint is not None)
        generator.load_weights(args.generator_checkpoint)
        (input_images, target_images,
         generated_images, names) = generate_images(dataset, generator,
                                                    args.use_input_pose)
        print("Save images to %s..." % (args.generated_images_dir, ))
        save_images(input_images, target_images, generated_images, names,
                    args.generated_images_dir)

    print("Compute inception score...")
    inception_score = get_inception_score(generated_images)
    print("Inception score %s" % inception_score[0])

    print("Compute structured similarity score (SSIM)...")
    structured_score = ssim_score(generated_images, target_images)
    print("SSIM score %s" % structured_score)

    print("Compute l1 score...")
    norm_score = l1_score(generated_images, target_images)
    print("L1 score %s" % norm_score)

    print("Compute masked inception score...")
    # Mask the background via the keypoint annotations before re-scoring.
    generated_images_masked = create_masked_image(names, generated_images,
                                                  args.annotations_file_test)
    reference_images_masked = create_masked_image(names, target_images,
                                                  args.annotations_file_test)
    inception_score_masked = get_inception_score(generated_images_masked)

    print("Inception score masked %s" % inception_score_masked[0])
    print("Compute masked SSIM...")
    structured_score_masked = ssim_score(generated_images_masked,
                                         reference_images_masked)
    print("SSIM score masked %s" % structured_score_masked)

    print("Inception score = %s, masked = %s; SSIM score = %s, masked = %s; l1 score = %s" %
          (inception_score, inception_score_masked, structured_score,
           structured_score_masked, norm_score))
Example #7
0
def test():
    """Evaluate generated images with FID, inception score, SSIM and L1."""
    args = cmd.args()

    if args.load_generated_images:
        # Reuse images written by a previous generation run.
        print("Loading images...")
        input_images, target_images, generated_images, names = \
            load_generated_images(args.generated_images_dir)
    else:
        print("Generate images...")
        from keras import backend as K
        if args.use_dropout_test:
            # Learning phase 1 keeps dropout active at test time.
            K.set_learning_phase(1)
        dataset = PoseHMDataset(test_phase=True, **vars(args))
        generator = make_generator(args.image_size, args.use_input_pose,
                                   args.warp_skip, args.disc_type,
                                   args.warp_agg)
        assert (args.generator_checkpoint is not None)
        generator.load_weights(args.generator_checkpoint)
        input_images, target_images, generated_images, names = \
            generate_images(dataset, generator, args.use_input_pose)
        print("Save images to %s..." % (args.generated_images_dir, ))
        save_images(input_images, target_images, generated_images, names,
                    args.generated_images_dir)

    # Sanity mode: score the real targets against themselves.
    if args.is_test_real_data:
        generated_images = target_images

    print('Compute FID score')
    fid = FID_score()
    fid_score = fid.calculate_fid_images(generated_images, target_images)
    print("FID score %s" % fid_score)

    print("Compute inception score...")
    inception_score = get_inception_score(generated_images)
    print("Inception score %s" % inception_score[0])

    print("Compute structured similarity score (SSIM)...")
    structured_score = ssim_score(generated_images, target_images)
    print("SSIM score %s" % structured_score)

    print("Compute l1 score...")
    norm_score = l1_score(generated_images, target_images)
    print("L1 score %s" % norm_score)
Example #8
0
File: train.py  Project: freez1ng/PCGAN
def main():
    """Build the landmark/mask-conditioned CGAN and run training."""
    args = cmd.args()
    opts = vars(args)

    generator = make_generator(args.image_size, args.use_input_pose,
                               args.warp_agg, args.num_landmarks,
                               args.num_mask)
    generator.summary()
    if args.generator_checkpoint is not None:
        # by_name=True matches layers by name, tolerating partial checkpoints.
        generator.load_weights(args.generator_checkpoint, by_name=True)

    discriminator = make_discriminator(args.image_size, args.use_input_pose,
                                       args.num_landmarks, args.num_mask)
    if args.discriminator_checkpoint is not None:
        discriminator.load_weights(args.discriminator_checkpoint)

    dataset = PoseHMDataset(test_phase=False, **opts)
    gan = CGAN(generator, discriminator, **opts)
    trainer = Trainer(dataset, gan, **opts)
    trainer.train()
Example #9
0
import pandas as pd
from cmd import args
import pose_transform
import pose_utils
from itertools import permutations

args = args()


def filter_not_valid(df_keypoints):
    """Return a copy of df_keypoints keeping only rows whose pose keypoints
    pass pose_transform.check_valid and whose image is not a distractor."""
    def is_valid_row(row):
        coords = pose_utils.load_pose_cords_from_strings(
            row['keypoints_y'], row['keypoints_x'])
        # Names starting with '-1' or '0000' are treated as distractor images.
        is_distractor = row['name'].startswith(('-1', '0000'))
        return pose_transform.check_valid(coords) and not is_distractor

    return df_keypoints[df_keypoints.apply(is_valid_row, axis=1)].copy()


def make_pairs(df):
    """Build all ordered (from, to) pairs of images of the same person.

    The person id is the first '_'-separated token of each image name.
    Adds a 'person' column to *df* as a side effect (as the original did).

    Returns:
        pd.DataFrame with columns 'from' and 'to', one row per ordered pair.
    """
    persons = df.apply(lambda x: '_'.join(x['name'].split('_')[0:1]), axis=1)
    df['person'] = persons
    fr, to = [], []
    for person in pd.unique(persons):
        names = df[df['person'] == person]['name']
        # zip(*pairs) transposes the ordered pairs into (froms, tos).
        # Bug fix: in Python 3 zip() returns an iterator, so the original
        # len(pairs) call raised TypeError — materialize it as a list.
        pairs = list(zip(*list(permutations(names, 2))))
        if len(pairs) != 0:
            fr += list(pairs[0])
            to += list(pairs[1])
    pair_df = pd.DataFrame(index=range(len(fr)))
    pair_df['from'] = fr
    pair_df['to'] = to
    # Bug fix: the original built pair_df but never returned it.
    return pair_df
Example #10
0
File: test.py  Project: freez1ng/PCGAN
def test():
    """Evaluate the landmark/mask generator: inception score, SSIM, L1,
    plus masked inception score and masked SSIM."""
    args = cmd.args()

    if args.load_generated_images:
        # Reuse images written by a previous generation run.
        print("Loading images...")
        input_images, target_images, generated_images, names = load_generated_images(
            args.generated_images_dir)
    else:
        print("Generate images...")
        from keras import backend as K
        if args.use_dropout_test:
            # Learning phase 1 keeps dropout active at test time.
            K.set_learning_phase(1)
        dataset = PoseHMDataset(test_phase=True, **vars(args))
        generator = make_generator(args.image_size, args.use_input_pose,
                                   args.warp_agg, args.num_landmarks,
                                   args.num_mask)
        assert (args.generator_checkpoint is not None)
        generator.load_weights(args.generator_checkpoint)
        (input_images, pose_inp_array, out_pose, pose_out_array, inp_pose,
         target_images, generated_images, names) = generate_images(
            dataset, generator, args.use_input_pose)
        print("Save images to %s..." % (args.generated_images_dir, ))
        save_images(input_images, pose_inp_array, out_pose, pose_out_array,
                    inp_pose, target_images, generated_images, names,
                    args.generated_images_dir)

    print("Compute inception score...")
    inception_score = get_inception_score(generated_images)
    print("Inception score %s" % inception_score[0])

    # Frechet distance computation is disabled; original snippet kept below.
    # fid.create_inception_graph('/tmp/imagenet/classify_image_graph_def.pb')  # load the graph into the current TF graph
    # with tf.Session() as sess:
    #     sess.run(tf.global_variables_initializer())
    #     mu_gen, sigma_gen = fid.calculate_activation_statistics(generated_images, sess, batch_size=100)
    #     mu_real, sigma_real = fid.calculate_activation_statistics(target_images, sess, batch_size=100)
    #
    # fid_value = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
    # print ("Frechet distance %s" %  fid_value)

    print("Compute structured similarity score (SSIM)...")
    structured_score = ssim_score(generated_images, target_images)
    print("SSIM score %s" % structured_score)

    print("Compute l1 score...")
    norm_score = l1_score(generated_images, target_images)
    print("L1 score %s" % norm_score)

    print("Compute masked inception score...")
    # Mask the background via the keypoint annotations before re-scoring.
    generated_images_masked = create_masked_image(names, generated_images,
                                                  args.annotations_file_test)
    reference_images_masked = create_masked_image(names, target_images,
                                                  args.annotations_file_test)
    inception_score_masked = get_inception_score(generated_images_masked)
    print("Inception score masked %s" % inception_score_masked[0])

    # Masked Frechet distance computation is likewise disabled; snippet kept below.
    # fid.create_inception_graph('/tmp/imagenet/classify_image_graph_def.pb')  # load the graph into the current TF graph
    # with tf.Session() as sess:
    #     sess.run(tf.global_variables_initializer())
    #     mu_gen, sigma_gen = fid.calculate_activation_statistics(generated_images_masked, sess, batch_size=100)
    #     mu_real, sigma_real = fid.calculate_activation_statistics(reference_images_masked, sess, batch_size=100)
    #
    # fid_value = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
    # print("Frechet distance masked %s" % fid_value)

    print("Compute masked SSIM...")
    structured_score_masked = ssim_score(generated_images_masked,
                                         reference_images_masked)
    print("SSIM score masked %s" % structured_score_masked)

    print(
        "Inception score = %s, masked = %s; SSIM score = %s, masked = %s; l1 score = %s"
        % (inception_score, inception_score_masked, structured_score,
           structured_score_masked, norm_score))