# Example #1 (Пример #1, score: 0)
def main(argv):
    """Parse command-line flags and launch a DAN run using the VGG16 model.

    Args:
        argv: Full process argument list; argv[0] (program name) is skipped.

    Side effects:
        Reads dataset statistics from ``flags.data_dir`` when training, then
        hands control to ``dan_run_loop.dan_main``.
    """
    parser = dan_run_loop.DANArgParser()
    parser.set_defaults(data_dir='./data_dir',
                        model_dir='/output',
                        data_format='channels_last',
                        train_epochs=20,
                        epochs_per_eval=10,
                        batch_size=64)

    flags = parser.parse_args(args=argv[1:])

    # Dataset statistics are only computed for training; they stay None
    # for eval/predict and are passed through to the model_fn as-is.
    mean_shape = None
    imgs_mean = None
    imgs_std = None

    # Translate the string --mode flag into tf.estimator mode keys.
    flags_trans = {
        'train': tf.estimator.ModeKeys.TRAIN,
        'eval': tf.estimator.ModeKeys.EVAL,
        'predict': tf.estimator.ModeKeys.PREDICT
    }
    flags.mode = flags_trans[flags.mode]

    if flags.mode == tf.estimator.ModeKeys.TRAIN:
        mean_shape, imgs_mean, imgs_std = read_dataset_info(flags.data_dir)

    def vgg16_model_fn(features, labels, mode, params):
        # Estimator model_fn closing over the dataset statistics above.
        return dan_run_loop.dan_model_fn(features=features,
                                         groundtruth=labels,
                                         mode=mode,
                                         stage=params['dan_stage'],
                                         num_lmark=params['num_lmark'],
                                         model_class=VGG16Model,
                                         mean_shape=mean_shape,
                                         imgs_mean=imgs_mean,
                                         imgs_std=imgs_std,
                                         data_format=params['data_format'],
                                         multi_gpu=params['multi_gpu'])

    # Conditional expression instead of the fragile `cond and a or b` idiom:
    # the old form would silently fall back to vgg16_input_fn whenever
    # get_synth_input_fn() returned a falsy value.
    input_function = (get_synth_input_fn()
                      if flags.use_synthetic_data else vgg16_input_fn)

    if flags.mode == tf.estimator.ModeKeys.PREDICT:
        # NOTE(review): hard-coded local image path — should be driven by
        # flags (e.g. flags.data_dir) rather than baked into the code.
        faceset = '/media/morzh/ext4_volume/data/Faces/all_in_one/set_004_rect/'
        file_img = '2UiNSKC3sGw.jpg'
        input_function = img_input_fn(faceset + file_img, (0, 0, 112, 112),
                                      112, 74)

    dan_run_loop.dan_main(flags, vgg16_model_fn, input_function)
# Example #2 (Пример #2, score: 0)
def main(argv):
    """Parse command-line flags and launch a DAN run using the VGG16 model.

    This variant writes checkpoints to ``./model_dirl2loss`` and, in predict
    mode, streams frames via ``video_input_fn`` from ``flags.data_dir``.

    Args:
        argv: Full process argument list; argv[0] (program name) is skipped.
    """
    parser = dan_run_loop.DANArgParser()
    parser.set_defaults(data_dir='./data_dir',
                        model_dir='./model_dirl2loss',
                        data_format='channels_last',
                        train_epochs=20,
                        epochs_per_eval=10,
                        batch_size=64)

    flags = parser.parse_args(args=argv[1:])

    # Dataset statistics are only computed for training; they stay None
    # for eval/predict and are passed through to the model_fn as-is.
    mean_shape = None
    imgs_mean = None
    imgs_std = None

    # Translate the string --mode flag into tf.estimator mode keys.
    flags_trans = {
        'train': tf.estimator.ModeKeys.TRAIN,
        'eval': tf.estimator.ModeKeys.EVAL,
        'predict': tf.estimator.ModeKeys.PREDICT
    }
    flags.mode = flags_trans[flags.mode]

    if flags.mode == tf.estimator.ModeKeys.TRAIN:
        mean_shape, imgs_mean, imgs_std = read_dataset_info(flags.data_dir)

    def vgg16_model_fn(features, labels, mode, params):
        # Estimator model_fn closing over the dataset statistics above.
        return dan_run_loop.dan_model_fn(features=features,
                                         groundtruth=labels,
                                         mode=mode,
                                         stage=params['dan_stage'],
                                         num_lmark=params['num_lmark'],
                                         model_class=VGG16Model,
                                         mean_shape=mean_shape,
                                         imgs_mean=imgs_mean,
                                         imgs_std=imgs_std,
                                         data_format=params['data_format'],
                                         multi_gpu=params['multi_gpu'])

    # Conditional expression instead of the fragile `cond and a or b` idiom:
    # the old form would silently fall back to vgg16_input_fn whenever
    # get_synth_input_fn() returned a falsy value. (A leftover debug
    # print of flags.use_synthetic_data was removed here.)
    input_function = (get_synth_input_fn()
                      if flags.use_synthetic_data else vgg16_input_fn)

    if flags.mode == tf.estimator.ModeKeys.PREDICT:
        input_function = video_input_fn(flags.data_dir, 112, flags.num_lmark)

    dan_run_loop.dan_main(flags, vgg16_model_fn, input_function)