Example #1
 def setUp(self):
     self.config = get_config('config.yaml', 'validate')
     converter_config = get_config('config.yaml', 'prepare')
     self.converter = Converter(converter_config)
     if not self.config['cuda']:
         os.environ['CUDA_VISIBLE_DEVICES'] = ''
     self.sess, self.age, self.gender, self.images_pl = load_network(
         self.config['pretrained_model_folder_or_file'])
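
get_config itself is not shown in any of these snippets. A minimal sketch of what it could look like, assuming it wraps PyYAML, accepts either a path to config.yaml or an already-parsed dict (Example #2 passes the loaded config back in), and returns the whole mapping or one top-level section:

import yaml

def get_config(config, section=None):
    # accept a path to a YAML file or an already-parsed dict
    if not isinstance(config, dict):
        with open(config) as f:
            config = yaml.safe_load(f)
    # return the whole config, or just the requested top-level section
    return config if section is None else config[section]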
Example #2
    def __init__(self, config):
        # parameters
        self._models_config = get_config(config, 'models')
        self._train_config = get_config(config, 'train')
        self._dataset_config = get_config(
            config, 'datasets')[self._train_config['dataset']]
        if not self._train_config['balance_dataset']:
            self._dataset_config['balance'] = None
        lr_method = self._train_config['learning_rate']
        lr_config = get_config(config, 'learning_rates')
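        # the 'test_lr' mode borrows the 'linear' schedule's config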
        self._lr_config = (lr_config['linear'] if lr_method == 'test_lr'
                           else lr_config[lr_method])
        self.learning_rate_manager = LearningRateManager(
            lr_method, self._lr_config)
        self.model = models[self._train_config['model']](
            **self._models_config[self._train_config['model']])
        self.num_epochs = self._train_config['epochs']
        self.train_size = 0
        self.test_size = None
        self.batch_size = self._train_config['batch_size']
        self.save_frequency = self._train_config['save_frequency']
        self.val_frequency = self._train_config['val_frequency']
        self.mode = self._train_config['mode']
        self.model_path = self._train_config['model_path']
        self.experiment_folder = self.get_experiment_folder(self.mode)

        # operations
        self.global_step = self.model.global_step
        self.train_mode = tf.placeholder(tf.bool)
        self.init_op = None
        self.train_op = None
        self.reset_global_step_op = None
        self.train_summary = None
        self.train_init_op = None
        self.test_summary = None
        self.test_init_op = None
        self.images = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
        self.age_labels = tf.placeholder(tf.int32)
        self.gender_labels = tf.placeholder(tf.int32)
        self.test_lr = (
            [] if self.learning_rate_manager.method_name == 'test_lr' else None)
Example #3
def validate(config):
    validation_config = get_config(config, 'dataset_validation')
    dataset_config = get_config(config,
                                'datasets')[validation_config['dataset']]
    batch_size = validation_config['batch']
    num_epochs = validation_config['epochs']
    next_data_element, train_init_op, train_size = init_data_loader(
        batch_size,
        dataset_config['full_desc_path'],
        dataset_config['images_path'],
        dataset_config['balance'],
        epochs=num_epochs)
    print('dataset_size: ', train_size)
    print('train_size // batch_size', train_size // batch_size)
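    # ceiling division: one extra batch if train_size is not a multiple of batch_size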
    num_batches = train_size // batch_size + \
        (train_size % batch_size != 0)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('num_epochs*num_batches', num_epochs * num_batches)
        sess.run(train_init_op)
        for batch_idx in range(num_epochs * num_batches):
            train_images, train_age_labels, train_gender_labels, file_paths = sess.run(
                next_data_element)
            print(f'batch_idx: {batch_idx}, file_paths_len: {len(file_paths)}')
Example #4
import argparse
from functools import partial

from age_gender.utils.config_parser import get_config
from age_gender.utils.converter import ConverterManager
from age_gender.utils.splitter import train_test_split_dataset

if __name__ == "__main__":
    config = get_config('config.yaml', 'prepare')

    parser = argparse.ArgumentParser()
    choices = {'convert_dataset': ConverterManager(config).run,
               'split_dataset': partial(train_test_split_dataset, config)}
    parser.add_argument('command', type=str, choices=choices.keys(), help='dataset preparation command')
    args = parser.parse_args()

    choices[args.command]()
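
The script is invoked with one of the registered commands, e.g. python prepare.py convert_dataset (the script file name is assumed here); argparse rejects any command that is not a key of the choices dict.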
Example #5
        json.dump(positive, Path(config['results_path']).joinpath('positive.json').open(mode='w'))

def load_network(config):
    pretrained_model_folder_or_file = config['pretrained_model_folder_or_file']
    sess = tf.Session()
    images_pl = tf.placeholder(tf.float32, shape=[None, 256, 256, 3], name='input_image')
    images_norm = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), images_pl)
    model = InceptionResnetV1(phase_train=False, is_training=False)
    variables_to_restore, age_logits, gender_logits = model.inference(images_norm)
    gender = tf.argmax(tf.nn.softmax(gender_logits), 1)
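    # expected age: probability-weighted sum of the softmax over the 101 age bins (0..100)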
    age_ = tf.cast(tf.constant([i for i in range(0, 101)]), tf.float32)
    age = tf.reduce_sum(tf.multiply(tf.nn.softmax(age_logits), age_), axis=1)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    saver = ModelSaver(variables_to_restore)
    saver.restore_model(sess, pretrained_model_folder_or_file)
    return sess, age, gender, images_pl


if __name__ == '__main__':
    config = get_config('config.yaml', 'mining')
    converter_config = get_config('config.yaml', 'prepare')
    converter = Converter(converter_config)
    if not config['cuda']:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    sess, age, gender, images_pl = load_network(config)
    Miner(config, converter, sess).run()


Example #6
        if name != 'lr':
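            # smooth every metric except lr with a running mean over its recent deque of values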
            metrics_deque[name].append(metric)
            metric = np.mean(metrics_deque[name])
        test_lr_chunk[name] = float(metric)
        summary = summary_pb2.Summary.Value(tag=f'{mode}/{name}',
                                            simple_value=metric)
        summaries_list.append(summary)
    if test_lr is not None:
        test_lr_chunk['lr'] = float(
            metrics_and_errors['lr']
        )  # the lr value taken from the train stage is required here
        test_lr.append(test_lr_chunk)
    summaries = summary_pb2.Summary(value=summaries_list)
    return summaries


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--config",
                        type=str,
                        default="config.yaml",
                        help="config")
    args = parser.parse_args()
    config = get_config(args.config)
    if not config['train']['cuda']:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if config['train']['mode'] not in ['start', 'continue', 'test']:
        raise ValueError('Invalid mode!')

    ModelManager(config).train()
Example #7
    def init_data_loader(self, dataset_path):
        dataset_json = json.load(Path(dataset_path).open())
        data_folder = os.path.dirname(dataset_path)
        if Path(self._config['results_path']).is_file():
            prev_result = json.load(open(self._config['results_path'], 'r'))
            for name in self.results_names:
                self.results[name] += prev_result[name]
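            # strip the data_folder prefix so entries match the file_name keys in dataset_json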
            processed_files = [
                fn[fn.find(data_folder) + len(data_folder) + 1:]
                for fn in prev_result['file_name']
            ]
            dataset_json = list(
                filter(lambda it: it['file_name'] not in processed_files,
                       dataset_json))
        loader = DataLoader(dataset_json, data_folder)
        dataset = loader.create_dataset(perform_shuffle=False,
                                        batch_size=self.batch_size)
        iterator = tf.data.Iterator.from_structure(dataset.output_types,
                                                   dataset.output_shapes)
        next_data_element = iterator.get_next()
        data_loader_init_op = iterator.make_initializer(dataset)
        return next_data_element, data_loader_init_op, loader.dataset_len()


if __name__ == '__main__':
    config = get_config('config.yaml', 'inference')
    print(config)
    if not config['cuda']:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    ModelManager(config).inference()
Example #8
    validation_config = get_config(config, 'dataset_validation')
    dataset_config = get_config(config,
                                'datasets')[validation_config['dataset']]
    batch_size = validation_config['batch']
    num_epochs = validation_config['epochs']
    next_data_element, train_init_op, train_size = init_data_loader(
        batch_size,
        dataset_config['full_desc_path'],
        dataset_config['images_path'],
        dataset_config['balance'],
        epochs=num_epochs)
    print('dataset_size: ', train_size)
    print('train_size // batch_size', train_size // batch_size)
    num_batches = train_size // batch_size + \
        (train_size % batch_size != 0)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('num_epochs*num_batches', num_epochs * num_batches)
        sess.run(train_init_op)
        for batch_idx in range(num_epochs * num_batches):
            train_images, train_age_labels, train_gender_labels, file_paths = sess.run(
                next_data_element)
            print(f'batch_idx: {batch_idx}, file_paths_len: {len(file_paths)}')


if __name__ == '__main__':
    config = get_config('config.yaml')
    if not config['dataset_validation']['cuda']:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    validate(config)