Esempio n. 1
0
def train(infer_func, params):
    """Train a CNN via tf.estimator with Horovod data parallelism.

    Args:
        infer_func: model-construction callable, forwarded to the estimator's
            model_fn through ``params['model']``.
        params: dict of training configuration; the expected keys are exactly
            the ones unpacked below.

    Side effects:
        Builds a ``tf.estimator.Estimator`` rooted at ``log_dir`` and trains it
        for ``num_iter`` iterations/epochs, optionally running a validation
        pass after every epoch when ``epoch_evaluation`` is set.
    """
    image_width = params['image_width']
    image_height = params['image_height']
    image_format = params['image_format']
    batch_size = params['batch_size']
    distort_color = params['distort_color']
    data_dir = params['data_dir']
    data_idx_dir = params['data_idx_dir']
    log_dir = params['log_dir']
    precision = params['precision']
    momentum = params['momentum']
    learning_rate_init = params['learning_rate_init']
    learning_rate_power = params['learning_rate_power']
    weight_decay = params['weight_decay']
    loss_scale = params['loss_scale']
    larc_eta = params['larc_eta']
    larc_mode = params['larc_mode']
    num_iter = params['num_iter']
    checkpoint_secs = params['checkpoint_secs']
    display_every = params['display_every']
    iter_unit = params['iter_unit']
    dali_cpu = params['dali_cpu']
    epoch_evaluation = params['epoch_evaluation']
    use_xla = params['use_xla']

    # Determinism is not fully supported by all TF ops.
    # Disabling until remaining wrinkles can be ironed out.
    deterministic = False
    if deterministic:
        # Per-rank seeds so each Horovod worker shuffles differently.
        tf.set_random_seed(2 * (1 + hvd.rank()))
        random.seed(3 * (1 + hvd.rank()))
        np.random.seed(2)

    # Empty strings mean "not provided"; normalize them to None.
    log_dir = None if log_dir == "" else log_dir
    data_dir = None if data_dir == "" else data_dir
    data_idx_dir = None if data_idx_dir == "" else data_idx_dir

    # Effective batch across all Horovod ranks.
    global_batch_size = batch_size * hvd.size()
    if data_dir is not None:
        filename_pattern = os.path.join(data_dir, '%s-*')
        train_filenames = sorted(tf.gfile.Glob(filename_pattern % 'train'))
        num_training_samples = _get_num_records(train_filenames)
    else:
        # BUG FIX: train_filenames was previously left unassigned on this
        # path, producing a NameError the first time input_func ran.  None is
        # now passed through to nvutils.image_set — presumably selecting a
        # synthetic-data mode; TODO confirm against nvutils.
        train_filenames = None
        num_training_samples = global_batch_size
    train_idx_filenames = None
    if data_idx_dir is not None:
        # Optional DALI index files matching the training TFRecords.
        filename_pattern = os.path.join(data_idx_dir, '%s-*')
        train_idx_filenames = sorted(tf.gfile.Glob(filename_pattern % 'train'))

    if iter_unit.lower() == 'epoch':
        nstep = num_training_samples * num_iter // global_batch_size
        num_epochs = num_iter
        decay_steps = nstep
    else:
        nstep = num_iter
        num_epochs = max(nstep * global_batch_size // num_training_samples, 1)
        # LR decay horizon fixed at 90 epochs when iterating by step count.
        decay_steps = 90 * num_training_samples // global_batch_size

    nstep_per_epoch = num_training_samples // global_batch_size

    # Horovod: pin GPU to be used to process local rank (one GPU per process)
    gpu_options = GPUOptions(per_process_gpu_memory_fraction=0.7)
    config = ConfigProto(gpu_options=gpu_options)
    if use_xla:
        config.graph_options.optimizer_options.global_jit_level = (
            tf.OptimizerOptions.ON_1)
    #config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    config.gpu_options.force_gpu_compatible = True  # Force pinned memory
    config.intra_op_parallelism_threads = 1  # Avoid pool of Eigen threads
    config.inter_op_parallelism_threads = max(2, 40 // hvd.size() - 2)

    # Only rank 0 writes checkpoints.  BUG FIX: RunConfig raises ValueError
    # when both save_checkpoints_secs and save_checkpoints_steps are set
    # (previously both were passed on rank 0), so pick exactly one —
    # time-based when checkpoint_secs is provided, otherwise step-based.
    if hvd.rank() == 0:
        save_secs = checkpoint_secs if checkpoint_secs else None
        save_steps = None if checkpoint_secs else nstep
    else:
        save_secs = None
        save_steps = None

    classifier = tf.estimator.Estimator(
        model_fn=_cnn_model_function,
        model_dir=log_dir,
        params={
            'model': infer_func,
            'format': image_format,
            'dtype': tf.float16 if precision == 'fp16' else tf.float32,
            'momentum': momentum,
            'learning_rate_init': learning_rate_init,
            'learning_rate_power': learning_rate_power,
            'decay_steps': decay_steps,
            'weight_decay': weight_decay,
            'loss_scale': loss_scale,
            'larc_eta': larc_eta,
            'larc_mode': larc_mode,
            'deterministic': deterministic,
            'n_classes': 1000,
            'dali_cpu': dali_cpu,
        },
        config=tf.estimator.RunConfig(
            tf_random_seed=2 * (1 + hvd.rank()) if deterministic else None,
            session_config=config,
            save_checkpoints_secs=save_secs,
            save_checkpoints_steps=save_steps,
            keep_checkpoint_every_n_hours=3))

    print("Training")
    # Multiple preprocessing threads introduce nondeterministic ordering,
    # so fall back to a single thread in deterministic mode.
    num_preproc_threads = 1 if deterministic else 4

    training_hooks = [
        # Broadcast rank 0's initial variables so all workers start in sync.
        hvd.BroadcastGlobalVariablesHook(0),
        _PrefillStagingAreasHook()
    ]
    if hvd.rank() == 0:
        # Throughput/loss logging only on rank 0 to avoid duplicate output.
        training_hooks.append(
            _LogSessionRunHook(global_batch_size, num_training_samples,
                               display_every))

    input_func = lambda: nvutils.image_set(train_filenames,
                                           batch_size,
                                           image_height,
                                           image_width,
                                           training=True,
                                           distort_color=distort_color,
                                           deterministic=deterministic,
                                           num_threads=num_preproc_threads,
                                           dali_cpu=dali_cpu,
                                           idx_filenames=train_idx_filenames)

    if epoch_evaluation:
        classifier_eval, eval_input_func, eval_steps = create_validation_estimator(
            infer_func, params)

    try:
        if epoch_evaluation:
            # Train one epoch at a time so validation can run in between.
            for i in range(num_epochs):
                classifier.train(input_fn=input_func,
                                 steps=nstep // num_epochs,
                                 hooks=training_hooks)
                if hvd.rank() == 0:
                    eval_result = classifier_eval.evaluate(
                        input_fn=eval_input_func, steps=eval_steps)
                    print('epoch {} top1: {}%'.format(
                        i, eval_result['top1_accuracy'] * 100))
                    print('epoch {} top5: {}%'.format(
                        i, eval_result['top5_accuracy'] * 100))
        else:
            classifier.train(input_fn=input_func,
                             max_steps=nstep,
                             hooks=training_hooks)

    except KeyboardInterrupt:
        print("Keyboard interrupt")
Esempio n. 2
0
import tensorflow as tf
from math import ceil
import cv2
import pdb

from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

# Module-level TF session setup: cap both thread pools at 4 and let GPU
# memory grow on demand instead of grabbing it all up front.  Creating the
# InteractiveSession here installs it as the default session as a side
# effect of importing this module.
config = ConfigProto()
config.inter_op_parallelism_threads = 4   # threads running independent ops concurrently
config.intra_op_parallelism_threads = 4   # threads available inside a single op
# tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))
config.gpu_options.allow_growth = True    # allocate GPU memory incrementally
session = InteractiveSession(config=config)


class SSDDetectors:
    """
		Classe de détection charger d'utiliser les modèles tensorflow.
		Cette classe va permettre de rendre l'utilisation des modèles plus facile

		:param
		@path_model: path_model est une chaine de caractère chargé d'indiquer l'emplacement du modèle tensorflow sauvegarder dans un format SavedModel
		@path_label: path_label est une chaine de caractère d'indiquer l'emplacement d'un fichier textes indiquants les catégories possibles
	"""
    def __init__(self, path_model, path_label):
        self.model = tf.saved_model.load(path_model + '/saved_model')
        self.model = self.model.signatures['serving_default']
        self.list_label = self.read_labels(path_label)

    @staticmethod