Example #1
0
        # Kick off training. All hyperparameters come from the global
        # RUNNING_CONFIG except the static-loss-scaling switch, which is read
        # from FLAGS — NOTE(review): mixed config sources; confirm intended.
        runner.train(
            iter_unit=RUNNING_CONFIG.iter_unit,
            num_iter=RUNNING_CONFIG.num_iter,
            batch_size=RUNNING_CONFIG.batch_size,
            warmup_steps=RUNNING_CONFIG.warmup_steps,
            log_every_n_steps=RUNNING_CONFIG.log_every_n_steps,
            weight_decay=RUNNING_CONFIG.weight_decay,
            learning_rate_init=RUNNING_CONFIG.learning_rate_init,
            momentum=RUNNING_CONFIG.momentum,
            loss_scale=RUNNING_CONFIG.loss_scale,
            use_static_loss_scaling=FLAGS.use_static_loss_scaling,
            # Benchmark flag is True only in 'training_benchmark' mode.
            is_benchmark=RUNNING_CONFIG.mode == 'training_benchmark',
        )

    # Evaluation / inference-benchmark path.
    if RUNNING_CONFIG.mode in ["train_and_evaluate", 'evaluate', 'inference_benchmark']:

        # Multi-GPU (Horovod) inference benchmarking is unsupported; fail fast.
        if RUNNING_CONFIG.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
            raise NotImplementedError("Only single GPU inference is implemented.")

        # Run evaluation on exactly one process: either Horovod is off,
        # or this is rank 0.
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:

            runner.evaluate(
                # Right after train_and_evaluate, evaluate for exactly one
                # epoch; otherwise honour the configured unit and count.
                iter_unit=RUNNING_CONFIG.iter_unit if RUNNING_CONFIG.mode != "train_and_evaluate" else "epoch",
                num_iter=RUNNING_CONFIG.num_iter if RUNNING_CONFIG.mode != "train_and_evaluate" else 1,
                warmup_steps=RUNNING_CONFIG.warmup_steps,
                batch_size=RUNNING_CONFIG.batch_size,
                log_every_n_steps=RUNNING_CONFIG.log_every_n_steps,
                is_benchmark=RUNNING_CONFIG.mode == 'inference_benchmark'
            )
Example #2
0
    # Evaluation / inference-benchmark path, driven by absl-style FLAGS.
    if FLAGS.mode in ["train_and_evaluate", 'evaluate', 'inference_benchmark']:

        # Multi-GPU (Horovod) inference benchmarking is unsupported; fail fast.
        if FLAGS.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU inference is implemented.")

        # Evaluate on exactly one process: either Horovod is off, or rank 0.
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:

            # Right after train_and_evaluate, evaluate one epoch; otherwise use
            # the configured unit/count. Export and quantization options
            # (export_dir, quantize, symmetric, use_final_conv, use_qdq) are
            # forwarded verbatim from FLAGS.
            runner.evaluate(iter_unit=FLAGS.iter_unit
                            if FLAGS.mode != "train_and_evaluate" else "epoch",
                            num_iter=FLAGS.num_iter
                            if FLAGS.mode != "train_and_evaluate" else 1,
                            warmup_steps=FLAGS.warmup_steps,
                            batch_size=FLAGS.batch_size,
                            log_every_n_steps=FLAGS.display_every,
                            is_benchmark=FLAGS.mode == 'inference_benchmark',
                            export_dir=FLAGS.export_dir,
                            quantize=FLAGS.quantize,
                            symmetric=FLAGS.symmetric,
                            use_final_conv=FLAGS.use_final_conv,
                            use_qdq=FLAGS.use_qdq)

    # Single-image prediction path.
    if FLAGS.mode == 'predict':
        if FLAGS.to_predict is None:
            raise ValueError("No data to predict on.")

        # Prediction expects a single image file, not a directory.
        if not os.path.isfile(FLAGS.to_predict):
            raise ValueError("Only prediction on single images is supported!")

        # NOTE(review): fragment is truncated here — the body of this `if`
        # (presumably a NotImplementedError for Horovod inference) is missing.
        if hvd_utils.is_using_hvd():
Example #3
0
            
            runner.train(
                iter_unit=FLAGS.iter_unit,
                num_iter=FLAGS.eval_every,
                batch_size=FLAGS.batch_size,
                warmup_steps=FLAGS.warmup_steps,
                weight_decay=FLAGS.weight_decay,
                learning_rate_init=FLAGS.lr_init,
                momentum=FLAGS.momentum,
                is_benchmark=FLAGS.mode == 'training_benchmark'
            )
                
            runner.evaluate(
                iter_unit= "epoch",
                num_iter= 1,
                warmup_steps=FLAGS.warmup_steps,
                batch_size=FLAGS.batch_size,
                is_benchmark=FLAGS.mode == 'inference_benchmark'
            )
        
    else:  

        if FLAGS.mode in ["train", "train_and_evaluate", "training_benchmark"]:
            runner.train(
                iter_unit=FLAGS.iter_unit,
                num_iter=FLAGS.num_iter,
                batch_size=FLAGS.batch_size,
                warmup_steps=FLAGS.warmup_steps,
                weight_decay=FLAGS.weight_decay,
                learning_rate_init=FLAGS.lr_init,
                momentum=FLAGS.momentum,
            # NOTE(review): fragment starts mid-statement — these two lines are
            # the tail of an `if ... in [` membership test whose opening line
            # is not visible in this chunk.
            "train", "train_and_evaluate", "training_benchmark"
    ]:
        # Training path: every hyperparameter comes from RUNNING_CONFIG except
        # the auto-loss-scaling switch, which is read from FLAGS —
        # NOTE(review): mixed config sources; confirm intended.
        runner.train(
            iter_unit=RUNNING_CONFIG.iter_unit,
            num_iter=RUNNING_CONFIG.num_iter,
            batch_size=RUNNING_CONFIG.batch_size,
            warmup_steps=RUNNING_CONFIG.warmup_steps,
            weight_decay=RUNNING_CONFIG.weight_decay,
            learning_rate=RUNNING_CONFIG.learning_rate,
            # One attribute access split across two lines by the formatter.
            learning_rate_decay_factor=RUNNING_CONFIG.
            learning_rate_decay_factor,
            learning_rate_decay_steps=RUNNING_CONFIG.learning_rate_decay_steps,
            rmsprop_decay=RUNNING_CONFIG.rmsprop_decay,
            rmsprop_momentum=RUNNING_CONFIG.rmsprop_momentum,
            use_auto_loss_scaling=FLAGS.use_auto_loss_scaling,
            augment_data=RUNNING_CONFIG.augment_data,
            is_benchmark=RUNNING_CONFIG.exec_mode == 'training_benchmark')

    # Evaluation path: unlike the other examples here, this one calls
    # hvd.rank() unconditionally — NOTE(review): confirm Horovod is always
    # initialised in this file, otherwise this raises when hvd is unused.
    if RUNNING_CONFIG.exec_mode in [
            "train_and_evaluate", 'evaluate', 'inference_benchmark'
    ] and hvd.rank() == 0:
        runner.evaluate(
            # Right after train_and_evaluate, evaluate exactly one epoch;
            # otherwise use the configured iteration unit and count.
            iter_unit=RUNNING_CONFIG.iter_unit
            if RUNNING_CONFIG.exec_mode != "train_and_evaluate" else "epoch",
            num_iter=RUNNING_CONFIG.num_iter
            if RUNNING_CONFIG.exec_mode != "train_and_evaluate" else 1,
            warmup_steps=RUNNING_CONFIG.warmup_steps,
            batch_size=RUNNING_CONFIG.batch_size,
            is_benchmark=RUNNING_CONFIG.exec_mode == 'inference_benchmark',
            save_eval_results_to_json=RUNNING_CONFIG.save_eval_results_to_json)
Example #5
0
            # NOTE(review): fragment starts mid-expression — these lines are
            # the tail of a `backends = [...]` list (JSON-file + stdout
            # logging backends); the opening of the list is not visible here.
            JSONStreamBackend(verbosity=Verbosity.VERBOSE, filename=log_path),
            StdOutBackend(verbosity=Verbosity.DEFAULT)
        ]
    # Initialise DLLogger and record every run parameter once at startup.
    DLLogger.init(backends=backends)
    DLLogger.log(data=vars(FLAGS), step='PARAMETER')

    runner = Runner(FLAGS, DLLogger)

    # Training path; Runner reads its own configuration from FLAGS.
    if FLAGS.mode in ["train", "train_and_eval", "training_benchmark"]:
        runner.train()

    if FLAGS.mode in ['eval', 'evaluate', 'inference_benchmark']:
        # Multi-GPU (Horovod) inference benchmarking is unsupported; fail fast.
        if FLAGS.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
            raise NotImplementedError("Only single GPU inference is implemented.")
        # Evaluate on exactly one process: either Horovod is off, or rank 0.
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
            runner.evaluate()

    # Prediction path: unlike Example #2, this variant expects a directory
    # of images rather than a single file.
    if FLAGS.mode == 'predict':
        if FLAGS.to_predict is None:
            raise ValueError("No data to predict on.")

        if not os.path.isdir(FLAGS.to_predict):
            raise ValueError("Provide directory with images to infer!")

        if hvd_utils.is_using_hvd():
            raise NotImplementedError("Only single GPU inference is implemented.")

        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
            runner.predict(FLAGS.to_predict, FLAGS.inference_checkpoint)

    # NOTE(review): fragment is truncated here — the body of this branch
    # (a SavedModel export, judging by the mode name) is missing.
    if FLAGS.mode == 'savemodel_as_backbone':
            # NOTE(review): fragment starts mid-call — these two lines are the
            # tail of a `runner.train(...)` invocation whose opening is not
            # visible in this chunk.
            is_benchmark=FLAGS.mode == 'training_benchmark',
        )

    # Evaluation / inference-benchmark path.
    if FLAGS.mode in ["train_and_evaluate", 'evaluate', 'inference_benchmark']:

        # Multi-GPU (Horovod) inference benchmarking is unsupported; fail fast.
        if FLAGS.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU inference is implemented.")

        # Evaluate on exactly one process: either Horovod is off, or rank 0.
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:

            # Right after train_and_evaluate, evaluate exactly one epoch;
            # otherwise use the configured iteration unit and count.
            runner.evaluate(iter_unit=FLAGS.iter_unit
                            if FLAGS.mode != "train_and_evaluate" else "epoch",
                            num_iter=FLAGS.num_iter
                            if FLAGS.mode != "train_and_evaluate" else 1,
                            warmup_steps=FLAGS.warmup_steps,
                            batch_size=FLAGS.batch_size,
                            log_every_n_steps=FLAGS.display_every,
                            is_benchmark=FLAGS.mode == 'inference_benchmark',
                            export_dir=FLAGS.export_dir)

    # Single-image prediction path.
    if FLAGS.mode == 'predict':
        if FLAGS.to_predict is None:
            raise ValueError("No data to predict on.")

        # Prediction expects a single image file, not a directory.
        if not os.path.isfile(FLAGS.to_predict):
            raise ValueError("Only prediction on single images is supported!")

        # Multi-GPU (Horovod) prediction is unsupported; fail fast.
        if hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU inference is implemented.")