Esempio n. 1
0
                            if FLAGS.mode != "train_and_evaluate" else "epoch",
                            num_iter=FLAGS.num_iter
                            if FLAGS.mode != "train_and_evaluate" else 1,
                            warmup_steps=FLAGS.warmup_steps,
                            batch_size=FLAGS.batch_size,
                            log_every_n_steps=FLAGS.display_every,
                            is_benchmark=FLAGS.mode == 'inference_benchmark',
                            export_dir=FLAGS.export_dir,
                            quantize=FLAGS.quantize,
                            symmetric=FLAGS.symmetric,
                            use_final_conv=FLAGS.use_final_conv,
                            use_qdq=FLAGS.use_qdq)

    # Single-image prediction mode: validate the input path, refuse multi-GPU
    # (Horovod) runs, then invoke the runner with quantization-related flags.
    if FLAGS.mode == 'predict':
        if FLAGS.to_predict is None:
            raise ValueError("No data to predict on.")

        # Only a single image file is accepted here (directories rejected).
        if not os.path.isfile(FLAGS.to_predict):
            raise ValueError("Only prediction on single images is supported!")

        # Multi-GPU inference is explicitly unsupported.
        if hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU inference is implemented.")

        # NOTE(review): this branch is only reached when Horovod is NOT in
        # use, so the condition is always true — the guard is redundant.
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
            runner.predict(FLAGS.to_predict,
                           quantize=FLAGS.quantize,
                           symmetric=FLAGS.symmetric,
                           use_qdq=FLAGS.use_qdq,
                           use_final_conv=FLAGS.use_final_conv)
Esempio n. 2
0
        # Evaluation / inference-benchmark path: benchmarking under Horovod
        # (multi-GPU) is unsupported; otherwise only rank 0 runs evaluation.
        if FLAGS.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU inference is implemented.")

        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:

            # In "train_and_evaluate" mode, force exactly one epoch of
            # evaluation; otherwise honor the CLI-supplied unit and count.
            runner.evaluate(iter_unit=FLAGS.iter_unit
                            if FLAGS.mode != "train_and_evaluate" else "epoch",
                            num_iter=FLAGS.num_iter
                            if FLAGS.mode != "train_and_evaluate" else 1,
                            warmup_steps=FLAGS.warmup_steps,
                            batch_size=FLAGS.batch_size,
                            log_every_n_steps=FLAGS.display_every,
                            is_benchmark=FLAGS.mode == 'inference_benchmark',
                            export_dir=FLAGS.export_dir)

    # Single-image prediction mode: validate input, reject multi-GPU runs,
    # then predict on the one (implicitly rank-0) process.
    if FLAGS.mode == 'predict':
        if FLAGS.to_predict is None:
            raise ValueError("No data to predict on.")

        # Only a single image file is accepted here (directories rejected).
        if not os.path.isfile(FLAGS.to_predict):
            raise ValueError("Only prediction on single images is supported!")

        # Multi-GPU inference is explicitly unsupported.
        if hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU inference is implemented.")

        # NOTE(review): reached only when Horovod is NOT in use, so this
        # condition is always true — the guard is redundant.
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
            runner.predict(FLAGS.to_predict)
Esempio n. 3
0
            StdOutBackend(verbosity=Verbosity.DEFAULT)
        ]
    # Initialize DLLogger with the backends assembled above and log every
    # flag value once under the 'PARAMETER' step for reproducibility.
    DLLogger.init(backends=backends)
    DLLogger.log(data=vars(FLAGS), step='PARAMETER')

    runner = Runner(FLAGS, DLLogger)

    # Dispatch on FLAGS.mode; the branches below are independent `if`s, so a
    # mode string could in principle match more than one list.
    if FLAGS.mode in ["train", "train_and_eval", "training_benchmark"]:
        runner.train()

    # Evaluation / inference benchmark: multi-GPU benchmarking is
    # unsupported; otherwise only rank 0 evaluates.
    if FLAGS.mode in ['eval', 'evaluate', 'inference_benchmark']:
        if FLAGS.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
            raise NotImplementedError("Only single GPU inference is implemented.")
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
            runner.evaluate()

    # Prediction mode: unlike the other examples on this page, this variant
    # expects a DIRECTORY of images plus a checkpoint path.
    if FLAGS.mode == 'predict':
        if FLAGS.to_predict is None:
            raise ValueError("No data to predict on.")

        if not os.path.isdir(FLAGS.to_predict):
            raise ValueError("Provide directory with images to infer!")

        # Multi-GPU inference is explicitly unsupported.
        if hvd_utils.is_using_hvd():
            raise NotImplementedError("Only single GPU inference is implemented.")

        # NOTE(review): reached only when Horovod is NOT in use, so this
        # condition is always true — the guard is redundant.
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
            runner.predict(FLAGS.to_predict, FLAGS.inference_checkpoint)

    # Export the trained model as a backbone (SavedModel-style artifact).
    if FLAGS.mode == 'savemodel_as_backbone':
        runner.savemodel_as_backbone()
Esempio n. 4
0
    # Record the full run configuration under the 'PARAMETER' step so the
    # experiment is reproducible from the logs alone.
    DLLogger.log(data=vars(config), step='PARAMETER')

    #========== initialize the runner
    runner = Runner(config, DLLogger)

    #========== determine the operation mode of the runner (tr,eval,predict)
    if config.mode in ["train", "train_and_eval", "training_benchmark"]:
        runner.train()
    # Evaluation: neither benchmarking nor plain evaluation supports
    # multi-GPU (Horovod) — both raise; single-process runs evaluate.
    if config.mode in ['eval', 'evaluate', 'inference_benchmark']:
        if config.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU inference is implemented.")
        elif hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU evaluation is implemented.")
        else:
            runner.evaluate()
    # Prediction: expects a directory of images and a checkpoint path taken
    # from the config object (not FLAGS, unlike the other examples here).
    if config.mode == 'predict':
        if config.predict_img_dir is None:
            raise ValueError("No data to predict on.")

        if not os.path.isdir(config.predict_img_dir):
            raise ValueError("Provide directory with images to infer!")

        # Multi-GPU inference is explicitly unsupported.
        if hvd_utils.is_using_hvd():
            raise NotImplementedError(
                "Only single GPU inference is implemented.")

        # NOTE(review): reached only when Horovod is NOT in use, so this
        # condition is always true — the guard is redundant.
        elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
            runner.predict(config.predict_img_dir, config.predict_ckpt)