Code Example #1
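A minimal evaluation entry point: it parses the command-line arguments, initializes logging, prepares the TensorFlow context, builds the model (optionally loading pretrained weights or a checkpoint), constructs a validation dataflow, and runs the evaluation loop (optionally computing FLOPs).
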
def main():
    args = parse_args()

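    # Set up run logging (log file plus package/pip version reporting).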
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

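    # Configure the TensorFlow context for the requested number of GPUs and get the effective batch size.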
    batch_size = prepare_tf_context(num_gpus=args.num_gpus,
                                    batch_size=args.batch_size)

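    # Build the network for 1000 classes; weights come from a pretrained model or the checkpoint in args.resume.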
    classes = 1000
    net, inputs_desc = prepare_model(
        model_name=args.model,
        classes=classes,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())

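    # Validation-only dataflow over the evaluation split.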
    val_dataflow = get_data(is_train=False,
                            batch_size=batch_size,
                            data_dir_path=args.data_dir)

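    # Evaluation needs weights: either a pretrained model or a non-empty checkpoint path.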
    assert (args.use_pretrained or args.resume.strip())
    test(net=net,
         session_init=inputs_desc,
         val_dataflow=val_dataflow,
         do_calc_flops=args.calc_flops,
         extended_log=True)
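
The helpers above (parse_args, initialize_logging, prepare_tf_context, prepare_model, get_data, test) are defined in the surrounding project. Purely as an illustration, a get_data helper for a tensorpack-style pipeline (which the session_init/dataflow naming suggests) might look like the sketch below; only the evaluation path is shown, and the dataset class, augmentations, and crop size are assumptions rather than the project's actual code.

from tensorpack.dataflow import dataset, imgaug, AugmentImageComponent, BatchData


def get_data(is_train, batch_size, data_dir_path):
    # ILSVRC12 reads the standard ImageNet directory layout (train/ and val/ subfolders).
    ds = dataset.ILSVRC12(data_dir_path, "train" if is_train else "val", shuffle=is_train)
    # Evaluation-style preprocessing: resize the short edge, then take a center crop.
    augmentors = [imgaug.ResizeShortestEdge(256), imgaug.CenterCrop((224, 224))]
    ds = AugmentImageComponent(ds, augmentors, copy=False)
    # Keep the last partial batch at evaluation time so every image is scored.
    ds = BatchData(ds, batch_size, remainder=not is_train)
    return ds
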
Code Example #2
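The training counterpart of the previous example: in addition to the same setup steps, it seeds the random number generators, points the logger at the save directory, builds both training and validation dataflows, and hands everything to train_net.
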
def main():
    args = parse_args()
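    # Fix (or draw) the random seed so the run is reproducible; the chosen seed is kept in args.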
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
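    # Also direct the training framework's logger output to the save directory.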
    logger.set_logger_dir(args.save_dir)

    batch_size = prepare_tf_context(num_gpus=args.num_gpus,
                                    batch_size=args.batch_size)

    classes = 1000
    net, inputs_desc = prepare_model(
        model_name=args.model,
        classes=classes,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())

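    # Separate dataflows for training and validation.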
    train_dataflow = get_data(is_train=True,
                              batch_size=batch_size,
                              data_dir_path=args.data_dir)
    val_dataflow = get_data(is_train=False,
                            batch_size=batch_size,
                            data_dir_path=args.data_dir)

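    # Run the training loop for the requested number of epochs, validating on val_dataflow.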
    train_net(net=net,
              session_init=inputs_desc,
              batch_size=batch_size,
              num_epochs=args.num_epochs,
              train_dataflow=train_dataflow,
              val_dataflow=val_dataflow)
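
init_rand is the only helper not used in the first example. A plausible implementation, shown purely as an assumption (the real helper may also seed the framework's own RNG), is:

import random

import numpy as np


def init_rand(seed):
    # If no usable seed was supplied, draw one so it can still be logged and reused.
    if seed is None or seed <= 0:
        seed = random.randint(1, 2 ** 31 - 1)
    random.seed(seed)
    np.random.seed(seed)
    return seed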