def extract_images(gpus, model_config, data_config, trained_checkpoint,
                   pad_to_shape, processor_type, annot_type, is_debug,
                   export_folder, **kwargs):

    os.environ["CUDA_VISIBLE_DEVICES"] = gpus

    pipeline_config = read_config(model_config, data_config)

    if pad_to_shape is not None and isinstance(pad_to_shape, str):
        pad_to_shape = [
            int(dim) if dim != '-1' else None
            for dim in pad_to_shape.split(',')
        ]

    input_reader = pipeline_config.input_reader
    input_reader.shuffle = False
    if len(input_reader.tf_record_input_reader) > 1:
        # Drop the last configured TFRecord source.
        input_reader.tf_record_input_reader.pop()
        print("Removed extra input reader; now using:\n", input_reader)
    ignore_label = input_reader.ignore_label

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, is_training=False, ignore_label=ignore_label)
    with tf.device("cpu:0"):
        dataset = dataset_builder.build(input_reader, 1)

    num_gpu = len(gpus.split(","))

    num_examples = sum(
        r.num_examples for r in input_reader.tf_record_input_reader)

    run_inference_graph(segmentation_model, trained_checkpoint, dataset,
                        num_examples, ignore_label, pad_to_shape, num_classes,
                        processor_type, annot_type, num_gpu, export_folder,
                        **kwargs)
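
# A minimal, self-contained sketch (not from the repo) of the shape-string
# parsing used above: dims are comma-separated, with '-1' standing in for an
# unknown dimension (None).
def parse_shape(shape_str):
    """Parse e.g. '1024,-1,3' into [1024, None, 3]."""
    return [int(dim) if dim != '-1' else None
            for dim in shape_str.split(',')]

assert parse_shape('1024,-1,3') == [1024, None, 3]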
Example #2
def run_experiment(gpus, model_config, data_config, trained_checkpoint,
                   pad_to_shape, processor_type, annot_type, is_debug,
                   **kwargs):
    had_error = None
    # Create the buffer before the try block so the stream restore and the
    # return below always have a valid buffer, even if setup fails early.
    print_buffer = StringIO()
    try:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpus
        if not is_debug:
            sys.stdout = print_buffer
            sys.stderr = print_buffer

        pipeline_config = read_config(model_config, data_config)

        if pad_to_shape is not None and isinstance(pad_to_shape, str):
            pad_to_shape = [
                int(dim) if dim != '-1' else None
                for dim in pad_to_shape.split(',')
            ]

        input_reader = pipeline_config.input_reader
        input_reader.shuffle = True
        ignore_label = input_reader.ignore_label

        num_classes, segmentation_model = model_builder.build(
            pipeline_config.model,
            is_training=False,
            ignore_label=ignore_label)
        with tf.device("cpu:0"):
            dataset = dataset_builder.build(input_reader, 1)

        num_gpu = len(gpus.split(","))

        num_examples = sum(
            r.num_examples for r in input_reader.tf_record_input_reader)

        result = run_inference_graph(segmentation_model, trained_checkpoint,
                                     dataset, num_examples, ignore_label,
                                     pad_to_shape, num_classes, processor_type,
                                     annot_type, num_gpu, **kwargs)
        had_error = False
    except Exception:
        if is_debug:
            # Re-raise with the original traceback when debugging.
            raise
        print(traceback.format_exc())
        had_error = True
        result = None

    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    tf.reset_default_graph()

    return print_buffer, result, had_error
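
# Self-contained sketch of the capture-and-restore pattern run_experiment
# relies on: stdout/stderr are swapped for a StringIO so a worker's log can
# be handed back to the caller instead of printed. Names are illustrative,
# not from the repo.
import sys
import traceback
from io import StringIO

def run_captured(fn, *args, **kwargs):
    buf, result, had_error = StringIO(), None, False
    old_out, old_err = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = buf
    try:
        result = fn(*args, **kwargs)
    except Exception:
        print(traceback.format_exc())
        had_error = True
    finally:
        # Always restore the real streams, mirroring the cleanup above.
        sys.stdout, sys.stderr = old_out, old_err
    return buf, result, had_error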
def main(_):
    #test_plots()
    eval_dir = FLAGS.eval_dir
    output_directory = os.path.join(eval_dir, "inf")
    suff = ""
    if FLAGS.global_mean:
        suff = "_G"
    else:
        suff = "_L"
    if FLAGS.global_cov:
        suff += "G"
    else:
        suff += "L"
    dist_dir = os.path.join(eval_dir, "class_dist" + suff)
    min_dir = os.path.join(eval_dir, "min" + suff)
    hist_dir = os.path.join(eval_dir, "hist" + suff)
    dump_dir = os.path.join(eval_dir, "dump" + suff)

    tf.gfile.MakeDirs(output_directory)
    tf.gfile.MakeDirs(min_dir)
    tf.gfile.MakeDirs(dist_dir)
    tf.gfile.MakeDirs(hist_dir)
    tf.gfile.MakeDirs(dump_dir)
    pipeline_config = read_config(FLAGS.model_config, FLAGS.data_config)

    pad_to_shape = None
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        raise ValueError('Must supply `input_shape`')

    if FLAGS.pad_to_shape:
        pad_to_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.pad_to_shape.split(',')
        ]

    label_map = (CITYSCAPES_LABEL_IDS
                 if FLAGS.label_ids else CITYSCAPES_LABEL_COLORS)

    input_reader = pipeline_config.input_reader
    input_reader.shuffle = True
    ignore_label = input_reader.ignore_label

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, is_training=False, ignore_label=ignore_label)
    dataset = dataset_builder.build(input_reader, 1)

    run_inference_graph(segmentation_model, FLAGS.trained_checkpoint, dataset,
                        input_reader.num_examples, ignore_label, input_shape,
                        pad_to_shape, label_map, output_directory, num_classes,
                        eval_dir, min_dir, dist_dir, hist_dir, dump_dir)
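
# Sketch of the suffix convention for the statistics directories above
# (assumed reading, from the flag names: first letter is the mean scope,
# second the covariance scope; G = global, L = local).
def dist_suffix(global_mean, global_cov):
    return "_" + ("G" if global_mean else "L") + ("G" if global_cov else "L")

assert dist_suffix(True, False) == "_GL"
assert dist_suffix(False, True) == "_LG"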
def main(_):
    assert FLAGS.output_dir, '`output_dir` missing.'

    output_directory = FLAGS.output_dir
    tf.gfile.MakeDirs(output_directory)
    pipeline_config = read_config(FLAGS.model_config, FLAGS.data_config)

    pad_to_shape = None
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        raise ValueError('Must supply `input_shape`')

    patch_size = None
    if FLAGS.patch_size:
        patch_size = [int(dim) for dim in FLAGS.patch_size.split(',')]
        assert len(patch_size) == 2, "patch size must be h,w"

    if FLAGS.pad_to_shape:
        pad_to_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.pad_to_shape.split(',')
        ]

    label_map = (CITYSCAPES_LABEL_IDS
                 if FLAGS.label_ids else CITYSCAPES_LABEL_COLORS)

    ignore_label = pipeline_config.input_reader.ignore_label

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, ignore_label=ignore_label, is_training=False)

    #input_reader = pipeline_config.eval_input_reader
    input_reader = pipeline_config.input_reader
    input_reader.shuffle = False
    input_dict = dataset_builder.build(input_reader, 1)  # one epoch, as above

    num_examples = sum(
        r.num_examples for r in input_reader.tf_record_input_reader)
    iters = min(num_examples, FLAGS.max_iters)

    run_inference_graph(segmentation_model, FLAGS.trained_checkpoint,
                        input_dict, iters, input_shape, pad_to_shape,
                        label_map, output_directory, num_classes, patch_size)
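
# Minimal sketch of the patch-size parsing used above: 'h,w' becomes [h, w],
# and the iteration count is capped at the dataset size via
# min(num_examples, FLAGS.max_iters).
def parse_patch_size(patch_str):
    patch_size = [int(dim) for dim in patch_str.split(',')]
    assert len(patch_size) == 2, "patch size must be h,w"
    return patch_size

assert parse_patch_size('513,513') == [513, 513]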
def main(_):
    tf.gfile.MakeDirs(FLAGS.logdir)

    pipeline_config = read_config(FLAGS.model_config, FLAGS.data_config)

    model_config = pipeline_config.model
    train_config = pipeline_config.train_config
    input_config = pipeline_config.input_reader

    create_model_fn = functools.partial(model_builder.build,
                                        model_config=model_config,
                                        is_training=True,
                                        ignore_label=input_config.ignore_label)

    create_input_fn = functools.partial(dataset_builder.build,
                                        input_reader_config=input_config)

    is_chief = (FLAGS.task == 0)

    train_segmentation_model(
        create_model_fn,
        create_input_fn,
        train_config,
        model_config,
        master=FLAGS.master,
        task=FLAGS.task,
        is_chief=is_chief,
        startup_delay_steps=FLAGS.startup_delay_steps,
        train_dir=FLAGS.logdir,
        num_clones=FLAGS.num_clones,
        num_worker_replicas=FLAGS.num_replicas,
        clone_on_cpu=FLAGS.clone_on_cpu,
        replica_id=FLAGS.task,
        num_replicas=FLAGS.num_replicas,
        num_ps_tasks=FLAGS.num_ps_tasks,
        max_checkpoints_to_keep=FLAGS.max_checkpoints_to_keep,
        save_interval_secs=FLAGS.save_interval_secs,
        image_summaries=FLAGS.image_summaries,
        log_memory=FLAGS.log_memory,
        sync_bn_accross_gpu=FLAGS.sync_bn)
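
# Self-contained sketch of the functools.partial factory pattern above:
# the configs are bound up front so the training loop can build the model
# and input pipeline without seeing any configuration. The builder below is
# illustrative, not the repo's model_builder.
import functools

def build_model(model_config, is_training, ignore_label):
    return {"config": model_config, "training": is_training,
            "ignore_label": ignore_label}

create_model_fn = functools.partial(build_model,
                                    model_config="pspnet",
                                    is_training=True,
                                    ignore_label=255)
print(create_model_fn())  # every argument was pre-bound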
Example #6
def main(_):
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if not tf.gfile.IsDirectory(FLAGS.train_dir):
        raise ValueError('`train_dir` must be a valid directory '
                         'containing model checkpoints from training.')
    pipeline_config = read_config(FLAGS.model_config, FLAGS.data_config)
    eval_config = pipeline_config.eval_config
    input_config = pipeline_config.input_reader
    model_config = pipeline_config.model

    # TODO: handle this special case better.
    class_loss_type = model_config.pspnet.loss.classification_loss.WhichOneof(
        'loss_type')
    num_extra_class = 1 if class_loss_type == "confidence" else 0

    create_input_fn = functools.partial(dataset_builder.build,
                                        input_reader_config=input_config)
    create_model_fn = functools.partial(model_builder.build,
                                        model_config=model_config,
                                        is_training=False,
                                        ignore_label=input_config.ignore_label)

    eval_input_type = eval_config.eval_input_type
    input_type = eval_input_type.WhichOneof('eval_input_type_oneof')
    if input_type == 'cropped_eval_input':
        cropped_eval_input = eval_input_type.cropped_eval_input
        input_dims = (cropped_eval_input.height, cropped_eval_input.width)
        cropped_evaluation = True
    elif input_type == 'padded_eval_input':
        padded_eval_input = eval_input_type.padded_eval_input
        input_dims = (padded_eval_input.height, padded_eval_input.width)
        cropped_evaluation = False
    else:
        raise ValueError('Must specify an `eval_input_type` for evaluation.')

    if FLAGS.evaluate_all_from_checkpoint is not None:
        checkpoints_to_evaluate = get_checkpoints_from_path(
            FLAGS.evaluate_all_from_checkpoint, FLAGS.train_dir)
        # Run eval on each checkpoint only once. Exit when done.
        for curr_checkpoint in checkpoints_to_evaluate:
            tf.reset_default_graph()
            eval_segmentation_model_once(curr_checkpoint,
                                         create_model_fn,
                                         create_input_fn,
                                         input_dims,
                                         eval_config,
                                         num_extra_class=num_extra_class,
                                         input_reader=input_config,
                                         eval_dir=FLAGS.eval_dir,
                                         cropped_evaluation=cropped_evaluation,
                                         image_summaries=FLAGS.image_summaries,
                                         verbose=FLAGS.verbose)
    else:
        eval_segmentation_model(
            create_model_fn,
            create_input_fn,
            input_dims,
            eval_config,
            num_extra_class=num_extra_class,
            input_reader=input_config,
            train_dir=FLAGS.train_dir,
            eval_dir=FLAGS.eval_dir,
            cropped_evaluation=cropped_evaluation,
            evaluate_single_checkpoint=FLAGS.evaluate_all_from_checkpoint,
            image_summaries=FLAGS.image_summaries,
            verbose=FLAGS.verbose)
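
# Standalone mock of the oneof dispatch above: WhichOneof is the protobuf
# API, so a namedtuple stands in for the eval_input_type message here and
# the oneof name is passed explicitly.
from collections import namedtuple

EvalInput = namedtuple('EvalInput', 'height width')

def input_dims_for(input_type, msg):
    if input_type == 'cropped_eval_input':
        return (msg.height, msg.width), True    # cropped_evaluation = True
    if input_type == 'padded_eval_input':
        return (msg.height, msg.width), False   # cropped_evaluation = False
    raise ValueError('Must specify an `eval_input_type` for evaluation.')

print(input_dims_for('cropped_eval_input', EvalInput(713, 713)))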