Example #1
def main(_):
    output_directory = FLAGS.output_dir
    tf.gfile.MakeDirs(output_directory)
    pipeline_config = pipeline_pb2.PipelineConfig()
    with tf.gfile.GFile(FLAGS.config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    pad_to_shape = None
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        raise ValueError('Must supply `input_shape`')

    if FLAGS.pad_to_shape:
        pad_to_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.pad_to_shape.split(',')
        ]

    input_images = _get_images_from_path(FLAGS.input_path)
    label_map = (CITYSCAPES_LABEL_IDS
                 if FLAGS.label_ids else CITYSCAPES_LABEL_COLORS)

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, is_training=False)

    run_inference_graph(segmentation_model, FLAGS.trained_checkpoint,
                        input_images, input_shape, pad_to_shape, label_map,
                        output_directory)
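Several of these examples repeat the same parsing of comma-separated shape flags, mapping '-1' to None so that dimension stays dynamic. A minimal sketch of that pattern as a standalone helper; the name `parse_shape` and the example flag value are illustrative, not part of the original code:

def parse_shape(flag_value):
    """Turn a flag like '1,1024,2048,3' into a list of ints, with '-1' -> None."""
    return [int(dim) if dim != '-1' else None
            for dim in flag_value.split(',')]

# Illustrative usage:
# parse_shape('1,-1,-1,3')  ->  [1, None, None, 3]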
Example #2
def main(_):
    tf.gfile.MakeDirs(FLAGS.logdir)
    pipeline_config = pipeline_pb2.PipelineConfig()
    with tf.gfile.GFile(FLAGS.config_path, "r") as f:
        proto_str = f.read()
        text_format.Merge(proto_str, pipeline_config)

    model_config = pipeline_config.model
    train_config = pipeline_config.train_config
    input_config = pipeline_config.train_input_reader

    create_model_fn = functools.partial(model_builder.build,
                                        model_config=model_config,
                                        is_training=True)

    create_input_fn = functools.partial(dataset_builder.build,
                                        input_reader_config=input_config)

    is_chief = (FLAGS.task == 0)

    checkpoint_nodes = FLAGS.checkpoint_nodes
    if checkpoint_nodes is None:
        checkpoint_nodes = ICNET_GRADIENT_CHECKPOINTS
    else:
        checkpoint_nodes = checkpoint_nodes.replace(" ", "").split(",")

    train_segmentation_model(
        create_model_fn,
        create_input_fn,
        train_config,
        model_config,
        master=FLAGS.master,
        task=FLAGS.task,
        is_chief=is_chief,
        startup_delay_steps=FLAGS.startup_delay_steps,
        train_dir=FLAGS.logdir,
        num_clones=FLAGS.num_clones,
        num_worker_replicas=FLAGS.num_replicas,
        clone_on_cpu=FLAGS.clone_on_cpu,
        replica_id=FLAGS.task,
        num_replicas=FLAGS.num_replicas,
        num_ps_tasks=FLAGS.num_ps_tasks,
        max_checkpoints_to_keep=FLAGS.max_checkpoints_to_keep,
        save_interval_secs=FLAGS.save_interval_secs,
        image_summaries=FLAGS.image_summaries,
        log_memory=FLAGS.log_memory,
        gradient_checkpoints=checkpoint_nodes)
Example #3
    def setup(self):
        self.pipeline_config = pipeline_pb2.PipelineConfig()
        with tf.gfile.GFile(self.config_path, 'r') as f:
            text_format.Merge(f.read(), self.pipeline_config)

        self.num_classes, self.segmentation_model = model_builder.build(
            self.pipeline_config.model, is_training=False)
        self.outputs, self.placeholder_tensor = deploy_segmentation_inference_graph(
            model=self.segmentation_model,
            input_shape=self.input_shape,
            pad_to_shape=self.pad_to_shape,
            label_color_map=self.label_color_map)

        self.gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
        self.sess = tf.Session(config=tf.ConfigProto(
            gpu_options=self.gpu_options))
        self.input_graph_def = tf.get_default_graph().as_graph_def()
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.trained_checkpoint_prefix)
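A hedged sketch of how an object initialized by this `setup` might run inference; the method name `predict` and the `image` argument are illustrative, only the attribute names come from the code above:

    def predict(self, image):
        # Sketch only: `image` is assumed to be a NumPy array matching the
        # placeholder created in setup(); loading/preprocessing is not shown.
        return self.sess.run(self.outputs,
                             feed_dict={self.placeholder_tensor: image})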
Example #4
def main(_):
    pipeline_config = pipeline_pb2.PipelineConfig()
    with tf.gfile.GFile(FLAGS.config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')]
    else:
        input_shape = None

    pad_to_shape = None
    if FLAGS.pad_to_shape:
        pad_to_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.pad_to_shape.split(',')]

    export_inference_graph(pipeline_config,
                           FLAGS.trained_checkpoint,
                           FLAGS.output_dir, input_shape,
                           pad_to_shape)
Example #5
def main(_):
    eval_dir = FLAGS.eval_dir

    thresh_dir = FLAGS.thresh_dir
    tf.gfile.MakeDirs(thresh_dir)

    pipeline_config = pipeline_pb2.PipelineConfig()
    with tf.gfile.GFile(FLAGS.config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    num_classes = pipeline_config.model.pspnet.num_classes

    input_reader = pipeline_config.ood_train_input_reader
    input_reader.shuffle = True
    input_reader.num_epochs = epoch  # `epoch` is assumed to be defined elsewhere in the original module
    input_dict = thresh_dataset.build(input_reader, eval_dir,
                                      FLAGS.max_softmax)

    ignore_label = pipeline_config.ood_config.ignore_label

    run_inference_graph(input_dict, input_reader.num_examples, ignore_label,
                        num_classes, eval_dir, thresh_dir)
Example #6
def main(_):
    assert FLAGS.output_dir, '`output_dir` missing.'
    assert FLAGS.split_type, '`split_type` missing.'
    assert (FLAGS.cityscapes_dir) or \
           (FLAGS.input_pattern and FLAGS.annot_pattern), \
           'Must specify either `cityscapes_dir` or ' \
           '`input_pattern` and `annot_pattern`.'

    output_directory = FLAGS.output_dir
    tf.gfile.MakeDirs(output_directory)
    pipeline_config = pipeline_pb2.PipelineConfig()
    with tf.gfile.GFile(FLAGS.config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    pad_to_shape = None
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        raise ValueError('Must supply `input_shape`')

    patch_size = None
    if FLAGS.patch_size:
        patch_size = [int(dim) for dim in FLAGS.patch_size.split(',')]
        assert len(patch_size) == 2, "patch size must be h,w"

    if FLAGS.pad_to_shape:
        pad_to_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.pad_to_shape.split(',')
        ]

    if FLAGS.cityscapes_dir:
        search_image_files = os.path.join(FLAGS.cityscapes_dir,
                                          _DEFAULT_DIR['input'],
                                          FLAGS.split_type, '*',
                                          _DEFAULT_PATTEN['input'])
        search_annot_files = os.path.join(FLAGS.cityscapes_dir,
                                          _DEFAULT_DIR['label'],
                                          FLAGS.split_type, '*',
                                          _DEFAULT_PATTEN['label'])
        input_images = glob.glob(search_image_files)
        annot_filenames = glob.glob(search_annot_files)
    else:
        input_images = glob.glob(FLAGS.input_pattern)
        annot_filenames = glob.glob(FLAGS.annot_pattern)

    if len(input_images) != len(annot_filenames):
        print("images: ", len(input_images))
        print("annot: ", len(annot_filenames))
        raise ValueError('Supplied patterns matched different numbers of '
                         'images and annotations.')

    input_images = sorted(input_images)
    annot_filenames = sorted(annot_filenames)

    label_map = (CITYSCAPES_LABEL_IDS
                 if FLAGS.label_ids else CITYSCAPES_LABEL_COLORS)

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, is_training=False)

    run_inference_graph(segmentation_model, FLAGS.trained_checkpoint,
                        input_images, annot_filenames, input_shape,
                        pad_to_shape, label_map, output_directory, num_classes,
                        patch_size)
Example #7
def main(_):
    #test_plots()
    eval_dir = FLAGS.eval_dir
    output_directory = os.path.join(eval_dir, "inf")
    suff = ""
    if FLAGS.global_mean:
        suff = "_G"
    else:
        suff = "_L"
    if FLAGS.global_cov:
        suff += "G"
    else:
        suff += "L"
    dist_dir = os.path.join(eval_dir, "class_dist" + suff)
    min_dir = os.path.join(eval_dir, "min" + suff)
    hist_dir = os.path.join(eval_dir, "hist" + suff)
    dump_dir = os.path.join(eval_dir, "dump" + suff)

    tf.gfile.MakeDirs(output_directory)
    tf.gfile.MakeDirs(min_dir)
    tf.gfile.MakeDirs(dist_dir)
    tf.gfile.MakeDirs(hist_dir)
    tf.gfile.MakeDirs(dump_dir)
    pipeline_config = pipeline_pb2.PipelineConfig()
    with tf.gfile.GFile(FLAGS.config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    pad_to_shape = None
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        raise ValueError('Must supply `input_shape`')

    if FLAGS.pad_to_shape:
        pad_to_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.pad_to_shape.split(',')
        ]

    label_map = (CITYSCAPES_LABEL_IDS
                 if FLAGS.label_ids else CITYSCAPES_LABEL_COLORS)

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, is_training=False)

    if FLAGS.do_ood:
        if FLAGS.write_out or FLAGS.use_train:
            input_reader = pipeline_config.ood_train_input_reader
        else:
            input_reader = pipeline_config.ood_eval_input_reader
    else:
        if FLAGS.use_train:
            input_reader = pipeline_config.train_input_reader
        else:
            input_reader = pipeline_config.eval_input_reader

    input_reader.shuffle = True
    input_reader.num_epochs = 1
    input_reader.num_examples = min(1500, input_reader.num_examples)
    input_dict = dataset_builder.build(input_reader)

    ignore_label = pipeline_config.ood_config.ignore_label

    run_inference_graph(segmentation_model, FLAGS.trained_checkpoint,
                        input_dict, input_reader.num_examples, ignore_label,
                        input_shape, pad_to_shape, label_map, output_directory,
                        num_classes, eval_dir, min_dir, dist_dir, hist_dir,
                        dump_dir)
Example #8
def main(_):
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if not tf.gfile.IsDirectory(FLAGS.train_dir):
        raise ValueError('`train_dir` must be a valid directory '
                         'containing model checkpoints from training.')
    pipeline_config = pipeline_pb2.PipelineConfig()
    with tf.gfile.GFile(FLAGS.config_path, "r") as f:
        proto_str = f.read()
        text_format.Merge(proto_str, pipeline_config)
    eval_config = pipeline_config.eval_config
    input_config = pipeline_config.eval_input_reader
    model_config = pipeline_config.model

    create_input_fn = functools.partial(
        dataset_builder.build,
        input_reader_config=input_config)
    create_model_fn = functools.partial(
        model_builder.build,
        model_config=model_config,
        is_training=False)

    eval_input_type = eval_config.eval_input_type
    input_type = eval_input_type.WhichOneof('eval_input_type_oneof')
    if input_type == 'cropped_eval_input':
        cropped_eval_input = eval_input_type.cropped_eval_input
        input_dims = (cropped_eval_input.height,
                      cropped_eval_input.width)
        cropped_evaluation = True
    elif input_type == 'padded_eval_input':
        padded_eval_input = eval_input_type.padded_eval_input
        input_dims = (padded_eval_input.height,
                      padded_eval_input.width)
        cropped_evaluation = False
    else:
        raise ValueError('Must specify an `eval_input_type` for evaluation.')

    if FLAGS.evaluate_all_from_checkpoint is not None:
        checkpoints_to_evaluate = get_checkpoints_from_path(
            FLAGS.evaluate_all_from_checkpoint, FLAGS.train_dir)
        # Run eval on each checkpoint only once. Exit when done.
        for curr_checkpoint in checkpoints_to_evaluate:
            tf.reset_default_graph()
            eval_segmentation_model_once(curr_checkpoint,
                                         create_model_fn,
                                         create_input_fn,
                                         input_dims,
                                         eval_config,
                                         eval_dir=FLAGS.eval_dir,
                                         cropped_evaluation=cropped_evaluation,
                                         image_summaries=FLAGS.image_summaries,
                                         verbose=FLAGS.verbose)
    else:
        eval_segmentation_model(
            create_model_fn,
            create_input_fn,
            input_dims,
            eval_config,
            train_dir=FLAGS.train_dir,
            eval_dir=FLAGS.eval_dir,
            cropped_evaluation=cropped_evaluation,
            evaluate_single_checkpoint=FLAGS.evaluate_all_from_checkpoint,
            image_summaries=FLAGS.image_summaries,
            verbose=FLAGS.verbose)
Example #9
def load_config(pipeline_config_path):
    pipeline_config = pipeline_pb2.PipelineConfig()
    with open(pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    return pipeline_config
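A possible usage sketch for `load_config`; the config path below is illustrative, not from the original code:

config = load_config('configs/pspnet_cityscapes.config')  # hypothetical path
print(config.model)         # model definition from the pipeline proto
print(config.train_config)  # training settings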
Example #10
            # Snippet begins mid-way through the evaluation loop; the matching
            # `if` branch (likely a mixup path, given `use_mixup` below) is not shown.
            else:
                outputs = model(images)
                loss = criterion(outputs, labels)
            _, preds = torch.max(outputs, 1)
            running_loss += loss.item() * images.size(0)
            running_corrects += torch.sum(preds == labels.data)
    epoch_loss = running_loss / (len(dataloader) * batch_size)
    epoch_acc = running_corrects.double() / (len(dataloader) * batch_size)

    return epoch_loss, epoch_acc.item()


if __name__ == "__main__":
    pipeline_config = "./config/pipeline_config.proto"
    pipeline = pipeline_pb2.PipelineConfig()
    with open(pipeline_config, "r") as f:
        proto_str = f.read()
        text_format.Merge(proto_str, pipeline)
    print(pipeline)

    data_root = pipeline.data_root
    model_name = pipeline.model.model_name
    num_classes = pipeline.model.num_classes
    num_epochs = pipeline.train_config.num_epochs
    batch_size = pipeline.train_config.batch_size
    checkpoints_path = pipeline.train_config.checkpoints_path
    use_mixup = pipeline.train_config.use_mixup

    if not os.path.exists(checkpoints_path):
        os.makedirs(checkpoints_path)
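For completeness, the same `text_format` module can write a populated `PipelineConfig` back to the text form this script reads; a minimal sketch, assuming only the fields accessed above and using illustrative values:

from google.protobuf import text_format

pipeline = pipeline_pb2.PipelineConfig()
pipeline.data_root = "/data/my_dataset"        # illustrative value
pipeline.model.model_name = "resnet50"         # illustrative value
pipeline.model.num_classes = 10                # illustrative value
pipeline.train_config.num_epochs = 30          # illustrative value
pipeline.train_config.batch_size = 16          # illustrative value
with open("./config/pipeline_config.proto", "w") as f:
    f.write(text_format.MessageToString(pipeline))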