Esempio n. 1
0
    def build(cfg):
        """Assemble a MultiHeadMultiTaskNetwork from a config dict.

        Builds one branch per entry in cfg['tasks'], plus a dedicated
        re-id head and the backbone.

        Returns:
            (model, skips, duplicates) where skips and duplicates are empty.

        Raises:
            ValueError: on an unknown mode or task name.
        """
        branch_cfgs = cfg['tasks']
        branches = []
        # NOTE(review): mode is validated here but not used afterwards in
        # this block — presumably consumed elsewhere; confirm.
        mode = cfg["mode"]
        if mode not in ('inversed', 'detached', 'normal'):
            raise ValueError(mode)

        for name, branch_cfg in branch_cfgs.items():
            # Feature dimension of the backbone output (depends on the
            # backbone model).
            branch_cfg['input_dim'] = 2048
            if name == "attribute":
                branch = AttributeBranch.build(branch_cfg)
            elif name == "classification":
                branch_cfg['num_classes'] = cfg['num_classes']
                branch = ClassificationBranch.build(branch_cfg)
            elif name == "pose":
                branch_cfg['num_joints'] = cfg['num_joints']
                branch = Pose2DHead.build(branch_cfg)
            elif name == 'reid':
                branch = BaselineReidBranch.build(branch_cfg)
            else:
                raise ValueError("Unknown task: {}".format(name))
            branches.append(branch)

        reid_head = BaselineReidBranch.build(cfg['reid'])
        backbone = model_builder.build(cfg['backbone'])
        return MultiHeadMultiTaskNetwork(backbone, reid_head, branches), [], []
def extract_images(gpus, model_config, data_config, trained_checkpoint,
                   pad_to_shape, processor_type, annot_type, is_debug,
                   export_folder, **kwargs):
    """Build model and dataset from config files and export inference images.

    All remaining keyword arguments are forwarded to run_inference_graph.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = gpus

    pipeline_config = read_config(model_config, data_config)

    # A "h,w" CLI string may be passed; "-1" marks an unconstrained dim.
    if isinstance(pad_to_shape, str):
        pad_to_shape = [None if dim == '-1' else int(dim)
                        for dim in pad_to_shape.split(',')]

    input_reader = pipeline_config.input_reader
    input_reader.shuffle = False
    # Drop the last record reader when more than one is configured.
    if len(input_reader.tf_record_input_reader) > 1:
        input_reader.tf_record_input_reader.pop()
        print("REMOVED INPUT READER:\n", input_reader)
    ignore_label = input_reader.ignore_label

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, is_training=False, ignore_label=ignore_label)
    # Keep the input pipeline on the CPU.
    with tf.device("cpu:0"):
        dataset = dataset_builder.build(input_reader, 1)

    num_gpu = len(gpus.split(","))

    num_examples = sum(reader.num_examples
                       for reader in input_reader.tf_record_input_reader)

    run_inference_graph(segmentation_model, trained_checkpoint, dataset,
                        num_examples, ignore_label, pad_to_shape, num_classes,
                        processor_type, annot_type, num_gpu, export_folder,
                        **kwargs)
Esempio n. 3
0
def export_inference_graph(input_type,
                           pipeline_config,
                           trained_checkpoint_prefix,
                           output_directory,
                           input_shape=None,
                           output_collection_name='inference_op',
                           additional_output_tensor_names=None):
  """Export an inference graph plus the pipeline config it came from.

  Args:
    input_type: Type of graph input, one of [`image_tensor`, `tf_example`].
    pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
    trained_checkpoint_prefix: Path to the trained checkpoint file.
    output_directory: Directory the exported artifacts are written to.
    input_shape: Optional fixed shape for an `image_tensor` input;
      defaults to [None, None, None, 3] when unset.
    output_collection_name: Collection to register output tensors in,
      or None to skip registration.
    additional_output_tensor_names: Extra output tensors to retain in the
      frozen graph.
  """
  model = model_builder.build(pipeline_config.model, is_training=False)
  _export_inference_graph(input_type, model,
                          pipeline_config.eval_config.use_moving_averages,
                          trained_checkpoint_prefix,
                          output_directory, additional_output_tensor_names,
                          input_shape, output_collection_name,
                          graph_hook_fn=None)
  # Record use_moving_averages=False in the config written next to the
  # graph (the export has already folded them in — confirm).
  pipeline_config.eval_config.use_moving_averages = False
  serialized = text_format.MessageToString(pipeline_config)
  with tf.gfile.Open(
      os.path.join(output_directory, 'pipeline.config'), 'wb') as f:
    f.write(serialized)
Esempio n. 4
0
    def transform_and_pad_input_data_fn(tensor_dict):
      """Transform `tensor_dict` and pad it to static shapes.

      Returns:
        A (features, labels) tuple for the estimator input pipeline.
      """
      augmentations = [
          preprocessor_builder.build(step)
          for step in train_config.data_augmentation_options
      ]
      augment_fn = functools.partial(
          augment_input_data, data_augmentation_options=augmentations)

      model = model_builder.build(model_config, is_training=True)
      resizer_cfg = config_util.get_image_resizer_config(model_config)
      resizer_fn = image_resizer_builder.build(resizer_cfg)
      num_classes = config_util.get_number_of_classes(model_config)

      transform_fn = functools.partial(
          transform_input_data, model_preprocess_fn=model.preprocess,
          image_resizer_fn=resizer_fn,
          num_classes=num_classes,
          data_augmentation_fn=augment_fn,
          merge_multiple_boxes=train_config.merge_multiple_label_boxes,
          retain_original_image=train_config.retain_original_images,
          use_multiclass_scores=train_config.use_multiclass_scores,
          use_bfloat16=train_config.use_bfloat16)

      # Pad after transforming so the static shapes match the resizer output.
      padded = pad_input_data_to_static_shapes(
          tensor_dict=transform_fn(tensor_dict),
          max_num_boxes=train_input_config.max_number_of_boxes,
          num_classes=num_classes,
          spatial_image_shape=config_util.get_spatial_image_size(resizer_cfg))
      return (_get_features_dict(padded), _get_labels_dict(padded))
Esempio n. 5
0
def export_inference_graph(input_type,
                           pipeline_config,
                           trained_checkpoint_prefix,
                           output_directory,
                           input_shape=None,
                           optimize_graph=True,
                           output_collection_name='inference_op',
                           additional_output_tensor_names=None):
  """Export an inference graph for the configured detection model.

  Args:
    input_type: Type of graph input, one of [`image_tensor`, `tf_example`].
    pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
    trained_checkpoint_prefix: Path to the trained checkpoint file.
    output_directory: Directory the exported graph is written to.
    input_shape: Optional fixed shape for an `image_tensor` input;
      defaults to [None, None, None, 3] when unset.
    optimize_graph: Whether to run Grappler optimizations on the graph.
    output_collection_name: Collection to register output tensors in,
      or None to skip registration.
    additional_output_tensor_names: Extra output tensors to retain in the
      frozen graph.
  """
  model = model_builder.build(pipeline_config.model, is_training=False)
  use_moving_averages = pipeline_config.eval_config.use_moving_averages
  _export_inference_graph(input_type, model, use_moving_averages,
                          trained_checkpoint_prefix, output_directory,
                          additional_output_tensor_names, input_shape,
                          optimize_graph, output_collection_name)
    def build(cfg):
        """Assemble a MultiHeadSemMultiTaskNetwork from a config dict.

        Builds one branch per entry in cfg['tasks'] plus an FPN semantic
        segmentation head.

        Returns:
            (model, skips, duplicates) where skips and duplicates are empty.

        Raises:
            ValueError: on an unknown task name.
        """
        branches = []
        for name, branch_cfg in cfg['tasks'].items():
            # Feature dimension of the backbone output (depends on the
            # backbone model).
            branch_cfg['input_dim'] = 2048
            if name == "attribute":
                branches.append(AttributeBranch.build(branch_cfg))
            elif name == "classification":
                branch_cfg['num_classes'] = cfg['num_classes']
                branches.append(ClassificationBranch.build(branch_cfg))
            elif name == "pose":
                branch_cfg['num_joints'] = cfg['num_joints']
                branches.append(Pose2DHead.build(branch_cfg))
            elif name == 'reid':
                print("created reid")
                branches.append(BaselineReidBranch.build(branch_cfg))
            else:
                raise ValueError("Unknown task: {}".format(name))

        num_seg_classes = cfg['num_seg_classes']
        sem_head = FpnSemHead(num_seg_classes, 256)
        backbone = model_builder.build(cfg['backbone'])
        net = MultiHeadSemMultiTaskNetwork(backbone, sem_head, branches)
        return net, [], []
Esempio n. 7
0
def main(_):
    """Run segmentation inference on images from FLAGS-specified paths."""
    output_directory = FLAGS.output_dir
    tf.gfile.MakeDirs(output_directory)
    pipeline_config = pipeline_pb2.PipelineConfig()
    with tf.gfile.GFile(FLAGS.config_path, 'r') as config_file:
        text_format.Merge(config_file.read(), pipeline_config)

    def _parse_dims(spec):
        # "-1" entries stand for an unconstrained dimension.
        return [None if dim == '-1' else int(dim)
                for dim in spec.split(',')]

    if not FLAGS.input_shape:
        raise ValueError('Must supply `input_shape`')
    input_shape = _parse_dims(FLAGS.input_shape)

    pad_to_shape = None
    if FLAGS.pad_to_shape:
        pad_to_shape = _parse_dims(FLAGS.pad_to_shape)

    input_images = _get_images_from_path(FLAGS.input_path)
    label_map = (CITYSCAPES_LABEL_IDS
                 if FLAGS.label_ids else CITYSCAPES_LABEL_COLORS)

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, is_training=False)

    run_inference_graph(segmentation_model, FLAGS.trained_checkpoint,
                        input_images, input_shape, pad_to_shape, label_map,
                        output_directory)
 def build(cfg):
     """Construct a Semantic segmentation model from a config dict.

     Returns:
         (model, skips, duplicates); "fc" appears in skips — presumably
         the backbone classifier to skip on checkpoint restore (confirm).
     """
     net = Semantic(model_builder.build(cfg['backbone']),
                    cfg['num_seg_classes'])
     return net, ["fc"], []
    def _predict_input_fn(params=None):
        """Decodes serialized tf.Examples and returns `ServingInputReceiver`.

        Args:
          params: Parameter dictionary passed from the estimator (unused).

        Returns:
          `ServingInputReceiver` feeding a single decoded, resized image.
        """
        del params
        # A single serialized tf.Example fed at serving time.
        example = tf.placeholder(dtype=tf.string,
                                 shape=[],
                                 name='input_feature')

        num_classes = config_util.get_number_of_classes(model_config)
        model = model_builder.build(model_config, is_training=False)
        image_resizer_config = config_util.get_image_resizer_config(
            model_config)
        image_resizer_fn = image_resizer_builder.build(image_resizer_config)

        # No augmentation at serving time — only resize and preprocess.
        transform_fn = functools.partial(transform_input_data,
                                         model_preprocess_fn=model.preprocess,
                                         image_resizer_fn=image_resizer_fn,
                                         num_classes=num_classes,
                                         data_augmentation_fn=None)

        decoder = tf_example_decoder.TfExampleDecoder(
            load_instance_masks=False)
        input_dict = transform_fn(decoder.decode(example))
        images = tf.to_float(input_dict[fields.InputDataFields.image])
        # Add the batch dimension the model expects.
        images = tf.expand_dims(images, axis=0)

        return tf.estimator.export.ServingInputReceiver(
            features={fields.InputDataFields.image: images},
            receiver_tensors={SERVING_FED_EXAMPLE_KEY: example})
Esempio n. 10
0
def export_inference_graph(input_type,
                           pipeline_config,
                           trained_checkpoint_prefix,
                           output_directory,
                           input_shape=None,
                           output_collection_name='inference_op',
                           additional_output_tensor_names=None,
                           write_inference_graph=False,
                           use_side_inputs=False,
                           side_input_shapes=None,
                           side_input_names=None,
                           side_input_types=None):
    """Exports inference graph for the model specified in the pipeline config.

    Args:
      input_type: Type of input for the graph. Can be one of ['image_tensor',
        'encoded_image_string_tensor', 'tf_example'].
      pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
      trained_checkpoint_prefix: Path to the trained checkpoint file.
      output_directory: Path to write outputs.
      input_shape: Sets a fixed shape for an `image_tensor` input. If not
        specified, will default to [None, None, None, 3].
      output_collection_name: Name of collection to add output tensors to.
        If None, does not add output tensors to a collection.
      additional_output_tensor_names: list of additional output
        tensors to include in the frozen graph.
      write_inference_graph: If true, writes inference graph to disk.
      use_side_inputs: If True, the model requires side_inputs.
      side_input_shapes: List of shapes of the side input tensors,
        required if use_side_inputs is True.
      side_input_names: List of names of the side input tensors,
        required if use_side_inputs is True.
      side_input_types: List of types of the side input tensors,
        required if use_side_inputs is True.
    """
    detection_model = model_builder.build(pipeline_config.model,
                                          is_training=False)
    # Apply the optional graph rewriter (e.g. quantization) during export.
    graph_rewriter_fn = None
    if pipeline_config.HasField('graph_rewriter'):
        graph_rewriter_config = pipeline_config.graph_rewriter
        graph_rewriter_fn = graph_rewriter_builder.build(graph_rewriter_config,
                                                         is_training=False)
    _export_inference_graph(
        input_type,
        detection_model,
        pipeline_config.eval_config.use_moving_averages,
        trained_checkpoint_prefix,
        output_directory,
        additional_output_tensor_names,
        input_shape,
        output_collection_name,
        graph_hook_fn=graph_rewriter_fn,
        write_inference_graph=write_inference_graph,
        use_side_inputs=use_side_inputs,
        side_input_shapes=side_input_shapes,
        side_input_names=side_input_names,
        side_input_types=side_input_types)
    # Record use_moving_averages=False in the config written alongside the
    # export (presumably already folded into the graph — confirm).
    pipeline_config.eval_config.use_moving_averages = False
    config_util.save_pipeline_config(pipeline_config, output_directory)
 def build(cfg):
     """Construct a ReidSeg model from a config dict.

     Returns:
         (model, skips, duplicates); "fc" appears in skips — presumably
         the backbone classifier to skip on checkpoint restore (confirm).
     """
     backbone = model_builder.build(cfg['backbone'])
     # 'variation' selects the architecture variant; defaults to 'v1'.
     net = ReidSeg(backbone, cfg['num_seg_classes'],
                   cfg.get('variation', 'v1'))
     return net, ["fc"], []
Esempio n. 12
0
 def build(cfg):
     """Construct a PoseReid model from a config dict.

     Returns:
         (model, skips, duplicates) with both lists empty.
     """
     backbone = model_builder.build(cfg['backbone'])
     joints = cfg['num_joints']
     split_cfg = cfg['split']
     single_head = cfg['single_head']
     return PoseReid(backbone, split_cfg, joints, single_head), [], []
Esempio n. 13
0
def test_attribute_evaluation():
    """Smoke-test attribute evaluation for every configured model."""
    evaluation, model_cfgs = evaluation_builder.build(attribute_evaluation_cfg)
    with torch.no_grad():
        for cfg in model_cfgs:
            net = DataParallel(model_builder.build(cfg))
            result = evaluate(evaluation, net, delete=True)
            print(result)
Esempio n. 14
0
    def build(cfg):
        """Construct a single-task Pose model from a config dict.

        Returns:
            (model, skips, duplicates) with both lists empty.
        """
        joints = cfg['num_joints']
        net = Pose(model_builder.build(cfg['backbone']), joints)
        return net, [], []
def construct_graph(pipeline_config, inputs):
    """Build the detection graph: preprocess -> predict -> postprocess.

    Returns:
        (raw prediction tensors, postprocessed tensors).
    """
    print('construct graph')
    model = model_builder.build(pipeline_config.model, is_training=False)
    preprocessed = model.preprocess(tf.to_float(inputs))
    predictions = model.predict(preprocessed)
    return predictions, model.postprocess(predictions)
def test_evaluation_cfg():
    """Load the market evaluation config and build each listed model."""
    with open('./configs/market_evaluate.json') as f:
        cfg = build_config(json.load(f))
    dataloaders, model_cfgs = evaluation_builder.build(cfg['evaluation'])
    with torch.no_grad():
        for model_cfg in model_cfgs:
            model = model_builder.build(model_cfg)
Esempio n. 17
0
 def build(cfg):
     """Construct a Classification model from a config dict.

     Returns:
         (model, skips, duplicates); 'fc' appears in skips — presumably
         the backbone classifier to skip on checkpoint restore (confirm).
     """
     num_classes = cfg['num_classes']
     # NOTE(review): merging_block is built but never passed anywhere in
     # this block — confirm the builder call is intended (side effects?).
     merging_block = merging_block_builder.build(cfg.get('merging_block'))
     backbone = model_builder.build(cfg['backbone'])
     # The classification branch consumes the 2048-d backbone features.
     cfg['input_dim'] = 2048
     head = ClassificationBranch.build(cfg)
     return Classification(backbone, head, num_classes), ['fc'], []
Esempio n. 18
0
 def test_create_faster_rcnn_model_from_config_with_example_miner(self):
     """Builds a Faster R-CNN model from a text config containing a
     hard-example miner and checks the miner is attached to the model."""
     model_text_proto = """
   faster_rcnn {
     num_classes: 3
     feature_extractor {
       type: 'faster_rcnn_inception_resnet_v2'
     }
     image_resizer {
       keep_aspect_ratio_resizer {
         min_dimension: 600
         max_dimension: 1024
       }
     }
     first_stage_anchor_generator {
       grid_anchor_generator {
         scales: [0.25, 0.5, 1.0, 2.0]
         aspect_ratios: [0.5, 1.0, 2.0]
         height_stride: 16
         width_stride: 16
       }
     }
     first_stage_box_predictor_conv_hyperparams {
       regularizer {
         l2_regularizer {
         }
       }
       initializer {
         truncated_normal_initializer {
         }
       }
     }
     second_stage_box_predictor {
       mask_rcnn_box_predictor {
         fc_hyperparams {
           op: FC
           regularizer {
             l2_regularizer {
             }
           }
           initializer {
             truncated_normal_initializer {
             }
           }
         }
       }
     }
     hard_example_miner {
       num_hard_examples: 10
       iou_threshold: 0.99
     }
   }"""
     model_proto = model_pb2.DetectionModel()
     text_format.Merge(model_text_proto, model_proto)
     model = model_builder.build(model_proto, is_training=True)
     # The builder should have wired the configured miner onto the model.
     self.assertIsNotNone(model._hard_example_miner)
Esempio n. 19
0
    def create_model(self, model_config):
        """Construct a DetectionModel in training mode.

        Args:
            model_config: A model.proto object describing the desired
                DetectionModel.

        Returns:
            The DetectionModel built from `model_config`.
        """
        return model_builder.build(model_config, is_training=True)
Esempio n. 20
0
def test_reid_evaluation():
    """Smoke-test re-id evaluation on the baseline model."""
    dataloader = dataloader_builder.build(reid_cfg)
    # Restore the trained weights through the evaluation model builder.
    model_cfgs = evaluation_model_builder.build(baseline_model_cfg)

    net = DataParallel(model_builder.build(model_cfgs[0]))
    run_ctx = {'config': {'device': torch.device('cuda')}}
    score = evaluate([dataloader], net, run_ctx, "test")

    print(score)
def export_inference_graph(pipeline_config,
                           trained_checkpoint_prefix,
                           output_directory,
                           input_shape=None,
                           pad_to_shape=None,
                           output_colours=False,
                           output_collection_name='predictions'):
    """Export and freeze the segmentation inference graph.

    Writes a pbtxt dump of the graph, a checkpoint copy, and a frozen
    `frozen_inference_graph.pb` into `output_directory`.

    Args:
        pipeline_config: Pipeline proto holding the model configuration.
        trained_checkpoint_prefix: Path prefix of the trained checkpoint.
        output_directory: Directory all artifacts are written to.
        input_shape: Optional fixed input shape.
        pad_to_shape: Optional shape inputs are padded to.
        output_colours: If True, map labels to Cityscapes colors.
        output_collection_name: Graph collection for the output tensors.
    """
    _, segmentation_model = model_builder.build(pipeline_config.model,
                                                is_training=False)

    tf.gfile.MakeDirs(output_directory)
    frozen_graph_path = os.path.join(output_directory,
                                     'frozen_inference_graph.pb')
    eval_graphdef_path = os.path.join(output_directory, 'export_graph.pbtxt')
    model_path = os.path.join(output_directory, 'model.ckpt')

    outputs, placeholder_tensor = deploy_segmentation_inference_graph(
        model=segmentation_model,
        input_shape=input_shape,
        pad_to_shape=pad_to_shape,
        label_color_map=(CITYSCAPES_LABEL_COLORS if output_colours else None),
        output_collection_name=output_collection_name)

    profile_inference_graph(tf.get_default_graph())

    saver = tf.train.Saver()
    input_saver_def = saver.as_saver_def()

    # Serialize the graph once (the Saver ops above are already part of it)
    # and reuse it for the pbtxt dump, the checkpoint copy, and the freeze.
    graph_def = tf.get_default_graph().as_graph_def()

    # BUG FIX: the FastGFile handle was never closed, so the pbtxt dump
    # could be left unflushed; a context manager guarantees the close.
    with tf.gfile.FastGFile(eval_graphdef_path, "w") as f:
        f.write(str(graph_def))

    write_graph_and_checkpoint(
        inference_graph_def=graph_def,
        model_path=model_path,
        input_saver_def=input_saver_def,
        trained_checkpoint_prefix=trained_checkpoint_prefix)

    output_node_names = outputs.name.split(":")[0]

    freeze_graph_with_def_protos(
        input_graph_def=graph_def,
        input_saver_def=input_saver_def,
        input_checkpoint=trained_checkpoint_prefix,
        output_graph=frozen_graph_path,
        output_node_names=output_node_names,
        restore_op_name='save/restore_all',
        filename_tensor_name='save/Const:0',
        clear_devices=True,
        initializer_nodes='')

    print("Done!")
Esempio n. 22
0
 def build(cfg):
     """Construct a Baseline re-id model from a config dict.

     Args:
         cfg: Dict with a 'backbone' config and an optional 'branch_type'
             of 'baseline' (default) or 'split'.

     Returns:
         (model, skips, duplicates); "fc" appears in skips — presumably
         the backbone classifier to skip on checkpoint restore (confirm).

     Raises:
         ValueError: If 'branch_type' is not a known branch name.
     """
     backbone = model_builder.build(cfg['backbone'])
     branch_name = cfg.get('branch_type', 'baseline')
     if branch_name == 'baseline':
         reid_branch = BaselineReidBranch.build(cfg)
     elif branch_name == 'split':
         reid_branch = SplitReidBranch.build(cfg)
     else:
         # BUG FIX: the bare ValueError gave no hint which value failed.
         raise ValueError("Unknown branch_type: {}".format(branch_name))
     model = Baseline(backbone, reid_branch)
     skips = ["fc"]
     return model, skips, []
Esempio n. 23
0
def test_model_builder(model_name):
    """Check model_builder returns the expected class for each model name.

    Raises:
        RuntimeError: If `model_name` is not one of the known names.
    """
    model_cfg["name"] = model_name
    model_cfg.update(dataset_info)
    model = model_builder.build(model_cfg)
    if model_name == "trinet":
        assert isinstance(model, TriNet)
    elif model_name == "mgn":
        assert isinstance(model, MGN)
    elif model_name == "classification":
        assert isinstance(model, Classification)
    else:
        # BUG FIX: include the offending name instead of a bare RuntimeError.
        raise RuntimeError("Unknown model name: {}".format(model_name))
Esempio n. 24
0
        def transform_and_pad_input_data_fn(tensor_dict):
            """Transform `tensor_dict` into a (features, labels) pair."""
            net = model_builder.build(model_config, is_training=False)
            resizer_cfg = config_utils.get_image_resizer_config(model_config)
            resize_fn = image_resizer_builder.build(resizer_cfg)

            transform = functools.partial(
                transform_input_data,
                model_preprocess_fn=net.preprocess,
                image_resizer_fn=resize_fn)
            transformed = transform(tensor_dict)

            return (_get_features_dict(transformed),
                    _get_label_dict(transformed))
Esempio n. 25
0
def run_experiment(gpus, model_config, data_config, trained_checkpoint,
                   pad_to_shape, processor_type, annot_type, is_debug,
                   **kwargs):
    """Run one inference experiment, capturing stdout/stderr.

    Returns:
        (print_buffer, result, had_error): the captured output buffer, the
        run_inference_graph result (None on failure), and an error flag.
    """
    had_error = None
    # BUG FIX: create the buffer before the try block — it is returned
    # unconditionally, so an exception during setup previously raised a
    # NameError at the return statement.
    print_buffer = StringIO()
    try:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpus
        if not is_debug:
            # Capture all output so the caller can inspect or log it.
            sys.stdout = print_buffer
            sys.stderr = print_buffer

        pipeline_config = read_config(model_config, data_config)

        # A "h,w" CLI string may be passed; "-1" marks an unconstrained dim.
        if pad_to_shape is not None and isinstance(pad_to_shape, str):
            pad_to_shape = [
                int(dim) if dim != '-1' else None
                for dim in pad_to_shape.split(',')
            ]

        input_reader = pipeline_config.input_reader
        input_reader.shuffle = True
        ignore_label = input_reader.ignore_label

        num_classes, segmentation_model = model_builder.build(
            pipeline_config.model,
            is_training=False,
            ignore_label=ignore_label)
        # Keep the input pipeline on the CPU.
        with tf.device("cpu:0"):
            dataset = dataset_builder.build(input_reader, 1)

        num_gpu = len(gpus.split(","))

        num_examples = sum(
            [r.num_examples for r in input_reader.tf_record_input_reader])

        result = run_inference_graph(segmentation_model, trained_checkpoint,
                                     dataset, num_examples, ignore_label,
                                     pad_to_shape, num_classes, processor_type,
                                     annot_type, num_gpu, **kwargs)
        had_error = False
    except Exception:
        if is_debug:
            # BUG FIX: bare `raise` re-raises with the original traceback.
            raise
        print(traceback.format_exc())
        had_error = True
        result = None

    # Always restore the real streams before returning.
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    tf.reset_default_graph()

    return print_buffer, result, had_error
def main(_):
    """Build model and dataset from FLAGS and run inference, writing class
    distribution / min / hist / dump artifacts under FLAGS.eval_dir."""
    #test_plots()
    eval_dir = FLAGS.eval_dir
    output_directory = os.path.join(eval_dir, "inf")
    # Directory suffix: "G" when the global mean/cov flags are set, else "L".
    suff = ""
    if FLAGS.global_mean:
        suff = "_G"
    else:
        suff = "_L"
    if FLAGS.global_cov:
        suff += "G"
    else:
        suff += "L"
    dist_dir = os.path.join(eval_dir, "class_dist" + suff)
    min_dir = os.path.join(eval_dir, "min" + suff)
    hist_dir = os.path.join(eval_dir, "hist" + suff)
    dump_dir = os.path.join(eval_dir, "dump" + suff)

    # NOTE(review): dump_dir is never created here — confirm that
    # run_inference_graph creates it before writing into it.
    tf.gfile.MakeDirs(output_directory)
    tf.gfile.MakeDirs(min_dir)
    tf.gfile.MakeDirs(dist_dir)
    tf.gfile.MakeDirs(hist_dir)
    pipeline_config = read_config(FLAGS.model_config, FLAGS.data_config)

    pad_to_shape = None
    # '-1' entries mean "unconstrained dimension".
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
                for dim in FLAGS.input_shape.split(',')]
    else:
        raise ValueError('Must supply `input_shape`')

    if FLAGS.pad_to_shape:
        pad_to_shape = [
            int(dim) if dim != '-1' else None
                for dim in FLAGS.pad_to_shape.split(',')]

    label_map = (CITYSCAPES_LABEL_IDS
        if FLAGS.label_ids else CITYSCAPES_LABEL_COLORS)

    input_reader = pipeline_config.input_reader
    input_reader.shuffle = True
    ignore_label = input_reader.ignore_label

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, is_training=False, ignore_label=ignore_label)
    dataset = dataset_builder.build(input_reader, 1)

    run_inference_graph(segmentation_model, FLAGS.trained_checkpoint,
                        dataset, input_reader.num_examples, ignore_label, input_shape, pad_to_shape,
                        label_map, output_directory, num_classes, eval_dir, min_dir, dist_dir, hist_dir,
                        dump_dir)
Esempio n. 27
0
def test_pose_writing():
    """Write pose predictions to an h5 file, replacing any stale copy."""
    pose_dataset = dataset_builder.build(pose_dataset_cfg)
    pose_dataloader = torch.utils.data.DataLoader(pose_dataset,
                                                  batch_size=10,
                                                  num_workers=0)
    model_cfg = evaluation_model_builder.build(pose_model_cfg)
    model = model_builder.build(model_cfg[0])
    model = DataParallel(model)
    filename = "tests/pose.h5"
    # Remove a stale file so the writer starts from a clean slate.
    if os.path.isfile(filename):
        os.remove(filename)
        print("deleted old {}".format(filename))
    # CONSISTENCY FIX: reuse `filename` instead of repeating the literal,
    # so the deleted path and the written path cannot drift apart.
    write_to_h5(pose_dataloader, model, filename, ["pose"])
def test_pose_writing():
    """Build a pose model merged with dataset info and dump emb/pose to h5."""
    pose_dataset = dataset_builder.build(pose_dataset_cfg)
    pose_dataloader = torch.utils.data.DataLoader(pose_dataset,
                                                  batch_size=10,
                                                  num_workers=0)
    base_cfg = model_cfg_builder.build(pose_model_cfg)[0]
    # Merge the dataset metadata into the model config.
    merged_cfg = pose_dataset.info.copy()
    merged_cfg.update(base_cfg)
    print(merged_cfg)
    model = DataParallel(model_builder.build(merged_cfg))
    write_to_h5(pose_dataloader, model, './tests/pose.h5', ['emb', 'pose'])
def test_train_cfg(cfg_file):
    """Load a training config, build all components, and run one epoch of
    training as a smoke test, saving a checkpoint at the end."""
    with open(cfg_file) as f:
        cfg = json.load(f)

    cfg = build_config(cfg)
    print(cfg)
    train_cfg = cfg['training']
    dataloader_cfg = train_cfg['dataloader']
    model_cfg = train_cfg['model']
    optimizer_cfg = train_cfg['optimizer']
    loss_cfg = train_cfg['losses']
    scheduler_cfg = train_cfg['scheduler']
    device = torch.device('cuda')
    # NOTE(review): other call sites use dataloader_builder.build(...) —
    # confirm calling the module object directly is intended here.
    dataloader = dataloader_builder(dataloader_cfg)
    dataset = dataloader.dataset
    model = model_builder.build(model_cfg, dataset.info)

    optimizer = optimizer_builder.build(optimizer_cfg, model.parameters())

    #optimizer = torch.optim.SGD(model.parameters(), lr=eps0, momentum=0.9, weight_decay=5e-4)
    lr_scheduler = scheduler_builder.build(scheduler_cfg, optimizer)
    loss = loss_builder.build(loss_cfg)
    file_logger = log.get_file_logger()
    model = torch.nn.DataParallel(model)
    # new experiment
    model = model.train()
    trained_models = []
    while lr_scheduler.run:
        lr_scheduler.step()
        for batch_id, (data, split_info) in enumerate(dataloader):
            #print(data)
            optimizer.zero_grad()
            # NOTE(review): the CUDA copy is stored in data['imgs'] but the
            # model input below is built from the CPU tensor data['img'] —
            # verify this is intentional.
            data['imgs'] = data['img'].to(device)
            print("imgs", data['imgs'])
            imgs = Variable(data['img'], requires_grad=True)
            endpoints = model(imgs, model.module.endpoints)
            # threoretically losses could also be caluclated distributed.
            losses = loss(endpoints, data, split_info)
            print("losses", losses)
            print(torch.mean(losses))
            loss_mean = torch.mean(losses)
            loss_mean.backward()
            optimizer.step()
        # Stop after a single scheduler epoch — this is only a smoke test.
        break
    path = file_logger.save_checkpoint(model, optimizer,
                                       lr_scheduler.last_epoch)
    if path:
        trained_models.append(path)
    file_logger.close()
def main(_):
    """Build model and dataset from FLAGS and run patch-based inference,
    writing results to FLAGS.output_dir."""
    assert FLAGS.output_dir, '`output_dir` missing.'

    output_directory = FLAGS.output_dir
    tf.gfile.MakeDirs(output_directory)
    pipeline_config = read_config(FLAGS.model_config, FLAGS.data_config)

    pad_to_shape = None
    # '-1' entries mean "unconstrained dimension".
    if FLAGS.input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.input_shape.split(',')
        ]
    else:
        raise ValueError('Must supply `input_shape`')

    patch_size = None
    if FLAGS.patch_size:
        patch_size = [int(dim) for dim in FLAGS.patch_size.split(',')]
        assert len(patch_size) == 2, "patch size must be h,w"

    if FLAGS.pad_to_shape:
        pad_to_shape = [
            int(dim) if dim != '-1' else None
            for dim in FLAGS.pad_to_shape.split(',')
        ]

    label_map = (CITYSCAPES_LABEL_IDS
                 if FLAGS.label_ids else CITYSCAPES_LABEL_COLORS)

    ignore_label = pipeline_config.input_reader.ignore_label

    num_classes, segmentation_model = model_builder.build(
        pipeline_config.model, ignore_label=ignore_label, is_training=False)

    #input_reader = pipeline_config.eval_input_reader
    input_reader = pipeline_config.input_reader
    input_reader.shuffle = False
    # NOTE(review): `epoch` is not defined in this function — it must be a
    # module-level global, otherwise this raises NameError; confirm.
    input_dict = dataset_builder.build(input_reader, epoch)

    num_examples = sum(
        [r.num_examples for r in input_reader.tf_record_input_reader])
    # Cap the number of iterations via the max_iters flag.
    iters = min(num_examples, FLAGS.max_iters)

    run_inference_graph(segmentation_model, FLAGS.trained_checkpoint,
                        input_dict, iters, input_shape, pad_to_shape,
                        label_map, output_directory, num_classes, patch_size)