Code Example #1
def load_model_and_weights(args, mode, appcfg):
    log.debug("---------------------------->")
    from falcon.arch import Model as M

    archcfg = M.get_archcfg(appcfg)
    log.debug("archcfg: {}".format(archcfg))
    cmdcfg = archcfg

    # modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
    modelcfg_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_MODEL_CFG_PATH')
    log.info("modelcfg_path: {}".format(modelcfg_path))
    modelcfg = M.get_modelcfg(modelcfg_path)

    log.info("modelcfg: {}".format(modelcfg))

    num_classes_model = apputil.get_num_classes(modelcfg)
    name = modelcfg.name

    cmdcfg.name = name
    cmdcfg.config.NAME = name
    cmdcfg.config.NUM_CLASSES = num_classes_model

    dnnmod = M.get_module(cmdcfg.dnnarch)
    load_model_and_weights = M.get_module_fn(dnnmod, "load_model_and_weights")
    model = load_model_and_weights(args, mode, cmdcfg, modelcfg, appcfg)

    status = True
    return status
Code Example #2
def load_model_and_weights(mode, cmdcfg, appcfg):
    """
  Load the model and weights

  Preferences
  Device to load the neural network on.
  Useful if you're training a model on the same
  machine, in which case use CPU and leave the
  GPU for training.
  values: '/cpu:0' or '/gpu:0'
  """
    log.info("load_model_and_weights:----------------------------->")

    device = appcfg['APP']['DEVICE']
    dnncfg = get_dnncfg(cmdcfg['config'])
    log.info("device: {}".format(device))
    log.info("dnncfg: {}".format(dnncfg))

    model = None
    log_dir_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_LOGS')
    # # with tf.device(device):
    # #   model = modellib.MaskRCNN(mode=mode, config=dnncfg, model_dir=log_dir_path)

    # # weights = cmdcfg['weights']
    # # if weights and weights.lower() == "last":
    # #     weights_path = model.find_last()
    # # else:
    # #   weights_path = cmdcfg['weights_path']

    # # log.debug("Loading weights from weights_path: {}".format(weights_path))

    # # ## TODO: pass this error to the router for API consumption
    # # if not os.path.exists(weights_path) or not os.path.isfile(weights_path):
    # #   raise Exception('weights_path does not exists: {}'.format(weights_path))

    # # if mode == appcfg['APP']['TRAIN_MODE']:
    # #   model.load_weights(weights_path, by_name=cmdcfg['load_weights']['by_name'], exclude=cmdcfg['load_weights']['exclude'])
    # # elif mode == appcfg['APP']['TEST_MODE']:
    # #   model.load_weights(weights_path, by_name=cmdcfg['load_weights']['by_name'])

    # # log.info("Loaded weights successfully from weights_path: {}".format(weights_path))

    ## used earlier for loading the model in the ckpt format
    ## Ref: lanenet-lane-detection/tools/predict.py
    # net = lanenet.LaneNet(phase='test', net_flag='vgg')

    weights_path = cmdcfg['weights_path']
    log.debug("Loading weights from weights_path: {}".format(weights_path))

    graph = load_graph(weights_path)

    return graph
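
The load_graph helper called above is not part of this snippet. A minimal sketch of what it could look like, assuming weights_path points to a frozen TensorFlow .pb graph (in line with the lanenet-lane-detection reference in the comments):

import tensorflow as tf

def load_graph(frozen_graph_path):
    # Read the serialized GraphDef from the frozen .pb file
    with tf.io.gfile.GFile(frozen_graph_path, "rb") as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
    # Import it into a fresh Graph that can then be wrapped in a
    # tf.compat.v1.Session for inference
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name="")
    return graph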
Code Example #3
def test_load_model(args, mode, appcfg):
    log.debug("---------------------------->")
    from falcon.arch import Model as M

    archcfg = M.get_archcfg(appcfg)
    log.debug("archcfg: {}".format(archcfg))
    cmdcfg = archcfg

    modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH,
                                 cmdcfg.model_info)
    log.info("modelcfg_path: {}".format(modelcfg_path))
    modelcfg = M.get_modelcfg(modelcfg_path)
    class_names = apputil.get_class_names(modelcfg)
    log.debug("class_names: {}".format(class_names))

    num_classes = len(class_names)
    name = modelcfg.name

    cmdcfg.name = name
    cmdcfg.config.NAME = name
    cmdcfg.config.NUM_CLASSES = num_classes

    dnnmod = M.get_module(cmdcfg.dnnarch)

    weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
    cmdcfg['weights_path'] = weights_path

    load_model_and_weights = M.get_module_fn(dnnmod, "load_model_and_weights")
    model = load_model_and_weights(mode, cmdcfg, appcfg)

    log.debug("model: {}".format(model))

    # modelsummary_path = os.path.join(appcfg.PATHS.AI_LOGS, name+"-summary.txt")
    # log.debug("modelsummary_path: {}".format(modelsummary_path))

    # # msummary = model.keras_model.summary()
    # # log.debug("model.keras_model.summary(): {}".format(msummary))
    # # with open(modelsummary_path, 'w') as fw:
    # #   fw.write(msummary)

    return
Code Example #4
def get_dataset_dicts(cfg, class_ids, id_map, imgs, anns, bbox_mode):
    # print(anns)

    ## => quick hack for running detectron2 with gaze data
    ## TODO: Convert to COCO -> if done in pre-processing, this step not required here

    imgs_anns = list(zip(imgs, anns))
    dataset_dicts = []
    extra_annotation_keys = None

    ## TODO: iscrowd is available in attributes as something like group
    ann_keys = ["iscrowd", "bbox", "keypoints", "category_id", "lbl_id"
                ] + (extra_annotation_keys or [])
    num_instances_without_valid_segmentation = 0

    # print("imgs_anns: {}".format(imgs_anns[:2]))

    for (img_dict, anno_dict_list) in imgs_anns:
        # print("img_dict: {}".format(img_dict))
        # print("anno_dict_list: {}".format(len(anno_dict_list)))

        filtered_anns = []
        for key in anno_dict_list:
            if key["ant_type"] == "polygon":
                filtered_anns.append(key)

        # print("img_dict: {}".format(img_dict))
        # print("anno_dict_list: {}".format(len(anno_dict_list)))
        # print("filtered_anns: {}".format(len(filtered_anns)))
        # print("filtered_anns: {}".format(filtered_anns))
        # print("anno_dict_list: {}".format(anno_dict_list))
        if len(filtered_anns) != 0:
            image_path = apputil.get_abs_path(
                cfg, img_dict, 'AI_ANNON_DATA_HOME_LOCAL')  ##image_root
            filepath = os.path.join(image_path, img_dict['filename'])
            record = {}
            record["file_name"] = filepath
            record["height"] = img_dict["height"]
            record["width"] = img_dict["width"]
            image_id = record["image_id"] = img_dict["img_id"]  ## coco: id

            objs = []
            # for anno in anno_dict_list:
            for anno in filtered_anns:
                if anno["ant_type"] == "polygon":
                    assert anno["img_id"] == image_id  ## image_id
                    obj = {key: anno[key] for key in ann_keys if key in anno}
                    ## TODO: convert bbox to coco format
                    _bbox = obj['bbox']

                    ##TODO: verify what is BoxMode.XYWH_ABS
                    coco_frmt_bbox = [
                        _bbox['xmin'], _bbox['ymin'], _bbox['width'],
                        _bbox['height']
                    ]
                    #print("coco_frmt_bbox: {}".format(coco_frmt_bbox))
                    obj['bbox'] = coco_frmt_bbox
                    ## TODO: get polygon from shape_attributes and convert to coco format
                    #segm = anno.get("segmentation", None)

                    # assert not anno["region_attributes"]
                    # segm=None

                    # if anno["ant_type"]=="polygon":
                    anno = anno["shape_attributes"]
                    # print("anno: {}".format(anno))
                    px = anno["all_points_x"]
                    py = anno["all_points_y"]
                    poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
                    poly = [p for xy in poly for p in xy]

                    segm = [poly]

                    if segm:  # either list[list[float]] or dict(RLE)
                        if not isinstance(segm, dict):
                            # filter out invalid polygons (< 3 points)
                            segm = [
                                poly for poly in segm
                                if len(poly) % 2 == 0 and len(poly) >= 6
                            ]
                            if len(segm) == 0:
                                num_instances_without_valid_segmentation += 1
                                continue  # ignore this instance
                            obj["segmentation"] = segm

                    obj["bbox_mode"] = bbox_mode
                    obj["category_id"] = id_map[obj["lbl_id"]]  ## category_id
                    objs.append(obj)

            record["annotations"] = objs
            dataset_dicts.append(record)

    return dataset_dicts
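
For context, a list of dicts in this format is what detectron2 expects from a dataset loader, and it would typically be registered with the dataset catalog before training. A minimal sketch, where the dataset name "hmd_train" and the class list are hypothetical placeholders and the remaining arguments come from the surrounding pipeline:

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode

# Register a loader that returns the dataset dicts built above
DatasetCatalog.register(
    "hmd_train",
    lambda: get_dataset_dicts(cfg, class_ids, id_map, imgs, anns, BoxMode.XYWH_ABS))
# Attach the class names so visualization and evaluation can map category ids
MetadataCatalog.get("hmd_train").set(thing_classes=["class_a", "class_b"])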
Code Example #5
def evaluate(args, mode, appcfg):
  """prepare the report configuration like paths, report names etc. and calls the report generation function
  """
  log.debug("evaluate---------------------------->")

  subset = args.eval_on
  iou_threshold = args.iou
  log.debug("subset: {}".format(subset))
  log.debug("iou_threshold: {}".format(iou_threshold))
  get_mask = True
  auto_show = False

  datacfg = apputil.get_datacfg(appcfg)
  dbcfg = apputil.get_dbcfg(appcfg)

  log.debug("appcfg: {}".format(appcfg))
  log.debug("datacfg: {}".format(datacfg))
  
  dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
  colors = viz.random_colors(len(class_names))

  log.debug("-------")
  log.debug("len(colors), colors: {},{}".format(len(colors), colors))

  log.debug("class_names: {}".format(class_names))
  log.debug("len(class_names): {}".format(len(class_names)))
  log.debug("num_classes: {}".format(num_classes))
  log.debug("num_images: {}".format(num_images))

  log.debug("len(dataset.image_info): {}".format(len(dataset.image_info)))
  log.debug("len(dataset.image_ids): {}".format(len(dataset.image_ids)))
  # log.debug("dataset: {}".format(vars(dataset)))

  log.debug("-------")
  
  # log.debug("TODO: color: cc")
  # cc = dict(zip(class_names,colors))

  name = dataset.name
  datacfg.name = name
  datacfg.classes = class_names
  datacfg.num_classes = num_classes
  
  archcfg = apputil.get_archcfg(appcfg)
  log.debug("archcfg: {}".format(archcfg))
  cmdcfg = archcfg

  if 'save_viz_and_json' not in cmdcfg:
    cmdcfg.save_viz_and_json = False
  
  save_viz = args.save_viz
  log.debug("save_viz: {}".format(save_viz))
  cmdcfg.save_viz_and_json = save_viz

  modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
  log.info("modelcfg_path: {}".format(modelcfg_path))
  modelcfg = apputil.get_modelcfg(modelcfg_path)

  ## for prediction, get the label information from the model information
  class_names_model = apputil.get_class_names(modelcfg)
  log.debug("class_names_model: {}".format(class_names_model))

  cmdcfg.name = name
  cmdcfg.config.NAME = modelcfg.name
  cmdcfg.config.NUM_CLASSES = len(class_names_model)

  # class_names = apputil.get_class_names(datacfg)
  # log.debug("class_names: {}".format(class_names))
  weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
  cmdcfg['weights_path'] = weights_path

  ## Prepare directory structure and filenames for reporting the evaluation results
  now = datetime.datetime.now()
  ## create log directory based on timestamp for evaluation reporting
  timestamp = "{:%d%m%y_%H%M%S}".format(now)
  datacfg_ts = datacfg.timestamp if 'TIMESTAMP' in datacfg else timestamp

  save_viz_and_json = cmdcfg.save_viz_and_json
  # iou_threshold = cmdcfg.iou_threshold
  if 'evaluate_no_of_result' not in cmdcfg:
    evaluate_no_of_result = -1
  else:
    evaluate_no_of_result = cmdcfg.evaluate_no_of_result


  def clean_iou(iou):
    return str("{:f}".format(iou)).replace('.','')[:3]

  path = appcfg['PATHS']['AI_LOGS']
  # evaluate_dir = datacfg_ts+"-evaluate_"+clean_iou(iou_threshold)+"-"+name+"-"+subset+"-"+timestamp
  evaluate_dir = "evaluate_"+clean_iou(iou_threshold)+"-"+name+"-"+subset+"-"+timestamp
  filepath = os.path.join(path, cmdcfg.dnnarch, evaluate_dir)
  log.debug("filepath: {}".format(filepath))

  common.mkdir_p(filepath)
  for d in ['splash', 'mask', 'annotations', 'viz']:
    common.mkdir_p(os.path.join(filepath,d))

  ## gt - ground truth
  ## pr/pred - prediction

  def get_cfgfilename(cfg_filepath):
    return cfg_filepath.split(os.path.sep)[-1]

  ## generate the summary on the evaluation run
  evaluate_run_summary = defaultdict(list)
  evaluate_run_summary['name'] = name
  evaluate_run_summary['execution_start_time'] = timestamp
  evaluate_run_summary['subset'] = subset
  evaluate_run_summary['total_labels'] = num_classes
  evaluate_run_summary['total_images'] = num_images
  evaluate_run_summary['evaluate_no_of_result'] = evaluate_no_of_result
  evaluate_run_summary['evaluate_dir'] = evaluate_dir
  evaluate_run_summary['dataset'] = get_cfgfilename(appcfg.DATASET[appcfg.ACTIVE.DATASET].cfg_file)
  evaluate_run_summary['arch'] = get_cfgfilename(appcfg.ARCH[appcfg.ACTIVE.ARCH].cfg_file)
  evaluate_run_summary['model'] = cmdcfg['model_info']

  ## classification report and confusion matrix - json and csv
  ## generate the filenames for what reports to be generated
  reportcfg = {
    'filepath':filepath
    ,'evaluate_run_summary_reportfile':os.path.join(filepath, "evaluate_run_summary_rpt-"+subset)
    ,'classification_reportfile':os.path.join(filepath, "classification_rpt-"+subset)
    ,'confusionmatrix_reportfile':os.path.join(filepath, "confusionmatrix_rpt-"+subset)
    ,'iou_threshold':iou_threshold
    ,'evaluate_run_summary':evaluate_run_summary
    ,'save_viz_and_json':save_viz_and_json
    ,'evaluate_no_of_result':evaluate_no_of_result
  }

  log.debug("reportcfg: {}".format(reportcfg))

  dnnmod = apputil.get_module(cmdcfg.dnnarch)

  fn_evaluate = apputil.get_module_fn(dnnmod, "evaluate")

  evaluate_run_summary = fn_evaluate(mode, cmdcfg, appcfg, modelcfg, dataset, datacfg, class_names, reportcfg, get_mask)

  return evaluate_run_summary
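
To illustrate the resulting report layout: with hypothetical inputs iou_threshold=0.5, name="hmd", subset="val" and timestamp "010120_120000", clean_iou(0.5) returns "050" ("0.500000" with the dot removed, truncated to three characters), so evaluate() would create:

# <AI_LOGS>/<dnnarch>/evaluate_050-hmd-val-010120_120000/
#     splash/  mask/  annotations/  viz/
# with report files written inside it by the arch-specific evaluate (see Code Example #12):
#     evaluate_run_summary_rpt-val.json
#     classification_rpt-val-per_dataset.json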
Code Example #6
def predict(args, mode, appcfg):
  """Executes the prediction and stores the generated results
  TODO:
  1. create the prediction configuration 
  2. PDB specification
  """

  log.debug("predict---------------------------->")

  archcfg = apputil.get_archcfg(appcfg)
  log.debug("cmdcfg/archcfg: {}".format(archcfg))
  cmdcfg = archcfg

  if 'save_viz_and_json' not in cmdcfg:
    cmdcfg.save_viz_and_json = False
  
  save_viz = args.save_viz
  show_bbox = args.show_bbox
  log.debug("save_viz: {}".format(save_viz))
  cmdcfg.save_viz_and_json = save_viz

  modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
  log.info("modelcfg_path: {}".format(modelcfg_path))
  modelcfg = apputil.get_modelcfg(modelcfg_path)

  log.debug("modelcfg: {}".format(modelcfg))
  api_model_key = apputil.get_api_model_key(modelcfg)
  log.debug("api_model_key: {}".format(api_model_key))
  ## for prediction, get the label information from the model information
  class_names = apputil.get_class_names(modelcfg)
  log.debug("class_names: {}".format(class_names))

  num_classes = len(class_names)
  name = modelcfg.name

  cmdcfg.name = name
  cmdcfg.config.NAME = name
  cmdcfg.config.NUM_CLASSES = num_classes

  dnnmod = apputil.get_module(cmdcfg.dnnarch)

  ## TODO: clean up the hard-coding
  cmdcfg['log_dir'] = 'predict'
  log_dir_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_LOGS')
  cmdcfg['log_dir_path'] = log_dir_path

  weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
  cmdcfg['weights_path'] = weights_path

  load_model_and_weights = apputil.get_module_fn(dnnmod, "load_model_and_weights")
  model = load_model_and_weights(mode, cmdcfg, appcfg)

  path_dtls = apputil.get_path_dtls(args, appcfg)
  log.debug("path_dtls: {}".format(path_dtls))
  for t in ["images", "videos"]:
    if path_dtls[t] and len(path_dtls[t]) > 0:
      fname = "detect_from_"+t
      log.info("fname: {}".format(fname))
      fn = getattr(this, fname, None)
      if fn:
        file_names, res = fn(appcfg, dnnmod, path_dtls[t], path_dtls['path'], model, class_names, cmdcfg, api_model_key, show_bbox)
        # log.debug("len(file_names), file_names: {}, {}".format(len(file_names), file_names))
      else:
        log.error("Unkown fn: {}".format(fname))
  
  # return file_names, res
  return
Code Example #7
def train(args, mode, appcfg):
  log.debug("train---------------------------->")

  datacfg = apputil.get_datacfg(appcfg)

  ## Training dataset.
  subset = "train"
  log.info("subset: {}".format(subset))
  dbcfg = apputil.get_dbcfg(appcfg)

  dataset_train, num_classes_train, num_images_train, class_names_train, total_stats_train, total_verify_train = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
  colors = viz.random_colors(len(class_names_train))
  
  log.info("-------")
  log.info("len(colors), colors: {},{}".format(len(colors), colors))

  log.info("subset, class_names_train: {}, {}".format(subset, class_names_train))
  log.info("subset, len(class_names_train): {}, {}".format(subset, len(class_names_train)))
  log.info("subset, num_classes_train: {}, {}".format(subset, num_classes_train))
  log.info("subset, num_images_train: {}, {}".format(subset, num_images_train))

  log.info("subset, len(dataset_train.image_info): {}, {}".format(subset, len(dataset_train.image_info)))
  log.info("subset, len(dataset_train.image_ids): {}, {}".format(subset, len(dataset_train.image_ids)))

  ## Validation dataset
  subset = "val"
  log.info("subset: {}".format(subset))
  dataset_val, num_classes_val, num_images_val, class_names_val, total_stats_val, total_verify_val = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
  
  log.info("-------")
  log.info("subset, class_names_val: {}, {}".format(subset, class_names_val))
  log.info("subset, len(class_names_val): {}, {}".format(subset, len(class_names_val)))
  log.info("subset, num_classes_val: {}, {}".format(subset, num_classes_val))
  log.info("subset, num_images_val: {}, {}".format(subset, num_images_val))
  
  log.info("subset, len(dataset_val.image_info): {}, {}".format(subset, len(dataset_val.image_info)))
  log.info("subset, len(dataset_val.image_ids): {}, {}".format(subset, len(dataset_val.image_ids)))

  log.info("-------")

  ## Ensure the label sequence and class_names of the train and val datasets are exactly the same; if not, abort training
  assert class_names_train == class_names_val

  archcfg = apputil.get_archcfg(appcfg)
  log.debug("archcfg: {}".format(archcfg))
  cmdcfg = archcfg

  name = dataset_train.name

  ## generate the modelinfo template to be used for evaluate and prediction
  modelinfocfg = {
    'classes': class_names_train.copy()
    ,'classinfo': None
    ,'config': cmdcfg.config.copy()
    ,'dataset': cmdcfg.dbname
    ,'dbname': cmdcfg.dbname
    ,'dnnarch': cmdcfg.dnnarch
    ,'framework_type': cmdcfg.framework_type
    ,'id': None
    ,'load_weights': cmdcfg.load_weights.copy()
    ,'name': name
    ,'num_classes': num_classes_train
    ,'problem_id': None
    ,'rel_num': None
    ,'weights': None
    ,'weights_path': None
    ,'log_dir': None
    ,'checkpoint_path': None
    ,'model_info': None
    ,'timestamp': None
    ,'creator': None
  }

  datacfg.name = name
  datacfg.classes = class_names_train
  datacfg.num_classes = num_classes_train

  cmdcfg.name = name
  cmdcfg.config.NAME = name
  cmdcfg.config.NUM_CLASSES = num_classes_train

  modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
  log.info("modelcfg_path: {}".format(modelcfg_path))
  modelcfg = apputil.get_modelcfg(modelcfg_path)

  log_dir_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_LOGS')
  cmdcfg['log_dir_path'] = log_dir_path

  weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
  cmdcfg['weights_path'] = weights_path

  dnnmod = apputil.get_module(cmdcfg.dnnarch)
  load_model_and_weights = apputil.get_module_fn(dnnmod, "load_model_and_weights")
  model = load_model_and_weights(mode, cmdcfg, appcfg)  
  
  modelinfocfg['log_dir'] = model.log_dir
  modelinfocfg['checkpoint_path'] = model.checkpoint_path

  if 'creator' in cmdcfg:
    modelinfocfg['creator'] = cmdcfg['creator']

  log.info("modelinfocfg: {}".format(modelinfocfg))

  fn_create_modelinfo = apputil.get_module_fn(dnnmod, "create_modelinfo")
  modelinfo = fn_create_modelinfo(modelinfocfg)
  
  create_modelinfo = args.create_modelinfo
  try:
    if not create_modelinfo:
      log.info("Training...")
      fn_train = apputil.get_module_fn(dnnmod, "train")
      fn_train(model, dataset_train, dataset_val, cmdcfg)
      log.info("Training Completed!!!")
  finally:
    ## save modelinfo
    ## populate the relative weights_path of the last model from the training if any model is generated, otherwise None

    logs_path = appcfg['PATHS']['AI_LOGS']
    dnn = cmdcfg.dnnarch
  
    ##TODO

    list_of_files = glob.glob(os.path.join(model.log_dir, dnn + '*'))  ## '*' matches everything; use e.g. '*.h5' if a specific format is needed
    latest_file = max(list_of_files, key=os.path.getctime)
    new_weights_path = re.sub(re.escape(logs_path + '/'), '', latest_file)

    modelinfo['weights_path'] = new_weights_path

    modelinfo_filepath = apputil.get_abs_path(appcfg, modelinfo, 'AI_MODEL_CFG_PATH')
    common.yaml_safe_dump(modelinfo_filepath, modelinfo)
    log.info("TRAIN:MODELINFO_FILEPATH: {}".format(modelinfo_filepath))
    log.info("---x--x--x---")

  return modelinfo_filepath
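
As an aside, the same "weights path relative to AI_LOGS" step could be done without the regex substitution, assuming latest_file always lives under logs_path:

import os

# equivalent to the re.sub(...) call above for files under logs_path
new_weights_path = os.path.relpath(latest_file, logs_path)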
Code Example #8
def get_modelcfg(cfg, api_model_key=''):
    """
  TODO
  - parameter checks as api_model_key is the client input
  - generic checks for SQLInjection
  - handle multiple concurrent connections
  """
    log.info("----------------------------->")
    log.info("api_model_key: {}".format(api_model_key))

    API_MODELINFO_TABEL = cfg['APP']['API_MODELINFO_TABEL']

    query = {}
    modelcfg = None
    log_dir = 'api'

    if api_model_key:
        log_dir = os.path.join(log_dir, api_model_key)

        model_pkey = api_model_key.split('-')
        log.info("model_pkey: {}".format(model_pkey))

        if len(model_pkey) > 0 and model_pkey[0]:
            query['org_name'] = model_pkey[0]

        if len(model_pkey) > 1 and model_pkey[1]:
            query['problem_id'] = model_pkey[1]

        ## TODO: release number change from integer to string
        if len(model_pkey) > 2 and model_pkey[2]:
            # query['rel_num'] = int(model_pkey[2])
            query['rel_num'] = model_pkey[2]

    DBCFG = cfg['APP']['DBCFG']
    OASISCFG = DBCFG['OASISCFG']
    log.debug("OASISCFG: {}".format(OASISCFG))
    mclient = MongoClient('mongodb://' + OASISCFG['host'] + ':' +
                          str(OASISCFG['port']))
    dbname = OASISCFG['dbname']
    db = mclient[dbname]

    log.info("dbname, query: {}, {}".format(dbname, query))
    modelinfo = db.get_collection(API_MODELINFO_TABEL)

    if len(query) == 3:
        modelcfg = modelinfo.find_one(query, {'_id': 0})
        if modelcfg:
            modelcfg['log_dir'] = log_dir

            log_dir_path = apputil.get_abs_path(cfg, modelcfg, 'AI_LOGS')
            weights_path = apputil.get_abs_path(cfg, modelcfg,
                                                'AI_WEIGHTS_PATH')

            modelcfg['log_dir_path'] = log_dir_path
            modelcfg['weights_path'] = weights_path
    else:
        modelcfg = list(modelinfo.find(query, {'_id': 0}))

    log.info("modelcfg: {}".format(modelcfg))

    mclient.close()

    if not isinstance(modelcfg, list):
        ## quick fix for name not present in config
        if modelcfg and modelcfg['config']:
            if 'name' in modelcfg:
                modelcfg['config']['NAME'] = modelcfg['name']
            if 'num_classes' in modelcfg:
                modelcfg['config']['NUM_CLASSES'] = modelcfg['num_classes']

    return modelcfg
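
A small usage sketch of the key format assumed by get_modelcfg; the key value here is hypothetical:

# api_model_key is expected in the form "<org_name>-<problem_id>-<rel_num>"
api_model_key = "acme-roadsign-3"
org_name, problem_id, rel_num = api_model_key.split('-')
query = {'org_name': org_name, 'problem_id': problem_id, 'rel_num': rel_num}
# With all three parts present (len(query) == 3), get_modelcfg calls
# modelinfo.find_one(query, {'_id': 0}); otherwise it falls back to find()
# and returns a list of matching model configurations.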
Code Example #9
def get_dataset_dicts(cfg, class_ids, id_map, imgs, anns, bbox_mode, config):
    # print(anns)

    ## => quick hack for running detectron2 with gaze data

    imgs_anns = list(zip(imgs, anns))
    dataset_dicts = []
    extra_annotation_keys = None

    ann_keys = ["iscrowd", "bbox", "keypoints", "category_id", "lbl_id"
                ] + (extra_annotation_keys or [])
    num_instances_without_valid_segmentation = 0

    # log.debug("imgs_anns: {}".format(imgs_anns[:2]))

    for (img_dict, anno_dict_list) in imgs_anns:
        # log.debug("img_dict: {}".format(img_dict))
        # log.debug("anno_dict_list: {}".format(len(anno_dict_list)))

        if config.MODEL.MASK_ON:

            filtered_anns = []
            for key in anno_dict_list:
                if key["ant_type"] == "polygon":
                    filtered_anns.append(key)

            # log.debug("img_dict: {}".format(img_dict))
            # log.debug("anno_dict_list: {}".format(len(anno_dict_list)))
            # log.debug("filtered_anns: {}".format(len(filtered_anns)))
            # log.debug("filtered_anns: {}".format(filtered_anns))
            # log.debug("anno_dict_list: {}".format(anno_dict_list))

            if len(filtered_anns) != 0:
                image_path = apputil.get_abs_path(
                    cfg, img_dict, 'AI_ANNON_DATA_HOME_LOCAL')  ##image_root
                filepath = os.path.join(image_path, img_dict['filename'])
                record = {}
                record["file_name"] = filepath
                record["image_name"] = img_dict['filename']
                # record["file_path"] = filepath
                record["height"] = img_dict["height"]
                record["width"] = img_dict["width"]
                image_id = record["image_id"] = img_dict["img_id"]  ## coco: id

                objs = []
                # for anno in anno_dict_list:
                for anno in filtered_anns:
                    assert anno["img_id"] == image_id  ## image_id
                    obj = {key: anno[key] for key in ann_keys if key in anno}
                    _bbox = obj['bbox']

                    coco_frmt_bbox = [
                        _bbox['xmin'], _bbox['ymin'], _bbox['width'],
                        _bbox['height']
                    ]

                    # log.debug("coco_frmt_bbox: {}".format(coco_frmt_bbox))

                    obj['bbox'] = coco_frmt_bbox
                    anno = anno["shape_attributes"]

                    # log.debug("anno: {}".format(anno))

                    px = anno["all_points_x"]
                    py = anno["all_points_y"]
                    poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
                    poly = [p for xy in poly for p in xy]
                    segm = [poly]
                    segm = [
                        poly for poly in segm
                        if len(poly) % 2 == 0 and len(poly) >= 6
                    ]
                    obj["segmentation"] = segm
                    obj["bbox_mode"] = bbox_mode
                    obj["category_id"] = id_map[obj["lbl_id"]]  ## category_id
                    objs.append(obj)

                record["annotations"] = objs
                dataset_dicts.append(record)

        else:

            image_path = apputil.get_abs_path(
                cfg, img_dict, 'AI_ANNON_DATA_HOME_LOCAL')  ##image_root
            filepath = os.path.join(image_path, img_dict['filename'])
            record = {}
            record["file_name"] = filepath
            record["image_name"] = img_dict['filename']
            record["height"] = img_dict["height"]
            record["width"] = img_dict["width"]
            image_id = record["image_id"] = img_dict["img_id"]  ## coco: id

            objs = []
            for anno in anno_dict_list:
                assert anno["img_id"] == image_id  ## image_id
                obj = {key: anno[key] for key in ann_keys if key in anno}
                _bbox = obj['bbox']
                coco_frmt_bbox = [
                    _bbox['xmin'], _bbox['ymin'], _bbox['width'],
                    _bbox['height']
                ]

                # log.debug("coco_frmt_bbox: {}".format(coco_frmt_bbox))

                obj['bbox'] = coco_frmt_bbox
                segm = None
                obj["bbox_mode"] = bbox_mode
                obj["category_id"] = id_map[obj["lbl_id"]]  ## category_id
                # obj["segmentation"] = segm
                objs.append(obj)

            record["annotations"] = objs
            dataset_dicts.append(record)

    return dataset_dicts
Code Example #10
    def load_hmd(self, appcfg, dbcfg, datacfg, subset):
        """
    - assume default file extension is json
    TODO:
    - csv file loading
    """
        log.info("-------------------------------->")
        log.debug("datacfg: {}".format(datacfg))

        class_ids = datacfg.class_ids if 'class_ids' in datacfg and datacfg[
            'class_ids'] else []
        annon_type = datacfg.annon_type
        name = datacfg.name

        # class_map = datacfg.class_map if datacfg.class_map else None

        annon = self.annon = ANNON(dbcfg, datacfg, subset=subset)

        class_ids = annon.getCatIds(catIds=class_ids)
        image_ids = annon.getImgIds(catIds=class_ids)

        # log.debug("subset, image_ids: {}, {}".format(subset, image_ids))
        log.debug("subset, class_ids: {}, {}".format(subset, class_ids))

        ## Add images
        total_annotation = 0
        total_maskarea = 0
        total_bboxarea = 0
        data_read_threshold = datacfg.data_read_threshold if 'data_read_threshold' in datacfg else -1
        log.debug("data_read_threshold: {}".format(data_read_threshold))

        images = annon.loadImgs(ids=image_ids)
        for i, img in enumerate(images):
            if data_read_threshold == i:
                log.info("Threshold reached: i: {}".format(i))
                break

            # log.debug("img: {}".format(img))
            image_path = apputil.get_abs_path(appcfg, img,
                                              'AI_ANNON_DATA_HOME_LOCAL')
            filepath = os.path.join(image_path, img['filename'])
            # log.debug("filepath: {}".format(filepath))
            ## TBD: width and height from a['file_attributes']['width'], a['file_attributes']['height']
            if os.path.exists(filepath):
                try:
                    ##log.info("Image file: {}".format(filepath))
                    if 'height' not in img or 'width' not in img:
                        im = skimage.io.imread(filepath)
                        height, width = im.shape[:2]
                    else:
                        height, width = img['height'], img['width']
                    # width = annon.imgs[i]["width"]
                    # height = annon.imgs[i]["height"]
                    img_id = img['img_id']

                    annotations = annon.loadAnns(
                        annon.getAnnIds(imgIds=[img_id], catIds=class_ids))
                    total_annotation += len(annotations)

                    self.add_image(name,
                                   image_id=name + '-' + str(img_id),
                                   path=filepath,
                                   width=width,
                                   height=height,
                                   annon_type=annon_type,
                                   annotations=annotations)
                except Exception:
                    log.info(
                        "Error Reading file or adding annotation: {}".format(
                            filepath))
                    log.error("Exception occurred", exc_info=True)
            else:
                log.info("file does not exists: {}".format(filepath))

        total_img = len(image_ids)
        total_classes = len(class_ids)

        classinfo = annon.loadCats(ids=class_ids)

        for index, ci in enumerate(classinfo):
            class_idx = index + 1
            class_source, class_lbl_id, class_name = ci['source'], ci[
                'lbl_id'], ci['name']
            log.info(
                "Adding: class_source, class_lbl_id, class_name, class_idx: {}, {}, {}, {}"
                .format(class_source, class_lbl_id, class_name, class_idx))
            self.add_class(source=class_source,
                           idx=class_idx,
                           class_name=class_name,
                           lbl_id=class_lbl_id,
                           color=None)

        log.info("Total Images: {}".format(total_img))
        log.info("Total Annotations: {}".format(total_annotation))
        log.info("Total Classes without BG: {}".format(total_classes))
        log.info("Total Classes including BG: {}".format(len(self.classinfo)))
        log.info("Classinfo: {}".format(self.classinfo))
        log.info("-------")

        return total_img, total_annotation, total_classes, annon
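
For reference, the ANNON accessors used above (getCatIds, getImgIds, loadImgs, getAnnIds, loadAnns, loadCats) mirror the pycocotools COCO API; a minimal sketch of the equivalent calls against a standard COCO annotation file (the file path is a placeholder):

from pycocotools.coco import COCO

coco = COCO("annotations/instances_train.json")
cat_ids = coco.getCatIds()
img_ids = coco.getImgIds(catIds=cat_ids)
imgs = coco.loadImgs(img_ids)
anns = coco.loadAnns(coco.getAnnIds(imgIds=[img_ids[0]], catIds=cat_ids))
cats = coco.loadCats(cat_ids)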
Code Example #11
    def get_image_path(self, img):
        image_path = apputil.get_abs_path(self.appcfg, img,
                                          'AI_ANNON_DATA_HOME_LOCAL')
        log.debug("image_path: {}".format(image_path))
        return image_path
Code Example #12
def evaluate(mode,
             cmdcfg,
             appcfg,
             modelcfg,
             dataset,
             datacfg,
             class_names,
             reportcfg,
             get_mask=True,
             auto_show=False):
    """API
  Execute the evaluation and generates the evaluation reports classification report
  with differet scores confusion matrix summary report

  Ref:
  https://github.com/matterport/Mask_RCNN/blob/master/samples/shapes/train_shapes.ipynb
  """
    log.info("---------------------------->")

    dnncfg = get_dnncfg(cmdcfg.config)

    log_dir_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_LOGS')
    cmdcfg['log_dir_path'] = log_dir_path
    model = load_model_and_weights(mode, cmdcfg, appcfg)

    save_viz_and_json = reportcfg['save_viz_and_json']
    evaluate_no_of_result = reportcfg['evaluate_no_of_result']
    filepath = reportcfg['filepath']
    evaluate_run_summary = reportcfg['evaluate_run_summary']

    log.info("evaluate_no_of_result: {}".format(evaluate_no_of_result))

    detection_on_dataset = []
    ## TODO: put at right place
    iou_threshold_input = reportcfg['iou_threshold']
    # iou_thresholds = None
    # iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
    iou_thresholds = np.arange(0.5, 1.0, 0.05)
    size = len(iou_thresholds)
    _mAPs, _precisions, _recalls = size * [None], size * [None], size * [None]
    gt_total_annotation = 0
    pred_total_annotation = 0
    remaining_num_images = -1
    image_ids = dataset.image_ids
    num_images = len(image_ids)
    pred_match_total_annotation = []

    colors = viz.random_colors(len(class_names))
    log.info("len(colors), colors: {},{}".format(len(colors), colors))

    cc = dict(zip(class_names, colors))

    ## if an error occurs while iterating through the dataset, all the hard work is lost;
    ## therefore, save the data to disk in the finally clause
    via_jsonres = {}
    imagelist = []

    class_ids_dataset = dataset.class_ids
    class_names_model = modelcfg['classes']
    class_ids_model = np.arange(len(class_names_model))

    log.debug("class_names dataset: {}".format(class_names))
    log.debug("class_names_model: {}".format(class_names_model))
    log.debug("class_ids_dataset: {}".format(class_ids_dataset))
    log.debug("class_ids_model: {}".format(class_ids_model))

    ## class_names consists of BG at index 0
    ## class_names_model consists of BG at index 0
    class_ids_map = False
    class_names_common = class_names.copy()
    if class_names != modelcfg['classes']:
        class_ids_map = True
        class_names_common, class_ids_common_dataset, class_ids_common_model, gt_to_model_map = class_ids_of_model_to_dataset(
            np.array(class_names), class_ids_dataset,
            np.array(class_names_model), class_ids_model)

        log.info("class_names_common: {}".format(class_names_common))
        log.info(
            "class_ids_common_dataset: {}".format(class_ids_common_dataset))
        log.info("class_ids_common_model: {}".format(class_ids_common_model))
        ## TODO: Exception handling: if class_ids_of_model_to_dataset length is 1 then only BG is common

    class_names = np.array(class_names)

    T0 = time.time()

    try:
        for i, image_id in enumerate(image_ids):
            log.debug("-------")

            filepath_image_in = dataset.image_reference(image_id)
            image_filename = filepath_image_in.split(os.path.sep)[-1]
            image_name_without_ext = image_filename.split('.')[0]

            imagelist.append(filepath_image_in)
            log.debug("Running on {}".format(image_filename))

            if evaluate_no_of_result == i:
                log.info("evaluate_no_of_result reached: i: {}\n".format(i))
                break

            remaining_num_images = evaluate_no_of_result if evaluate_no_of_result and evaluate_no_of_result > 0 else num_images
            remaining_num_images = remaining_num_images - i - 1
            log.info(
                "To be evaluated remaining_num_images:...................{}".
                format(remaining_num_images))

            t0 = time.time()

            # im, gt_class_ids, gt_boxes, gt_masks, gt_active_class_ids = load_image_gt_without_resizing(dataset, datacfg, dnncfg, image_id)
            im, gt_image_meta, gt_class_ids, gt_boxes, gt_masks = modellib.load_image_gt(
                dataset, datacfg, dnncfg, image_id, use_mini_mask=False)
            molded_images = np.expand_dims(modellib.mold_image(im, dnncfg), 0)

            if class_ids_map:
                log.debug("Before gt_class_id_map...:")
                log.debug(
                    "len(gt_class_ids): {}\nTotal Unique classes: len(set(gt_class_ids)): {}\ngt_class_ids: {}"
                    .format(len(gt_class_ids), len(set(gt_class_ids)),
                            gt_class_ids))

                for _i, gt_id in enumerate(gt_class_ids):
                    gt_class_ids[_i] = gt_to_model_map[gt_id]

                log.debug("After gt_class_id_map...:")
                log.debug(
                    "len(gt_class_ids): {}\nTotal Unique classes: len(set(gt_class_ids)): {}\ngt_class_ids: {}"
                    .format(len(gt_class_ids), len(set(gt_class_ids)),
                            gt_class_ids))

            t1 = time.time()
            time_taken_imread = (t1 - t0)
            log.debug('Total time taken in time_taken_imread: %f seconds' %
                      (time_taken_imread))

            gt_total_annotation += len(gt_class_ids)

            log.info("\nGround Truth-------->")

            log.info("i,image_id:{},{}".format(i, image_id))

            log.debug(
                "len(gt_boxes), gt_boxes.shape, type(gt_boxes): {},{},{}".
                format(len(gt_boxes), gt_boxes.shape, type(gt_boxes)))
            log.debug(
                "len(gt_masks), gt_masks.shape, type(gt_masks): {},{},{}".
                format(len(gt_masks), gt_masks.shape, type(gt_masks)))

            log.debug("gt_boxes: {}".format(gt_boxes))
            log.debug("gt_masks: {}".format(gt_masks))

            log.info("--------")

            # Detect objects
            ##---------------------------------------------
            t2 = time.time()

            r = detect(model, im=im, verbose=1)[0]
            ## N - total number of predictions
            ## (N, 4)
            pred_boxes = r['rois']
            ## (H, W, N)
            pred_masks = r['masks']
            ## N
            pred_class_ids = r['class_ids']
            ## N
            pred_scores = r['scores']

            log.debug("Prediction on Groud Truth-------->")
            log.debug('len(r): {}'.format(len(r)))
            log.debug("len(gt_class_ids), gt_class_ids: {},{}".format(
                len(gt_class_ids), gt_class_ids))
            log.debug(
                "len(pred_class_ids), pred_class_ids, type(pred_class_ids): {},{},{}"
                .format(len(pred_class_ids), pred_class_ids,
                        type(pred_class_ids)))
            log.debug("len(pred_scores), pred_scores: {},{}".format(
                len(pred_scores), pred_scores))
            log.debug(
                "len(pred_boxes), pred_boxes.shape, type(pred_boxes): {},{},{}"
                .format(len(pred_boxes), pred_boxes.shape, type(pred_boxes)))
            log.debug(
                "len(pred_masks), pred_masks.shape, type(pred_masks): {},{},{}"
                .format(len(pred_masks), pred_masks.shape, type(pred_masks)))
            log.debug("--------")

            if class_ids_map:
                pred_class_ids_common_model_indices = np.where(
                    np.in1d(pred_class_ids, class_ids_common_model))[0]
                class_ids_common_model_pred_class_ids_indices = np.where(
                    np.in1d(class_ids_common_model, pred_class_ids))[0]
                # pred_class_ids_common_dataset_indices = np.where(np.in1d(class_ids_common_dataset, pred_class_ids_common_model_indices))[0]

                pred_boxes = pred_boxes[pred_class_ids_common_model_indices]
                pred_masks = pred_masks[...,
                                        pred_class_ids_common_model_indices]
                pred_class_ids = pred_class_ids[
                    pred_class_ids_common_model_indices]

                pred_scores = pred_scores[pred_class_ids_common_model_indices]

                log.debug(
                    "Prediction on Groud Truth: After Model class filtering-------->"
                )
                log.debug('len(r): {}'.format(len(r)))
                log.debug(
                    "len(pred_class_ids_common_model_indices), pred_class_ids_common_model_indices: {},{}"
                    .format(len(pred_class_ids_common_model_indices),
                            pred_class_ids_common_model_indices))

                log.debug(
                    "len(class_ids_common_model_pred_class_ids_indices), class_ids_common_model_pred_class_ids_indices: {},{}"
                    .format(len(class_ids_common_model_pred_class_ids_indices),
                            class_ids_common_model_pred_class_ids_indices))

                log.debug(
                    "len(class_ids_common_dataset[class_ids_common_model_pred_class_ids_indices]), class_ids_common_dataset[class_ids_common_model_pred_class_ids_indices]: {},{}"
                    .format(
                        len(class_ids_common_dataset[
                            class_ids_common_model_pred_class_ids_indices]),
                        class_ids_common_dataset[
                            class_ids_common_model_pred_class_ids_indices]))

                log.debug("len(gt_class_ids), gt_class_ids: {},{}".format(
                    len(gt_class_ids), gt_class_ids))
                log.debug(
                    "len(pred_class_ids), pred_class_ids, type(pred_class_ids): {},{},{}"
                    .format(len(pred_class_ids), pred_class_ids,
                            type(pred_class_ids)))
                log.debug("len(pred_scores), pred_scores: {},{}".format(
                    len(pred_scores), pred_scores))
                log.debug(
                    "len(pred_boxes), pred_boxes.shape, type(pred_boxes): {},{},{}"
                    .format(len(pred_boxes), pred_boxes.shape,
                            type(pred_boxes)))
                log.debug(
                    "len(pred_masks), pred_masks.shape, type(pred_masks): {},{},{}"
                    .format(len(pred_masks), pred_masks.shape,
                            type(pred_masks)))
                log.debug("--------")

            pred_total_annotation += len(pred_class_ids)

            t3 = time.time()
            time_taken_in_detect = (t3 - t2)
            log.info('TIME_TAKEN_IN_DETECT:%f seconds' %
                     (time_taken_in_detect))

            t4 = time.time()

            ## TODO: keep the gt via_json response and the pred via_json response in separate data structures
            ## TODO: mAP calculation per class and over the entire dataset

            ## TODO: this does not help; need to flatten all the ground truths for the entire dataset.
            ## np.zeros(len(gt_for_all_images)); ideally the same number of predictions should be there.
            ## In short, the compute_matches function has to be re-written for the entire dataset.

            evaluate_run_summary['images'].append(image_filename)
            # evaluate_run_summary['gt_boxes'].append(gt_boxes)
            evaluate_run_summary['gt_class_ids'].append(list(gt_class_ids))
            # evaluate_run_summary['gt_masks'].append(gt_masks)
            # evaluate_run_summary['pred_boxes'].append(pred_boxes)
            evaluate_run_summary['pred_class_ids'].append(list(pred_class_ids))
            evaluate_run_summary['pred_scores'].append(list(pred_scores))
            # evaluate_run_summary['pred_masks'].append(pred_masks)
            evaluate_run_summary['gt_total_annotation_per_image'].append(
                len(gt_class_ids))
            evaluate_run_summary['pred_total_annotation_per_image'].append(
                len(pred_class_ids))

            detection_on_dataset_item = defaultdict(list)
            __pred_match_total_annotation = np.zeros([len(iou_thresholds)],
                                                     dtype=int)

            for count, iou_threshold in enumerate(iou_thresholds):
                log.info("count, iou_threshold: {}, {}".format(
                    count, iou_threshold))

                ## Compute Average Precision at a set IoU threshold
                ## --------------------------------------------
                AP_per_image, precisions, recalls, gt_match, pred_match, overlaps, pred_match_scores, pred_match_class_ids = utils.compute_ap(
                    gt_boxes,
                    gt_class_ids,
                    gt_masks,
                    pred_boxes,
                    pred_class_ids,
                    pred_scores,
                    pred_masks,
                    iou_threshold=iou_threshold)

                __pred_match_total_annotation[count] += len(
                    pred_match_class_ids)

                ## compute and returns f1 score metric
                ## --------------------------------------------
                f1_per_image = utils.compute_f1score(precisions, recalls)

                ## Compute the recall at the given IoU threshold. It's an indication
                ## of how many GT boxes were found by the given prediction boxes.
                ## --------------------------------------------
                recall_bbox, positive_ids_bbox = utils.compute_recall(
                    gt_boxes, pred_boxes, iou_threshold)

                # log.info("len(precisions),precisions: {},{}".format(len(precisions), precisions))
                # log.info("len(recalls),recalls: {},{}".format(len(recalls), recalls))
                # log.info("len(pred_match_class_ids),pred_match_class_ids: {},{}".format(len(pred_match_class_ids), pred_match_class_ids))

                # log.info("AP_per_image: {}".format(AP_per_image))
                # log.info("len(overlaps),overlaps: {},{}".format(len(overlaps), overlaps))

                pred_match_class_names = class_names[np.where(
                    np.in1d(dataset.class_ids, pred_match_class_ids))[0]]

                detection_on_dataset_item['ap_per_image'].append(AP_per_image)
                detection_on_dataset_item['f1_per_image'].append(f1_per_image)
                detection_on_dataset_item['precisions'].append(precisions)
                detection_on_dataset_item['recalls'].append(list(recalls))
                detection_on_dataset_item['recall_bbox'].append(recall_bbox)
                detection_on_dataset_item['positive_ids_bbox'].append(
                    list(positive_ids_bbox))
                detection_on_dataset_item['gt_match'].append(list(gt_match))
                detection_on_dataset_item['pred_match'].append(
                    list(pred_match))
                detection_on_dataset_item['pred_match_scores'].append(
                    list(pred_match_scores))
                detection_on_dataset_item['overlaps_mask_iou'].append(
                    list(overlaps))
                detection_on_dataset_item['pred_match_class_ids'].append(
                    list(pred_match_class_ids))
                detection_on_dataset_item['pred_match_class_names'].append(
                    list(pred_match_class_names))
                detection_on_dataset_item[
                    'pred_match_total_annotation'].append(
                        len(pred_match_class_ids))
                detection_on_dataset_item['iou_thresholds'].append(
                    iou_threshold)

                if save_viz_and_json and iou_threshold == float(
                        iou_threshold_input):
                    fext = ".png"
                    file_name = image_filename + fext
                    log.info("@IoU, SAVED_FILE_NAME: {},{}".format(
                        iou_threshold, file_name))
                    jsonres = viz.get_display_instances(im,
                                                        pred_boxes,
                                                        pred_masks,
                                                        pred_class_ids,
                                                        class_names_model,
                                                        pred_scores,
                                                        colors=cc,
                                                        show_bbox=True,
                                                        show_mask=True,
                                                        get_mask=get_mask,
                                                        filepath=filepath,
                                                        filename=file_name,
                                                        auto_show=auto_show)

                    ## Convert Json response to VIA Json response
                    ##---------------------------------------------
                    # size_image = 0
                    size_image = os.path.getsize(filepath_image_in)
                    jsonres["filename"] = image_filename
                    jsonres["size"] = size_image
                    jsonres['file_attributes']['iou'] = iou_threshold

                    ## TODO: to store in MongoDB, '.' (dot) must not be present in the keys of the JSON data;
                    ## but to visualize the results in the VIA tool, the dot and the size are expected
                    # via_jsonres[image_filename.replace('.','-')+str(size_image)] = json.loads(common.numpy_to_json(jsonres))
                    via_jsonres[image_filename + str(size_image)] = json.loads(
                        common.numpy_to_json(jsonres))
                    # log.debug("jsonres: {}".format(jsonres))
                    # log.debug("via_jsonres[image_filename+str(size_image)]: {}".format(via_jsonres[image_filename+str(size_image)]))

            detection_on_dataset.append(detection_on_dataset_item)
            pred_match_total_annotation.append(__pred_match_total_annotation)

            mean_ap_of_per_image, mean_f1_of_per_image, mean_recall_bbox_of_per_image, total_pred_match_total_annotation_of_per_image = compute_ap_of_per_image_over_dataset(
                detection_on_dataset)

            ## TODO: use detection_on_dataset for evaluation over entire dataset and per class
            ## fix the TODO items within compute_ap_dataset
            # _mAPs, _precisions, _recalls = compute_ap_dataset(detection_on_dataset, iou_thresholds)

        log.info("---x-x---")
    except Exception as e:
        log.info("Exception: {}".format(e))
        log.error("Fatal error in main loop".format(e), exc_info=True)
        # log.error('Error occurred ' + str(e))
        raise
    finally:
        log.info("--------X--------X--------X--------")
        T1 = time.time()

        evaluate_run_summary['total_execution_time'] = T1 - T0

        evaluate_run_summary['mAP'] = _mAPs
        evaluate_run_summary['precision'] = _precisions
        evaluate_run_summary['recall'] = _recalls

        evaluate_run_summary['class_names_dataset'] = class_names
        evaluate_run_summary['class_ids_dataset'] = dataset.class_ids
        evaluate_run_summary['class_names_model'] = class_names_model
        evaluate_run_summary['class_ids_model'] = class_ids_model
        evaluate_run_summary['class_names_common'] = class_names_common

        evaluate_run_summary['mean_ap_of_per_image'] = mean_ap_of_per_image
        evaluate_run_summary['mean_f1_of_per_image'] = mean_f1_of_per_image
        evaluate_run_summary[
            'mean_recall_bbox_of_per_image'] = mean_recall_bbox_of_per_image
        evaluate_run_summary[
            'total_pred_match_total_annotation_of_per_image'] = total_pred_match_total_annotation_of_per_image

        evaluate_run_summary[
            'pred_match_total_annotation'] = pred_match_total_annotation
        evaluate_run_summary['gt_total_annotation'] = gt_total_annotation
        evaluate_run_summary['pred_total_annotation'] = pred_total_annotation
        evaluate_run_summary['iou_thresholds'] = iou_thresholds
        evaluate_run_summary['execution_end_time'] = "{:%d%m%y_%H%M%S}".format(
            datetime.datetime.now())
        # evaluate_run_summary['detection_min_confidence'] = dnncfg.config['DETECTION_MIN_CONFIDENCE']
        evaluate_run_summary['remaining_num_images'] = remaining_num_images

        log.debug("evaluate_run_summary: {}".format(evaluate_run_summary))

        classification_reportfile_path = reportcfg[
            'classification_reportfile'] + '-per_dataset.json'
        with open(classification_reportfile_path, 'w') as fw:
            fw.write(common.numpy_to_json(detection_on_dataset))

        evaluate_run_summary_reportfile_path = reportcfg[
            'evaluate_run_summary_reportfile'] + '.json'
        with open(evaluate_run_summary_reportfile_path, 'w') as fw:
            fw.write(common.numpy_to_json(evaluate_run_summary))

        ## Save the image list for loading the response in VIA along with the images
        imagelist_filepath = os.path.join(filepath, 'annotations',
                                          "imagelist.csv")
        pd.DataFrame(imagelist).to_csv(imagelist_filepath)

        ## https://stackoverflow.com/questions/12309269/how-do-i-write-json-data-to-a-file
        via_jsonres_filepath = os.path.join(filepath, 'annotations',
                                            "annotations.json")
        if via_jsonres and len(via_jsonres) > 0:
            with open(via_jsonres_filepath, 'w') as fw:
                fw.write(json.dumps(via_jsonres))

        print("EVALUATE_REPORT:ANNOTATION:{}".format(via_jsonres_filepath))
        print("EVALUATE_REPORT:IMAGELIST:{}".format(imagelist_filepath))
        print(
            "EVALUATE_REPORT:METRIC:{}".format(classification_reportfile_path))
        print("EVALUATE_REPORT:SUMMARY:{}".format(
            evaluate_run_summary_reportfile_path))

        log.info("--------")

        return evaluate_run_summary
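
A minimal sketch of how the per-image APs collected in detection_on_dataset could be reduced to a single COCO-style mAP over the IoU threshold range; this aggregation is an assumption here, not the original compute_ap_dataset:

import numpy as np

def mean_ap_over_dataset(detection_on_dataset, iou_thresholds):
    # each item holds one AP value per IoU threshold (the 'ap_per_image' list built above)
    ap_matrix = np.array([item['ap_per_image'] for item in detection_on_dataset])
    ap_per_threshold = np.nanmean(ap_matrix, axis=0)      # mean AP at each IoU threshold
    map_over_range = float(np.nanmean(ap_per_threshold))  # mAP averaged over the 0.5:0.95 range
    return dict(zip(iou_thresholds, ap_per_threshold)), map_over_range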
Code Example #13
def load_hmdcoco(cfg, datacfg, img_ids, bbox_mode):
    class_ids = datacfg.class_ids if 'class_ids' in datacfg and datacfg[
        'class_ids'] else []
    class_ids = annon.getCatIds(catIds=class_ids)  ## cat_ids
    classinfo = annon.loadCats(class_ids)  ## cats
    # print("class_ids: {}".format(class_ids))
    # print("classinfo: {}".format(classinfo))

    # print(img_ids)
    imgs = annon.loadImgs(img_ids)
    anns = [annon.imgToAnns[img_id] for img_id in img_ids]
    # print(anns)

    ## => quick hack for running detectron2 with gaze data
    ## TODO: Convert to COCO -> if done in pre-processing, this step not required here

    imgs_anns = list(zip(imgs, anns))
    dataset_dicts = []
    extra_annotation_keys = None

    ## TODO: iscrowd is available in attributes as something like group
    ann_keys = ["iscrowd", "bbox", "keypoints", "category_id", "lbl_id"
                ] + (extra_annotation_keys or [])
    num_instances_without_valid_segmentation = 0
    id_map = {v: i for i, v in enumerate(class_ids)}

    for (img_dict, anno_dict_list) in imgs_anns:
        image_path = apputil.get_abs_path(
            cfg, img_dict, 'AI_ANNON_DATA_HOME_LOCAL')  ##image_root
        filepath = os.path.join(image_path, img_dict['filename'])
        record = {}
        record["file_name"] = filepath
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        image_id = record["image_id"] = img_dict["img_id"]  ## coco: id

        objs = []
        for anno in anno_dict_list:
            assert anno["img_id"] == image_id  ## image_id
            obj = {key: anno[key] for key in ann_keys if key in anno}
            ## TODO: convert bbox to coco format
            _bbox = obj['bbox']

            ##TODO: verify what is BoxMode.XYWH_ABS
            coco_frmt_bbox = [
                _bbox['xmin'], _bbox['ymin'], _bbox['width'], _bbox['height']
            ]
            #print("coco_frmt_bbox: {}".format(coco_frmt_bbox))
            obj['bbox'] = coco_frmt_bbox
            ## TODO: get polygon from shape_attributes and convert to coco format
            #segm = anno.get("segmentation", None)
            segm = None
            if segm:  # either list[list[float]] or dict(RLE)
                if not isinstance(segm, dict):
                    # filter out invalid polygons (< 3 points)
                    segm = [
                        poly for poly in segm
                        if len(poly) % 2 == 0 and len(poly) >= 6
                    ]
                    if len(segm) == 0:
                        num_instances_without_valid_segmentation += 1
                        continue  # ignore this instance
                    obj["segmentation"] = segm

            obj["bbox_mode"] = bbox_mode
            if id_map:
                obj["category_id"] = id_map[obj["lbl_id"]]  ## category_id
            objs.append(obj)

        record["annotations"] = objs
        dataset_dicts.append(record)

    return dataset_dicts
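
On the BoxMode TODO above: in detectron2, BoxMode.XYWH_ABS means [x_min, y_min, width, height] in absolute pixel coordinates, which is exactly the coco_frmt_bbox layout built here. A small illustration using detectron2's converter (the numbers are made up):

from detectron2.structures import BoxMode

xywh_box = [10, 20, 30, 40]  # [x_min, y_min, width, height]
xyxy_box = BoxMode.convert(xywh_box, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
# xyxy_box == [10, 20, 40, 60]  -> [x_min, y_min, x_max, y_max]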