Example #1
def save_Stats(cfg, Stats, Total_Stats, dataset=None, annon_filepath=None, dst_dir=None, db=None):
  stats_data = json.loads(common.numpy_to_json(Stats))
  total_stats_data = json.loads(common.numpy_to_json(Total_Stats))

  stats_total_stats_data = common.merge_dict([stats_data, total_stats_data])

  tblname = annonutils.get_tblname('STATS')

  save_to_file = cfg['SAVE_TO_FILE']
  if save_to_file:
    ## Stats files
    create_stats_files(cfg, Stats, Total_Stats, dst_dir)
    ## Move processed annotation file to archive folder
    log.info("annon_filepath, tblname: {}, {}".format(annon_filepath, tblname))
    rel_dir = cfg['BASE_PATH']['RELEASE_DIR']
    with open(os.path.join(rel_dir, os.path.basename(annon_filepath)),'w') as fw:
      json.dump(dataset,fw)
  else:
    log.info("tblname: {}".format(tblname))
    # annonutils.write2db(db, tblname, [stats_total_stats_data], idx_col='rel_filename')
    annonutils.write2db(db, tblname, [stats_total_stats_data], idx_col='rel_filepath')
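A minimal usage sketch for save_Stats. The cfg keys are inferred from the function body; the paths, stats payloads and the dst_dir layout are illustrative assumptions only.

cfg = {
  'SAVE_TO_FILE': True,
  'BASE_PATH': {'RELEASE_DIR': '/data/releases'},  # assumed layout
}
stats = {'num_images': 120}             # illustrative Stats payload
total_stats = {'num_annotations': 980}  # illustrative Total_Stats payload

## file mode: writes the stats files and archives the processed annotation file
save_Stats(cfg, stats, total_stats,
           dataset={'images': []},
           annon_filepath='/data/annotations/rel-001.json',
           dst_dir='/data/stats')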
Example #2
def save_Annotation_Data(cfg, Annotation_Data, ant_data_dir=None, db=None):
  ## At least create an empty directory even when no Annotation_Data is present, to avoid "directory does not exist" checks downstream
  if len(Annotation_Data) > 0:
    save_to_file = cfg['SAVE_TO_FILE']
    if save_to_file:
      log.info("ant_data_dir: {}".format(ant_data_dir))
      for k,v in Annotation_Data.items():
        # ant_data_filename = os.path.join(ant_data_dir,k+'.json')
        ant_data_filename = os.path.join(ant_data_dir,k)
        json_str = common.numpy_to_json(v)
        with open(ant_data_filename,'w') as fw:
          # fw.write(json.dumps(v))
          fw.write(json_str)
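A usage sketch for save_Annotation_Data, assuming Annotation_Data maps output filenames to annotation dicts as the loop above implies; the path and values are illustrative.

cfg = {'SAVE_TO_FILE': True}
annotation_data = {
  'img_001.png.json': {'regions': [], 'file_attributes': {}},  # illustrative record
}
save_Annotation_Data(cfg, annotation_data, ant_data_dir='/data/annotations/data')
Example #3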
def detect_batch(model,
                 verbose=1,
                 modelcfg=None,
                 batch=None,
                 imagenames=None,
                 colors=None,
                 get_mask=False,
                 class_names=None):
    """API
  """
    log.info("len(batch): {}".format(len(batch)))

    # log.info("len(imagenames): {}".format(len(imagenames)))
    # assert len(batch) == len(imagenames)

    total_items = len(batch)
    res = []
    cc = None

    r = model.detect(batch, verbose)

    if class_names:
        if not colors:
            colors = viz.random_colors(len(class_names))
        cc = dict(zip(class_names, colors))

    for i in range(total_items):
        jsonres = viz.get_detections(batch[i],
                                     r[i]['rois'],
                                     r[i]['masks'],
                                     r[i]['class_ids'],
                                     class_names,
                                     r[i]['scores'],
                                     colors=cc,
                                     get_mask=get_mask)

        uid = common.createUUID('pred')
        # image_name = imagenames[i]
        image_name = uid
        jsonres["filename"] = image_name
        jsonres["file_attributes"]["uuid"] = uid
        via_jsonres = {}
        via_jsonres[image_name] = jsonres
        json_str = common.numpy_to_json(via_jsonres)

        res.append(json.loads(json_str))

    return res
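A hedged sketch of calling detect_batch; model is assumed to be a loaded Matterport-style Mask R-CNN model exposing model.detect(batch, verbose), and the image paths and class list are illustrative.

batch = [viz.imread(p) for p in ['img_001.png', 'img_002.png']]  # RGB arrays
class_names = ['BG', 'person', 'car']                            # assumed class list
results = detect_batch(model, verbose=1, batch=batch,
                       get_mask=True, class_names=class_names)
for via_record in results:
  log.info("prediction keys: {}".format(list(via_record.keys())))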
Example #4
def save_Annotation_Info(cfg, Annotation_Info, dst_dir=None, db=None):
  if len(Annotation_Info) > 0:
    tblname = annonutils.get_tblname('ANNOTATIONS')
    json_str = common.numpy_to_json(Annotation_Info)
    # log.info("json_str: {}".format(json_str))

    save_to_file = cfg['SAVE_TO_FILE']
    if save_to_file:
      ant_filename = os.path.join(dst_dir,os.path.basename(dst_dir)+'-'+cfg['FILES']['ANNOTATIONS'])
      log.info("ant_filename, tblname: {}, {}".format(ant_filename, tblname))
      with open(ant_filename,'w') as fw:
        # fw.write(json.dumps(Annotation_Info))
        fw.write(json_str)
    else:
      log.info("tblname: {}".format(tblname))
      annonutils.write2db(db, tblname, list(json.loads(json_str).values()), idx_col='ant_id')
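In DB mode save_Annotation_Info writes the records keyed by ant_id; a sketch under the assumption that Annotation_Info is a dict of annotation records and db is an open connection compatible with annonutils.write2db.

cfg = {'SAVE_TO_FILE': False}
annotation_info = {
  'ant-0001': {'ant_id': 'ant-0001', 'filename': 'img_001.png'},  # illustrative record
}
save_Annotation_Info(cfg, annotation_info, db=db)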
def detect_from_videos(appcfg, dnnmod, videos, path, model, class_names, cmdcfg, api_model_key, show_bbox=False):
  """detect_from_videos
  Code adapted from:

  Copyright (c) 2018 Matterport, Inc.
  Licensed under the MIT License (see LICENSE for details)
  Originally written by Waleed Abdulla
  ---
  
  Key contribution:
  * saving the annotated results directly
  * saving the annotated mask only
  * annotation results as json response for consumption in API, VGG VIA compatible results

  Copyright (c) 2020 mangalbhaskar
  Licensed under [see LICENSE for details]
  Written by mangalbhaskar
  ---

  Conventions:
    video - video filename
    filepath - the absolute path of the video input file location
    vid - binary data after reading the video file
  """
  import cv2
  
  save_viz_and_json = cmdcfg.save_viz_and_json if 'save_viz_and_json' in cmdcfg else False
  if save_viz_and_json:
    timestamp = "{:%d%m%y_%H%M%S}".format(datetime.datetime.now())
    filepath = os.path.join(path,"predict-"+timestamp)
    log.debug("filepath: {}".format(filepath))
    common.mkdir_p(filepath)

  file_names = []
  res = []
  detect = apputil.get_module_fn(dnnmod, "detect")
  colors = viz.random_colors(len(class_names))
  log.debug("class_names: {}".format(class_names))
  log.debug("len(class_names), class_names: {},{}".format(len(class_names), class_names))
  log.debug("len(colors), colors: {},{}".format(len(colors), colors))

  cc = dict(zip(class_names,colors))

  for video in videos:
    ## Run model detection and save the outputs
    log.debug("Running on {}".format(video))

    ## Read Video
    ##---------------------------------------------
    filepath_video = os.path.join(path, video)
    log.debug("Processing video with filepath_video: {}".format(filepath_video))
    vid = cv2.VideoCapture(filepath_video)
    width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = vid.get(cv2.CAP_PROP_FPS)

    vname, vext = os.path.splitext(video)
    file_name = video

    if save_viz_and_json:
      ## oframe - original image frame from the video
      ## pframe or viz - annotations visualization frame from the video
      ## annotations - annotations json per frame
      video_viz_basepath = os.path.join(filepath,vname) 
      path_oframe = os.path.join(video_viz_basepath,"oframe")
      path_pframe = os.path.join(video_viz_basepath,"pframe")
      path_sframe = os.path.join(video_viz_basepath,"splash")
      path_mframe = os.path.join(video_viz_basepath,"mask")
      path_mmframe = os.path.join(video_viz_basepath,"mmask")
      path_viz = os.path.join(video_viz_basepath,"viz")
      path_annotations = os.path.join(video_viz_basepath,"annotations")
      for d in [path_oframe, path_pframe, path_annotations, path_sframe, path_mframe, path_mmframe, path_viz]:
        log.debug("videos dirs: {}".format(d))
        common.mkdir_p(d)

      ## Define codec and create video writer
      ##---------------------------------------------
      # file_name = "{:%d%m%y_%H%M%S}.avi".format(datetime.datetime.now())
      fext = ".avi"
      file_name = vname+fext

      filepath_pvideo = os.path.join(filepath, vname, file_name)
      log.debug("filepath_pvideo: {}".format(filepath_pvideo))

    count = 0
    success = True
    frame_cutoff = 0
    from_frame = 0
    while success:
      log.debug("-------")
      log.debug("frame: {}".format(count))

      if frame_cutoff and count >= frame_cutoff:
        break

      ## start predictions only from a specific frame number
      if from_frame and count < from_frame:
        count += 1
        continue

      ## Read next image
      success, oframe_im = vid.read()
      if success:
        oframe_name = str(count)+"_"+video+".png"

        ## OpenCV returns images as BGR, convert to RGB
        oframe_im_rgb = oframe_im[..., ::-1]
        
        ## Detect objects
        t1 = time.time()
        
        # r = detect(model, im=oframe_im_rgb, verbose=0)
        r = detect(model, im=oframe_im_rgb, verbose=1)[0]

        t2 = time.time()
        time_taken = (t2 - t1)
        log.debug('Total time taken in detect: %f seconds' %(time_taken))

        ## Get the detections / annotation JSON response for the frame
        ##---------------------------------------------
        t1 = time.time()

        if save_viz_and_json:
          # pframe_im, jsonres = viz.get_display_instances(oframe_im_rgb, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'], colors=cc, show_bbox=False)
          jsonres = viz.get_display_instances(oframe_im_rgb, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'], colors=cc, show_bbox=False, auto_show=False, filepath=video_viz_basepath, filename=oframe_name)
        else:
          jsonres = viz.get_detections(oframe_im_rgb, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'], colors=cc)

        t2 = time.time()
        time_taken = (t2 - t1)
        log.debug('Total time taken in detections: %f seconds' %(time_taken))
        
        ## Convert Json response to VIA Json response
        ##---------------------------------------------
        t1 = time.time()
        size_oframe = 0
        jsonres["filename"] = oframe_name
        jsonres["size"] = size_oframe
        via_jsonres = {}
        via_jsonres[oframe_name+str(size_oframe)] = jsonres
        json_str = common.numpy_to_json(via_jsonres)
        # log.debug("json_str:\n{}".format(json_str))

        t2 = time.time()
        time_taken = (t2 - t1)
        log.debug('Total time taken in json_str: %f seconds' %(time_taken))
        
        ## Create Visualisations & Save output
        ##---------------------------------------------
        if save_viz_and_json:
          t1 = time.time()

          ## Color Splash Effect
          ## Save vframe and video buffer
          ##---------------------------------------------
          # splash = viz.color_splash(oframe_im_rgb, r['masks'])
          # # RGB -> BGR to save image to video
          # splash = splash[..., ::-1]
          # # Add image to video writer
          # vwriter_splash.write(splash)

          ## Color Mask Effect
          ## Save vframe and video buffer
          ##---------------------------------------------

          # mframe_im = viz.color_mask(oframe_im_rgb, r['masks'])
          # ## RGB -> BGR to save image to video
          # ## mframe_im = mframe_im[..., ::-1]
          # filepath_mframe = os.path.join(path_mframe, oframe_name)
          # viz.imsave(filepath_mframe, mframe_im)

          ## Annotation Visualisation
          ## Save vframe and video buffer
          ##---------------------------------------------

          # filepath_pframe = os.path.join(path_pframe, oframe_name)
          # viz.imsave(filepath_pframe, pframe_im)

          # filepath_oframe = os.path.join(path_oframe, oframe_name)
          # viz.imsave(filepath_oframe, oframe_im_rgb)
          # # size_oframe = os.path.getsize(filepath_oframe)

          filepath_jsonres = os.path.join(path_annotations, oframe_name+".json")
          log.debug("filepath_jsonres: {}".format(filepath_jsonres))
          with open(filepath_jsonres,'w') as fw:
            fw.write(json_str)

          ## TODO: use OpenCV itself to create the visualisation video from the individual frames
          # pframe_im_bgr = pframe_im[..., ::-1]
          # height, width = pframe_im_bgr.shape[:2]
          # ## int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
          # ## height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
          # ## vwriter_splash = cv2.VideoWriter(os.path.join(filepath, 'splash_'+file_name), cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))
          # vwriter_viz = cv2.VideoWriter(filepath_pvideo, cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))
          # vwriter_viz.write(pframe_im_bgr)

          # ## Add image to video writer
          # ## vwriter_mask.write(mframe_im)
        
        res.append(json_str)

        count += 1

    # if save_viz_and_json:    
    #   ## vwriter_splash.release()
    #   vwriter_viz.release()

    file_names.append(file_name)

    ## https://stackoverflow.com/questions/36643139/python-and-opencv-cannot-write-readable-avi-video-files
    ## ffmpeg -framerate 29 -i MAH04240.mp4-%d.png -c:v libx264 -r 30 MAH04240-maskrcnn-viz.mp4
    ## ffmpeg -framerate 29 -i %d_MAH04240.mp4.png -c:v libx264 -r 30 MAH04240-maskrcnn-viz.mp4
  return file_names,res
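A sketch of a call to detect_from_videos. It assumes cmdcfg is an attribute-style dict (e.g. EasyDict), dnnmod exposes a detect function via apputil.get_module_fn, and the paths and model key are illustrative.

videos = ['MAH04240.mp4']
file_names, responses = detect_from_videos(
    appcfg, dnnmod, videos, '/data/videos', model,
    class_names=['BG', 'person'], cmdcfg=cmdcfg,
    api_model_key='org-prob1-rel1', show_bbox=False)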
async def _create_res(detect, filepath, images, path, model, class_names, cmdcfg, api_model_key, show_bbox=False):
  save_viz_and_json = cmdcfg.save_viz_and_json if 'save_viz_and_json' in cmdcfg else False
  ## TODO: move to cmdcfg configuration
  get_mask = True

  file_names = []
  res = []

  colors = viz.random_colors(len(class_names))
  log.debug("class_names: {}".format(class_names))
  log.debug("len(class_names), class_names: {},{}".format(len(class_names), class_names))
  log.debug("len(colors), colors: {},{}".format(len(colors), colors))

  cc = dict(zip(class_names, colors))

  ## TODO: highly inefficient; should be switched to batch processing mode

  # im_arr = [ viz.imread(os.path.join(path, image_filename)) for image_filename in images ]
  for image_filename in images:
    # Run model detection and save the outputs
    log.debug("-------")
    log.debug("Running on {}".format(image_filename))

    # Read image
    ##------------------------------
    ## TODO: file or filepath or url
    filepath_image_in = os.path.join(path, image_filename)
    fext = ".png"
    # file_name = image_filename
    file_name = image_filename+fext

    t0 = time.time()
    ## TODO: 3. to verify
    # im = skimage.io.imread(filepath_image_in)
    im = viz.imread(filepath_image_in)
    # im_arr.append[im]

    t1 = time.time()
    time_taken_imread = (t1 - t0)
    log.debug('Total time taken in time_taken_imread: %f seconds' %(time_taken_imread))

    # Detect objects
    ##---------------------------------------------
    t2 = time.time()

    r = detect(model, im=im, verbose=1)[0]
    pred_boxes = r['rois']
    pred_masks =  r['masks']
    pred_class_ids = r['class_ids']
    pred_scores = r['scores']

    log.debug("Prediction on Groud Truth-------->")
    log.debug('len(r): {}'.format(len(r)))
    log.debug("len(pred_class_ids), pred_class_ids: {},{}".format(len(pred_class_ids), pred_class_ids))
    log.debug("len(pred_boxes), pred_boxes.shape, type(pred_boxes): {},{},{}".format(len(pred_boxes), pred_boxes.shape, type(pred_boxes)))
    log.debug("len(pred_masks), pred_masks.shape, type(pred_masks): {},{},{}".format(len(pred_masks), pred_masks.shape, type(pred_masks)))
    log.debug("--------")

    t3 = time.time()
    time_taken_in_detect = (t3 - t2)
    log.debug('Total time taken in detect: %f seconds' %(time_taken_in_detect))

    t4 = time.time()
    ## TODO: batchify
    time_taken_save_viz_and_json = -1
    if save_viz_and_json:
      jsonres = viz.get_display_instances(im, pred_boxes, pred_masks, pred_class_ids, class_names, pred_scores,
                                                     colors=cc, show_bbox=show_bbox, get_mask=get_mask, filepath=filepath, filename=file_name)
   
      t7 = time.time()
      time_taken_save_viz_and_json = (t7 - t4)
    else:
      jsonres = viz.get_detections(im, pred_boxes, pred_masks, pred_class_ids, class_names, pred_scores,
                                     colors=cc, get_mask=get_mask)

    log.debug("jsonres: {}".format(jsonres))

    ## Convert Json response to VIA Json response
    ##---------------------------------------------
    ## https://stackoverflow.com/questions/11904083/how-to-get-image-size-bytes-using-pil
    # size_image = 0
    size_image = os.path.getsize(filepath_image_in)
    jsonres["filename"] = image_filename
    jsonres["size"] = size_image
    via_jsonres = {}

    ## TODO: to store the results in MongoDB, '.' (dot) must not be present in the JSON keys;
    ## but to visualize the results in the VIA tool, the dot and the file size are expected in the key
    via_jsonres[image_filename.replace('.','-')+str(size_image)] = jsonres
    # via_jsonres[image_filename+str(size_image)] = jsonres
    json_str = common.numpy_to_json(via_jsonres)
    # log.debug("json_str:\n{}".format(json_str))
    # file_names.append(file_name)

    t5 = time.time()
    time_taken_res_preparation = (t5 - t4)
    log.debug('Total time taken in time_taken_res_preparation: %f seconds' %(time_taken_res_preparation))

    ## Create Visualisations & Save output
    ## TODO: resize the annotation to match the original image size, not the min or max image dimension from cfg
    ##---------------------------------------------
    # time_taken_save_viz_and_json = -1
    # if save_viz_and_json:
    #   t6 = time.time()

    #   ## Color Splash Effect & Save image
    #   ##---------------------------------------------
    #   # viz.imsave(os.path.join(filepath, 'splash', file_name), viz.color_splash(im, pred_masks))

    #   ## Color Mask Effect & Save image
    #   ##---------------------------------------------
    #   # viz.imsave(os.path.join(filepath, 'mask', file_name), viz.color_mask(im, pred_masks))

    #   ## Annotation Visualisation & Save image
    #   ##---------------------------------------------
    #   # viz.imsave(os.path.join(filepath, 'viz', file_name), imgviz)

    #   t7 = time.time()
    #   time_taken_save_viz_and_json = (t6 - t7)
    #   log.debug('Total time taken in save_viz_and_json: %f seconds' %(time_taken_save_viz_and_json))

    t8 = time.time()
    tt_turnaround = (t8 - t0)
    log.debug('Total time taken in tt_turnaround: %f seconds' %(tt_turnaround))

    res_code = 200
    dnnarch = cmdcfg.dnnarch
    modelkeys = api_model_key.split('-')
    # feature_vector = json.loads(common.numpy_to_json(r))
    feature_vector = r

    apires = {
      "api": None
      ,"type": api_model_key
      ,"dnnarch": dnnarch
      ,"org_name": modelkeys[0]
      ,"problem_id": modelkeys[1]
      ,"rel_num": modelkeys[2]
      ,"image_name": image_filename
      ,"result": json.loads(json_str)
      ,'status_code': res_code
      ,'timings': {
        'image_read': time_taken_imread
        ,'detect': time_taken_in_detect
        ,'res_preparation': time_taken_res_preparation
        ,'time_taken_save_viz_and_json': time_taken_save_viz_and_json
        ,'tt_turnaround': tt_turnaround
      }
    }


    filepath_jsonres = os.path.join(filepath, 'annotations', image_filename+".json")
    log.debug("filepath_jsonres: {}".format(filepath_jsonres))

    ## Always Save the VIA Json response
    await asyncio.gather(
      do_save_to_file(filepath_jsonres, apires, feature_vector)
    )

    # res.append(apires)

  log.debug("-------")
def predict(appcfg, modelinfo, image, get_mask=False):
  """Main function for AI API for prediction 
  """
  try:
    t0 = time.time()

    image_name = secure_filename(image.filename)
    log.debug("image.filename: {}".format(image_name))

    log.debug("modelinfo: {}".format(modelinfo))
    api_model_key = modelinfo['API_MODEL_KEY']
    dnnarch = modelinfo['DNNARCH']

    if image and allowed_file( appcfg, image_name):
      image_bytes = image.read()
      im_non_numpy = Image.open(io.BytesIO(image_bytes))

      ## TODO: derive from config whether the image needs to be resized, and then send the resized image to the API

      model = modelinfo['MODEL']
      modelcfg = modelinfo['MODELCFG']
      detect = modelinfo['DETECT']
      detect_with_json = modelinfo['DETECT_WITH_JSON']

      cc = None
      class_names = modelcfg['classes']

      t1 = time.time()
      time_taken_imread = (t1 - t0)
      log.debug('Total time taken in time_taken_imread: %f seconds' %(time_taken_imread))

      t2 = time.time()

      jsonres = detect_with_json(model, verbose=1, modelcfg=modelcfg, image_name=image_name, im_non_numpy=im_non_numpy, get_mask=get_mask, class_names=class_names)
      t3 = time.time()
      time_taken_in_detect_with_json = (t3 - t2)

      log.debug("jsonres: {}".format(jsonres))
      log.debug('Total time taken in detect with json: %f seconds' %(time_taken_in_detect_with_json))

      t4 = time.time()

      # uid = str(uuid.uuid4())
      uid = common.createUUID('pred')
      jsonres["filename"] = image_name
      jsonres["file_attributes"]["uuid"] = uid
      via_jsonres = {}
      via_jsonres[image_name] = jsonres
      json_str = common.numpy_to_json(via_jsonres)

      t5 = time.time()
      time_taken_res_preparation = (t5 - t4)
      log.debug('Total time taken in time_taken_res_preparation: %f seconds' %(time_taken_res_preparation))

      tt_turnaround = (t5 - t0)
      log.debug('Total time taken in tt_turnaround: %f seconds' %(tt_turnaround))

      res_code = 200
      # modelkeys = api_model_key.split('-')
      apires = {
        "api": None
        ,"type": api_model_key
        ,"dnnarch": dnnarch
        # ,"org_name": modelkeys[0]
        # ,"problem_id": modelkeys[1]
        # ,"rel_num": modelkeys[2]
        # ,"image_name": image
        ,"result": json.loads(json_str)
        ,'status_code': res_code
        ,'timings': {
          'image_read': time_taken_imread
          ,'detect_with_json': time_taken_in_detect_with_json
          ,'res_preparation': time_taken_res_preparation
          ,'tt_turnaround': tt_turnaround
        }
      }
    else:
      res_code = 400
      apires = {
        "api": None
        ,"type": api_model_key
        ,"dnnarch": dnnarch
        # ,"org_name": None
        # ,"problem_id": None
        # ,"rel_num": None
        # ,"image_name": None
        ,"result": None
        ,"error": "Invalid Image Type. Allowed Image Types are: {}".format(appcfg['APP']['ALLOWED_IMAGE_TYPE'])
        ,'status_code': res_code
        ,'timings': {
          'image_read': -1
          ,'detect': -1
          ,'res_preparation': -1
          ,'tt_turnaround': -1
        }
      }
  except Exception as e:
    log.error("Exception in detection", exc_info=True)
    res_code = 500
    apires = {
      "api": None
      ,"type": None
      ,"dnnarch": None
      ,"result": None
      ,"error": "Internal Error. Exception in detection."
      ,'status_code': res_code
      ,'timings': {
        'image_read': -1
        ,'detect': -1
        ,'res_preparation': -1
        ,'tt_turnaround': -1
      }
    }

  log.debug("apires: {}".format(apires))
  # res = Response(jsonify(apires), status=res_code, mimetype='application/json')
  # res = jsonify(apires)
  # res.status_code = res_code
  res = Response(json.dumps(apires), status=res_code, mimetype='application/json')

  log.debug("res: {}".format(res))
  return res
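predict returns a Flask/Werkzeug Response object, so it is typically exposed behind an upload route; a hedged sketch, where the route name and the MODELS registry holding the loaded modelinfo dicts are assumptions.

from flask import Flask, request

app = Flask(__name__)

@app.route('/api/predict', methods=['POST'])
def predict_route():
  image = request.files['image']   # uploaded image file (werkzeug FileStorage)
  modelinfo = MODELS['default']    # assumed registry of loaded models
  return predict(appcfg, modelinfo, image, get_mask=False)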
def execute_eval(detect,
                 model,
                 dataset,
                 datacfg,
                 dnncfg,
                 class_names,
                 reportcfg,
                 get_mask=True):
    """Execute the evaluation and generates the evaluation reports
  - classification report with differet scores
  - confusion matrix
  - summary report
  """
    log.info("execute_eval---------------------------->")

    save_viz_and_json = reportcfg['save_viz_and_json']
    evaluate_no_of_result = reportcfg['evaluate_no_of_result']
    filepath = reportcfg['filepath']
    evaluate_run_summary = reportcfg['evaluate_run_summary']

    log.info("evaluate_no_of_result: {}".format(evaluate_no_of_result))

    detection_on_dataset = []
    ## TODO: put at right place
    iou_threshold = reportcfg['iou_threshold']
    # iou_thresholds = None
    # iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
    iou_thresholds = np.arange(0.5, 1.0, 0.05)

    gt_total_annotation = 0
    pred_total_annotation = 0
    remaining_num_images = -1
    image_ids = dataset.image_ids
    num_images = len(image_ids)
    pred_match_total_annotation = []

    log.info("class_names: {}".format(class_names))

    colors = viz.random_colors(len(class_names))
    log.info("len(colors), colors: {},{}".format(len(colors), colors))

    cc = dict(zip(class_names, colors))

    ## If an error occurs while iterating through the dataset, all the work done so far would be lost;
    ## therefore, save the data to disk in the finally clause
    via_jsonres = {}
    imagelist = []
    T0 = time.time()

    try:
        for i, image_id in enumerate(image_ids):
            log.debug("-------")

            filepath_image_in = dataset.image_reference(image_id)
            image_filename = filepath_image_in.split(os.path.sep)[-1]
            image_name_without_ext = image_filename.split('.')[0]

            imagelist.append(filepath_image_in)
            log.debug("Running on {}".format(image_filename))

            if evaluate_no_of_result == i:
                log.info("evaluate_no_of_result reached: i: {}\n".format(i))
                break

            remaining_num_images = evaluate_no_of_result if evaluate_no_of_result and evaluate_no_of_result > 0 else num_images
            remaining_num_images = remaining_num_images - i - 1
            log.info(
                "To be evaluated remaining_num_images:...................{}".
                format(remaining_num_images))

            t0 = time.time()

            im, gt_class_ids, gt_boxes, gt_masks, gt_active_class_ids = load_image_gt_without_resizing(
                dataset, datacfg, dnncfg, image_id)

            t1 = time.time()
            time_taken_imread = (t1 - t0)
            log.debug('Total time taken in time_taken_imread: %f seconds' %
                      (time_taken_imread))

            gt_total_annotation += len(gt_class_ids)

            log.info("\nGround Truth-------->")

            log.info("i,image_id:{},{}".format(i, image_id))
            log.info(
                "len(gt_active_class_ids),gt_active_class_ids: {},{}".format(
                    len(gt_active_class_ids), gt_active_class_ids))

            log.info(
                "len(gt_class_ids): {}\nTotal Unique classes: len(set(gt_class_ids)): {}\ngt_class_ids: {}"
                .format(len(gt_class_ids), len(set(gt_class_ids)),
                        gt_class_ids))
            log.info("len(gt_boxes), gt_boxes.shape, type(gt_boxes): {},{},{}".
                     format(len(gt_boxes), gt_boxes.shape, type(gt_boxes)))
            log.info("len(gt_masks), gt_masks.shape, type(gt_masks): {},{},{}".
                     format(len(gt_masks), gt_masks.shape, type(gt_masks)))

            log.debug("gt_boxes: {}".format(gt_boxes))
            log.debug("gt_masks: {}".format(gt_masks))

            log.info("--------")

            # Detect objects
            ##---------------------------------------------
            t2 = time.time()

            r = detect(model, im=im, verbose=1)[0]
            pred_boxes = r['rois']
            pred_masks = r['masks']
            pred_class_ids = r['class_ids']
            pred_scores = r['scores']

            pred_total_annotation += len(pred_class_ids)

            log.debug("Prediction on Groud Truth-------->")
            log.debug('len(r): {}'.format(len(r)))
            log.debug(
                "len(pred_class_ids), pred_class_ids, type(pred_class_ids): {},{},{}"
                .format(len(pred_class_ids), pred_class_ids,
                        type(pred_class_ids)))
            log.debug(
                "len(pred_boxes), pred_boxes.shape, type(pred_boxes): {},{},{}"
                .format(len(pred_boxes), pred_boxes.shape, type(pred_boxes)))
            log.debug(
                "len(pred_masks), pred_masks.shape, type(pred_masks): {},{},{}"
                .format(len(pred_masks), pred_masks.shape, type(pred_masks)))
            log.debug("--------")

            t3 = time.time()
            time_taken_in_detect = (t3 - t2)
            log.debug('Total time taken in detect: %f seconds' %
                      (time_taken_in_detect))

            t4 = time.time()

            ## TODO: keep the GT VIA JSON response and the predicted VIA JSON response in separate data structures
            ## TODO: mAP calculation per image, per class and over the entire dataset

            ## TODO: this does not help; need to flatten all the ground truths for the entire dataset.
            ## np.zeros(len(gt_for_all_images)); ideally the same number of predictions should be there.
            ## In short, the compute_matches function has to be re-written for the entire dataset.

            evaluate_run_summary['images'].append(image_filename)
            # evaluate_run_summary['gt_boxes'].append(gt_boxes)
            evaluate_run_summary['gt_class_ids'].append(list(gt_class_ids))
            # evaluate_run_summary['gt_masks'].append(gt_masks)
            # evaluate_run_summary['pred_boxes'].append(pred_boxes)
            evaluate_run_summary['pred_class_ids'].append(list(pred_class_ids))
            evaluate_run_summary['pred_scores'].append(list(pred_scores))
            # evaluate_run_summary['pred_masks'].append(pred_masks)
            evaluate_run_summary['gt_total_annotation_per_image'].append(
                len(gt_class_ids))
            evaluate_run_summary['pred_total_annotation_per_image'].append(
                len(pred_class_ids))

            detection_on_dataset_item = defaultdict(list)
            __pred_match_total_annotation = np.zeros([len(iou_thresholds)],
                                                     dtype=int)

            for count, iou_threshold in enumerate(iou_thresholds):
                log.info("count, iou_threshold: {}, {}".format(
                    count, iou_threshold))

                ## Compute Average Precision at a set IoU threshold
                ## --------------------------------------------
                AP_per_image, precisions, recalls, gt_match, pred_match, overlaps, pred_match_scores, pred_match_class_ids = utils.compute_ap(
                    gt_boxes,
                    gt_class_ids,
                    gt_masks,
                    pred_boxes,
                    pred_class_ids,
                    pred_scores,
                    pred_masks,
                    iou_threshold=iou_threshold)

                __pred_match_total_annotation[count] += len(
                    pred_match_class_ids)

                ## compute and returns f1 score metric
                ## --------------------------------------------
                f1_per_image = utils.compute_f1score(precisions, recalls)

                ## Compute the recall at the given IoU threshold. It's an indication
                ## of how many GT boxes were found by the given prediction boxes.
                ## --------------------------------------------
                recall_bbox, positive_ids_bbox = utils.compute_recall(
                    gt_boxes, pred_boxes, iou_threshold)

                # log.info("len(precisions),precisions: {},{}".format(len(precisions), precisions))
                # log.info("len(recalls),recalls: {},{}".format(len(recalls), recalls))
                # log.info("len(pred_match_class_ids),pred_match_class_ids: {},{}".format(len(pred_match_class_ids), pred_match_class_ids))

                # log.info("AP_per_image: {}".format(AP_per_image))
                # log.info("len(overlaps),overlaps: {},{}".format(len(overlaps), overlaps))

                class_names = np.array(class_names)
                pred_match_class_names = list(class_names[np.where(
                    np.in1d(dataset.class_ids, pred_match_class_ids))[0]])
                class_names = list(class_names)

                detection_on_dataset_item['ap_per_image'].append(AP_per_image)
                detection_on_dataset_item['f1_per_image'].append(f1_per_image)
                detection_on_dataset_item['precisions'].append(precisions)
                detection_on_dataset_item['recalls'].append(list(recalls))
                detection_on_dataset_item['recall_bbox'].append(recall_bbox)
                detection_on_dataset_item['positive_ids_bbox'].append(
                    list(positive_ids_bbox))
                detection_on_dataset_item['gt_match'].append(list(gt_match))
                detection_on_dataset_item['pred_match'].append(
                    list(pred_match))
                detection_on_dataset_item['pred_match_scores'].append(
                    list(pred_match_scores))
                detection_on_dataset_item['overlaps_mask_iou'].append(
                    list(overlaps))
                detection_on_dataset_item['pred_match_class_ids'].append(
                    list(pred_match_class_ids))
                detection_on_dataset_item['pred_match_class_names'].append(
                    pred_match_class_names)
                detection_on_dataset_item[
                    'pred_match_total_annotation'].append(
                        len(pred_match_class_ids))
                detection_on_dataset_item['iou_thresholds'].append(
                    iou_threshold)

                ## TODO: ref temp-evaluate-viz.code.py

            detection_on_dataset.append(detection_on_dataset_item)
            pred_match_total_annotation.append(__pred_match_total_annotation)
        log.info("---x-x---")
    except Exception as e:
        log.info("Exception: {}".format(e))
        raise
    finally:
        log.info("--------X--------X--------X--------")
        T1 = time.time()

        evaluate_run_summary['total_execution_time'] = T1 - T0
        evaluate_run_summary[
            'pred_match_total_annotation'] = pred_match_total_annotation
        evaluate_run_summary['gt_total_annotation'] = gt_total_annotation
        evaluate_run_summary['pred_total_annotation'] = pred_total_annotation
        evaluate_run_summary['iou_thresholds'] = iou_thresholds
        evaluate_run_summary['execution_end_time'] = "{:%d%m%y_%H%M%S}".format(
            datetime.datetime.now())
        # evaluate_run_summary['detection_min_confidence'] = dnncfg.config['DETECTION_MIN_CONFIDENCE']
        evaluate_run_summary['remaining_num_images'] = remaining_num_images
        # evaluate_run_summary['total_images'] = num_images

        log.debug("evaluate_run_summary: {}".format(evaluate_run_summary))

        ## Save the image list for loading the response in VIA along with the images
        imagelist_filepath = os.path.join(filepath, 'annotations',
                                          "imagelist.csv")
        pd.DataFrame(imagelist).to_csv(imagelist_filepath)

        classification_reportfile_path = reportcfg[
            'classification_reportfile'] + '-per_dataset.json'
        with open(classification_reportfile_path, 'w') as fw:
            fw.write(common.numpy_to_json(detection_on_dataset))

        evaluate_run_summary_reportfile_path = reportcfg[
            'evaluate_run_summary_reportfile'] + '.json'
        with open(evaluate_run_summary_reportfile_path, 'w') as fw:
            fw.write(common.numpy_to_json(evaluate_run_summary))

        print("EVALUATE_REPORT:IMAGELIST:{}".format(imagelist_filepath))
        print(
            "EVALUATE_REPORT:METRIC:{}".format(classification_reportfile_path))
        print("EVALUATE_REPORT:SUMMARY:{}".format(
            evaluate_run_summary_reportfile_path))

        log.info("--------")

        return evaluate_run_summary
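utils.compute_f1score is not shown in this snippet; for reference, a standard element-wise F1 (harmonic mean of precision and recall) could be sketched as below. Whether the project helper matches this exactly is an assumption.

import numpy as np

def f1_score(precisions, recalls, eps=1e-12):
  """Element-wise harmonic mean of the precision and recall curves."""
  p = np.asarray(precisions, dtype=float)
  r = np.asarray(recalls, dtype=float)
  return 2.0 * p * r / (p + r + eps)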
def main():
  print("* Loading model...")
  
  ## TODO - write code to load the model here
  
  print("* Model loaded")

  ## continually poll for new images to process
  while True:
    ## attempt to grab a batch of images from the database, then
    ## initialize the image IDs and batch of images themselves
    queue = db.lrange(REDISCFG['image_queue'], 0, REDISCFG['batch_size'] - 1)
    image_ids = []
    batch = None

    ## loop over the queue
    for Q in queue:
      ## deserialize the object and obtain the input image
      Q = json.loads(Q.decode("utf-8"))
      image = helpers.base64_decode_image(Q["image"],
        REDISCFG['image_dtype'],
        (1, REDISCFG['image_height'], REDISCFG['image_width'], REDISCFG['image_chans']))

      ## check to see if the batch list is None
      if batch is None:
        batch = image
      ## otherwise, stack the data
      else:
        batch = np.vstack([batch, image])

      ## update the list of image IDs
      image_ids.append(Q["id"])

    ## check to see if we need to process the batch
    if len(image_ids) > 0:
      ## process the batch
      print("* Batch size: {}".format(batch.shape))
     
      ## TODO: predict in batch mode, modify the model code

      preds = model.predict(batch)
      results = imagenet_utils.decode_predictions(preds)

      ## loop over the image IDs and their corresponding set of
      ## results from our model
      for (image_id, result_set) in zip(image_ids, results):
        ## initialize the list of output
        output = []

        ## loop over the results and add them to the list of output
        for (imagenet_id, label, prob) in result_set:
          r = {"label": label, "probability": float(prob)}
          output.append(r)

        ## store the output in the database, using
        ## the image ID as the key so we can fetch the results
        
        # db.set(image_id, json.dumps(output))
        db.set(image_id, common.numpy_to_json(output))

      ## remove the set of images from our queue
      db.ltrim(REDISCFG['image_queue'], len(image_ids), -1)

    ## sleep for a small amount
    time.sleep(REDISCFG['server_sleep'])

  return
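The worker above pops base64-encoded image arrays from a Redis list; a hedged sketch of the producing side, assuming im is an HxWxC uint8 array already resized to the configured dimensions and that helpers.base64_decode_image on the worker expects this raw C-ordered byte layout.

import base64
import json
import uuid

def enqueue_image(db, im):
  ## serialize one image in the shape main() expects: (1, H, W, C)
  im = im.astype(REDISCFG['image_dtype']).reshape(
    (1, REDISCFG['image_height'], REDISCFG['image_width'], REDISCFG['image_chans']))
  payload = {
    "id": str(uuid.uuid4()),
    "image": base64.b64encode(im.copy(order="C")).decode("utf-8"),
  }
  db.rpush(REDISCFG['image_queue'], json.dumps(payload))
  return payload["id"]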
def evaluate(mode,
             cmdcfg,
             appcfg,
             modelcfg,
             dataset,
             datacfg,
             class_names,
             reportcfg,
             get_mask=True,
             auto_show=False):
    """API
  Execute the evaluation and generate the evaluation reports:
  classification report with different scores, confusion matrix and summary report.

  Ref:
  https://github.com/matterport/Mask_RCNN/blob/master/samples/shapes/train_shapes.ipynb
  """
    log.info("---------------------------->")

    dnncfg = get_dnncfg(cmdcfg.config)

    log_dir_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_LOGS')
    cmdcfg['log_dir_path'] = log_dir_path
    model = load_model_and_weights(mode, cmdcfg, appcfg)

    save_viz_and_json = reportcfg['save_viz_and_json']
    evaluate_no_of_result = reportcfg['evaluate_no_of_result']
    filepath = reportcfg['filepath']
    evaluate_run_summary = reportcfg['evaluate_run_summary']

    log.info("evaluate_no_of_result: {}".format(evaluate_no_of_result))

    detection_on_dataset = []
    ## TODO: put at right place
    iou_threshold_input = reportcfg['iou_threshold']
    # iou_thresholds = None
    # iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
    iou_thresholds = np.arange(0.5, 1.0, 0.05)
    size = len(iou_thresholds)
    _mAPs, _precisions, _recalls = size * [None], size * [None], size * [None]
    gt_total_annotation = 0
    pred_total_annotation = 0
    remaining_num_images = -1
    image_ids = dataset.image_ids
    num_images = len(image_ids)
    pred_match_total_annotation = []

    colors = viz.random_colors(len(class_names))
    log.info("len(colors), colors: {},{}".format(len(colors), colors))

    cc = dict(zip(class_names, colors))

    ## If an error occurs while iterating through the dataset, all the work done so far would be lost;
    ## therefore, save the data to disk in the finally clause
    via_jsonres = {}
    imagelist = []

    class_ids_dataset = dataset.class_ids
    class_names_model = modelcfg['classes']
    class_ids_model = np.arange(len(class_names_model))

    log.debug("class_names dataset: {}".format(class_names))
    log.debug("class_names_model: {}".format(class_names_model))
    log.debug("class_ids_dataset: {}".format(class_ids_dataset))
    log.debug("class_ids_model: {}".format(class_ids_model))

    ## class_names consists of BG at index 0
    ## class_names_model consists of BG at index 0
    class_ids_map = False
    class_names_common = class_names.copy()
    if class_names != modelcfg['classes']:
        class_ids_map = True
        class_names_common, class_ids_common_dataset, class_ids_common_model, gt_to_model_map = class_ids_of_model_to_dataset(
            np.array(class_names), class_ids_dataset,
            np.array(class_names_model), class_ids_model)

        log.info("class_names_common: {}".format(class_names_common))
        log.info(
            "class_ids_common_dataset: {}".format(class_ids_common_dataset))
        log.info("class_ids_common_model: {}".format(class_ids_common_model))
        ## TODO: Exception handling: if class_ids_of_model_to_dataset length is 1 then only BG is common

    class_names = np.array(class_names)

    T0 = time.time()

    try:
        for i, image_id in enumerate(image_ids):
            log.debug("-------")

            filepath_image_in = dataset.image_reference(image_id)
            image_filename = filepath_image_in.split(os.path.sep)[-1]
            image_name_without_ext = image_filename.split('.')[0]

            imagelist.append(filepath_image_in)
            log.debug("Running on {}".format(image_filename))

            if evaluate_no_of_result == i:
                log.info("evaluate_no_of_result reached: i: {}\n".format(i))
                break

            remaining_num_images = evaluate_no_of_result if evaluate_no_of_result and evaluate_no_of_result > 0 else num_images
            remaining_num_images = remaining_num_images - i - 1
            log.info(
                "To be evaluated remaining_num_images:...................{}".
                format(remaining_num_images))

            t0 = time.time()

            # im, gt_class_ids, gt_boxes, gt_masks, gt_active_class_ids = load_image_gt_without_resizing(dataset, datacfg, dnncfg, image_id)
            im, gt_image_meta, gt_class_ids, gt_boxes, gt_masks = modellib.load_image_gt(
                dataset, datacfg, dnncfg, image_id, use_mini_mask=False)
            molded_images = np.expand_dims(modellib.mold_image(im, dnncfg), 0)

            if class_ids_map:
                log.debug("Before gt_class_id_map...:")
                log.debug(
                    "len(gt_class_ids): {}\nTotal Unique classes: len(set(gt_class_ids)): {}\ngt_class_ids: {}"
                    .format(len(gt_class_ids), len(set(gt_class_ids)),
                            gt_class_ids))

                for _i, gt_id in enumerate(gt_class_ids):
                    gt_class_ids[_i] = gt_to_model_map[gt_id]

                log.debug("After gt_class_id_map...:")
                log.debug(
                    "len(gt_class_ids): {}\nTotal Unique classes: len(set(gt_class_ids)): {}\ngt_class_ids: {}"
                    .format(len(gt_class_ids), len(set(gt_class_ids)),
                            gt_class_ids))

            t1 = time.time()
            time_taken_imread = (t1 - t0)
            log.debug('Total time taken in time_taken_imread: %f seconds' %
                      (time_taken_imread))

            gt_total_annotation += len(gt_class_ids)

            log.info("\nGround Truth-------->")

            log.info("i,image_id:{},{}".format(i, image_id))

            log.debug(
                "len(gt_boxes), gt_boxes.shape, type(gt_boxes): {},{},{}".
                format(len(gt_boxes), gt_boxes.shape, type(gt_boxes)))
            log.debug(
                "len(gt_masks), gt_masks.shape, type(gt_masks): {},{},{}".
                format(len(gt_masks), gt_masks.shape, type(gt_masks)))

            log.debug("gt_boxes: {}".format(gt_boxes))
            log.debug("gt_masks: {}".format(gt_masks))

            log.info("--------")

            # Detect objects
            ##---------------------------------------------
            t2 = time.time()

            r = detect(model, im=im, verbose=1)[0]
            ## N - total number of predictions
            ## (N, 4)
            pred_boxes = r['rois']
            ## (H, W, N)
            pred_masks = r['masks']
            ## N
            pred_class_ids = r['class_ids']
            ## N
            pred_scores = r['scores']

            log.debug("Prediction on Groud Truth-------->")
            log.debug('len(r): {}'.format(len(r)))
            log.debug("len(gt_class_ids), gt_class_ids: {},{}".format(
                len(gt_class_ids), gt_class_ids))
            log.debug(
                "len(pred_class_ids), pred_class_ids, type(pred_class_ids): {},{},{}"
                .format(len(pred_class_ids), pred_class_ids,
                        type(pred_class_ids)))
            log.debug("len(pred_scores), pred_scores: {},{}".format(
                len(pred_scores), pred_scores))
            log.debug(
                "len(pred_boxes), pred_boxes.shape, type(pred_boxes): {},{},{}"
                .format(len(pred_boxes), pred_boxes.shape, type(pred_boxes)))
            log.debug(
                "len(pred_masks), pred_masks.shape, type(pred_masks): {},{},{}"
                .format(len(pred_masks), pred_masks.shape, type(pred_masks)))
            log.debug("--------")

            if class_ids_map:
                pred_class_ids_common_model_indices = np.where(
                    np.in1d(pred_class_ids, class_ids_common_model))[0]
                class_ids_common_model_pred_class_ids_indices = np.where(
                    np.in1d(class_ids_common_model, pred_class_ids))[0]
                # pred_class_ids_common_dataset_indices = np.where(np.in1d(class_ids_common_dataset, pred_class_ids_common_model_indices))[0]

                pred_boxes = pred_boxes[pred_class_ids_common_model_indices]
                pred_masks = pred_masks[...,
                                        pred_class_ids_common_model_indices]
                pred_class_ids = pred_class_ids[
                    pred_class_ids_common_model_indices]

                pred_scores = pred_scores[pred_class_ids_common_model_indices]

                log.debug(
                    "Prediction on Groud Truth: After Model class filtering-------->"
                )
                log.debug('len(r): {}'.format(len(r)))
                log.debug(
                    "len(pred_class_ids_common_model_indices), pred_class_ids_common_model_indices: {},{}"
                    .format(len(pred_class_ids_common_model_indices),
                            pred_class_ids_common_model_indices))

                log.debug(
                    "len(class_ids_common_model_pred_class_ids_indices), class_ids_common_model_pred_class_ids_indices: {},{}"
                    .format(len(class_ids_common_model_pred_class_ids_indices),
                            class_ids_common_model_pred_class_ids_indices))

                log.debug(
                    "len(class_ids_common_dataset[class_ids_common_model_pred_class_ids_indices]), class_ids_common_dataset[class_ids_common_model_pred_class_ids_indices]: {},{}"
                    .format(
                        len(class_ids_common_dataset[
                            class_ids_common_model_pred_class_ids_indices]),
                        class_ids_common_dataset[
                            class_ids_common_model_pred_class_ids_indices]))

                log.debug("len(gt_class_ids), gt_class_ids: {},{}".format(
                    len(gt_class_ids), gt_class_ids))
                log.debug(
                    "len(pred_class_ids), pred_class_ids, type(pred_class_ids): {},{},{}"
                    .format(len(pred_class_ids), pred_class_ids,
                            type(pred_class_ids)))
                log.debug("len(pred_scores), pred_scores: {},{}".format(
                    len(pred_scores), pred_scores))
                log.debug(
                    "len(pred_boxes), pred_boxes.shape, type(pred_boxes): {},{},{}"
                    .format(len(pred_boxes), pred_boxes.shape,
                            type(pred_boxes)))
                log.debug(
                    "len(pred_masks), pred_masks.shape, type(pred_masks): {},{},{}"
                    .format(len(pred_masks), pred_masks.shape,
                            type(pred_masks)))
                log.debug("--------")

            pred_total_annotation += len(pred_class_ids)

            t3 = time.time()
            time_taken_in_detect = (t3 - t2)
            log.info('TIME_TAKEN_IN_DETECT:%f seconds' %
                     (time_taken_in_detect))

            t4 = time.time()

            ## TODO: keep the GT VIA JSON response and the predicted VIA JSON response in separate data structures
            ## TODO: mAP calculation per class and over the entire dataset

            ## TODO: this does not help; need to flatten all the ground truths for the entire dataset.
            ## np.zeros(len(gt_for_all_images)); ideally the same number of predictions should be there.
            ## In short, the compute_matches function has to be re-written for the entire dataset.

            evaluate_run_summary['images'].append(image_filename)
            # evaluate_run_summary['gt_boxes'].append(gt_boxes)
            evaluate_run_summary['gt_class_ids'].append(list(gt_class_ids))
            # evaluate_run_summary['gt_masks'].append(gt_masks)
            # evaluate_run_summary['pred_boxes'].append(pred_boxes)
            evaluate_run_summary['pred_class_ids'].append(list(pred_class_ids))
            evaluate_run_summary['pred_scores'].append(list(pred_scores))
            # evaluate_run_summary['pred_masks'].append(pred_masks)
            evaluate_run_summary['gt_total_annotation_per_image'].append(
                len(gt_class_ids))
            evaluate_run_summary['pred_total_annotation_per_image'].append(
                len(pred_class_ids))

            detection_on_dataset_item = defaultdict(list)
            __pred_match_total_annotation = np.zeros([len(iou_thresholds)],
                                                     dtype=int)

            for count, iou_threshold in enumerate(iou_thresholds):
                log.info("count, iou_threshold: {}, {}".format(
                    count, iou_threshold))

                ## Compute Average Precision at a set IoU threshold
                ## --------------------------------------------
                AP_per_image, precisions, recalls, gt_match, pred_match, overlaps, pred_match_scores, pred_match_class_ids = utils.compute_ap(
                    gt_boxes,
                    gt_class_ids,
                    gt_masks,
                    pred_boxes,
                    pred_class_ids,
                    pred_scores,
                    pred_masks,
                    iou_threshold=iou_threshold)

                __pred_match_total_annotation[count] += len(
                    pred_match_class_ids)

                ## compute and returns f1 score metric
                ## --------------------------------------------
                f1_per_image = utils.compute_f1score(precisions, recalls)

                ## Compute the recall at the given IoU threshold. It's an indication
                ## of how many GT boxes were found by the given prediction boxes.
                ## --------------------------------------------
                recall_bbox, positive_ids_bbox = utils.compute_recall(
                    gt_boxes, pred_boxes, iou_threshold)

                # log.info("len(precisions),precisions: {},{}".format(len(precisions), precisions))
                # log.info("len(recalls),recalls: {},{}".format(len(recalls), recalls))
                # log.info("len(pred_match_class_ids),pred_match_class_ids: {},{}".format(len(pred_match_class_ids), pred_match_class_ids))

                # log.info("AP_per_image: {}".format(AP_per_image))
                # log.info("len(overlaps),overlaps: {},{}".format(len(overlaps), overlaps))

                pred_match_class_names = class_names[np.where(
                    np.in1d(dataset.class_ids, pred_match_class_ids))[0]]

                detection_on_dataset_item['ap_per_image'].append(AP_per_image)
                detection_on_dataset_item['f1_per_image'].append(f1_per_image)
                detection_on_dataset_item['precisions'].append(precisions)
                detection_on_dataset_item['recalls'].append(list(recalls))
                detection_on_dataset_item['recall_bbox'].append(recall_bbox)
                detection_on_dataset_item['positive_ids_bbox'].append(
                    list(positive_ids_bbox))
                detection_on_dataset_item['gt_match'].append(list(gt_match))
                detection_on_dataset_item['pred_match'].append(
                    list(pred_match))
                detection_on_dataset_item['pred_match_scores'].append(
                    list(pred_match_scores))
                detection_on_dataset_item['overlaps_mask_iou'].append(
                    list(overlaps))
                detection_on_dataset_item['pred_match_class_ids'].append(
                    list(pred_match_class_ids))
                detection_on_dataset_item['pred_match_class_names'].append(
                    list(pred_match_class_names))
                detection_on_dataset_item[
                    'pred_match_total_annotation'].append(
                        len(pred_match_class_ids))
                detection_on_dataset_item['iou_thresholds'].append(
                    iou_threshold)

                if save_viz_and_json and iou_threshold == float(
                        iou_threshold_input):
                    fext = ".png"
                    file_name = image_filename + fext
                    log.info("@IoU, SAVED_FILE_NAME: {},{}".format(
                        iou_threshold, file_name))
                    jsonres = viz.get_display_instances(im,
                                                        pred_boxes,
                                                        pred_masks,
                                                        pred_class_ids,
                                                        class_names_model,
                                                        pred_scores,
                                                        colors=cc,
                                                        show_bbox=True,
                                                        show_mask=True,
                                                        get_mask=get_mask,
                                                        filepath=filepath,
                                                        filename=file_name,
                                                        auto_show=auto_show)

                    ## Convert Json response to VIA Json response
                    ##---------------------------------------------
                    # size_image = 0
                    size_image = os.path.getsize(filepath_image_in)
                    jsonres["filename"] = image_filename
                    jsonres["size"] = size_image
                    jsonres['file_attributes']['iou'] = iou_threshold

                    ## TODO: to store the results in MongoDB, '.' (dot) must not be present in the JSON keys;
                    ## but to visualize the results in the VIA tool, the dot and the file size are expected in the key
                    # via_jsonres[image_filename.replace('.','-')+str(size_image)] = json.loads(common.numpy_to_json(jsonres))
                    via_jsonres[image_filename + str(size_image)] = json.loads(
                        common.numpy_to_json(jsonres))
                    # log.debug("jsonres: {}".format(jsonres))
                    # log.debug("via_jsonres[image_filename+str(size_image)]: {}".format(via_jsonres[image_filename+str(size_image)]))

            detection_on_dataset.append(detection_on_dataset_item)
            pred_match_total_annotation.append(__pred_match_total_annotation)

            mean_ap_of_per_image, mean_f1_of_per_image, mean_recall_bbox_of_per_image, total_pred_match_total_annotation_of_per_image = compute_ap_of_per_image_over_dataset(
                detection_on_dataset)

            ## TODO: use detection_on_dataset for evaluation over entire dataset and per class
            ## fix the TODO items within compute_ap_dataset
            # _mAPs, _precisions, _recalls = compute_ap_dataset(detection_on_dataset, iou_thresholds)

        log.info("---x-x---")
    except Exception as e:
        log.info("Exception: {}".format(e))
        log.error("Fatal error in main loop".format(e), exc_info=True)
        # log.error('Error occurred ' + str(e))
        raise
    finally:
        log.info("--------X--------X--------X--------")
        T1 = time.time()

        evaluate_run_summary['total_execution_time'] = T1 - T0

        evaluate_run_summary['mAP'] = _mAPs
        evaluate_run_summary['precision'] = _precisions
        evaluate_run_summary['recall'] = _recalls

        evaluate_run_summary['class_names_dataset'] = class_names
        evaluate_run_summary['class_ids_dataset'] = dataset.class_ids
        evaluate_run_summary['class_names_model'] = class_names_model
        evaluate_run_summary['class_ids_model'] = class_ids_model
        evaluate_run_summary['class_names_common'] = class_names_common

        evaluate_run_summary['mean_ap_of_per_image'] = mean_ap_of_per_image
        evaluate_run_summary['mean_f1_of_per_image'] = mean_f1_of_per_image
        evaluate_run_summary[
            'mean_recall_bbox_of_per_image'] = mean_recall_bbox_of_per_image
        evaluate_run_summary[
            'total_pred_match_total_annotation_of_per_image'] = total_pred_match_total_annotation_of_per_image

        evaluate_run_summary[
            'pred_match_total_annotation'] = pred_match_total_annotation
        evaluate_run_summary['gt_total_annotation'] = gt_total_annotation
        evaluate_run_summary['pred_total_annotation'] = pred_total_annotation
        evaluate_run_summary['iou_thresholds'] = iou_thresholds
        evaluate_run_summary['execution_end_time'] = "{:%d%m%y_%H%M%S}".format(
            datetime.datetime.now())
        # evaluate_run_summary['detection_min_confidence'] = dnncfg.config['DETECTION_MIN_CONFIDENCE']
        evaluate_run_summary['remaining_num_images'] = remaining_num_images

        log.debug("evaluate_run_summary: {}".format(evaluate_run_summary))

        classification_reportfile_path = reportcfg[
            'classification_reportfile'] + '-per_dataset.json'
        with open(classification_reportfile_path, 'w') as fw:
            fw.write(common.numpy_to_json(detection_on_dataset))

        evaluate_run_summary_reportfile_path = reportcfg[
            'evaluate_run_summary_reportfile'] + '.json'
        with open(evaluate_run_summary_reportfile_path, 'w') as fw:
            fw.write(common.numpy_to_json(evaluate_run_summary))

        ## Save the image list for loading the response in VIA along with the images
        imagelist_filepath = os.path.join(filepath, 'annotations',
                                          "imagelist.csv")
        pd.DataFrame(imagelist).to_csv(imagelist_filepath)

        ## https://stackoverflow.com/questions/12309269/how-do-i-write-json-data-to-a-file
        via_jsonres_filepath = os.path.join(filepath, 'annotations',
                                            "annotations.json")
        if via_jsonres and len(via_jsonres) > 0:
            with open(via_jsonres_filepath, 'w') as fw:
                fw.write(json.dumps(via_jsonres))

        print("EVALUATE_REPORT:ANNOTATION:{}".format(via_jsonres_filepath))
        print("EVALUATE_REPORT:IMAGELIST:{}".format(imagelist_filepath))
        print(
            "EVALUATE_REPORT:METRIC:{}".format(classification_reportfile_path))
        print("EVALUATE_REPORT:SUMMARY:{}".format(
            evaluate_run_summary_reportfile_path))

        log.info("--------")

        return evaluate_run_summary
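A sketch of the reportcfg dict that evaluate/execute_eval read from; the keys are the ones accessed above, the values are illustrative assumptions.

from collections import defaultdict

reportcfg = {
  'save_viz_and_json': True,
  'evaluate_no_of_result': 10,     # evaluate only the first 10 images; -1 runs the whole dataset
  'iou_threshold': 0.5,
  'filepath': '/data/eval/run-001',
  'classification_reportfile': '/data/eval/run-001/classification',
  'evaluate_run_summary_reportfile': '/data/eval/run-001/summary',
  'evaluate_run_summary': defaultdict(list),
}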