def net_inference(model, reqs):
    CTX.logger.info("inference begin...")
    # datas = json.loads(args)
    predictor = model['predictor']
    classes_dict = model['labels']['class']
    # default per-class thresholds
    threshold_dict = model['labels']['minMt']  # minModelThreshold
    rets = []

    nms = py_nms_wrapper(config.TEST.NMS)
    box_voting = py_box_voting_wrapper(config.TEST.BOX_VOTING_IOU_THRESH,
                                       config.TEST.BOX_VOTING_SCORE_THRESH,
                                       with_nms=True)
    try:
        for data in reqs:
            try:
                im = load_image(data['data']['uri'], body=data['data']['body'])
            except ErrorBase as e:
                rets.append({"code": e.code, "message": e.message, "result": None})
                continue
                # return [], 400, 'load image error'

            # reject images with an extreme aspect ratio
            if im.shape[0] > im.shape[1]:
                long_side, short_side = im.shape[0], im.shape[1]
            else:
                long_side, short_side = im.shape[1], im.shape[0]
            if short_side > 0 and float(long_side) / float(short_side) > 50.0:
                msg = "aspect ratio is too large: long_side/short_side must not exceed 50.0"
                # raise ErrorBase(400, msg)
                rets.append({"code": 400, "message": msg, "result": None})
                continue

            data_batch, data_names, im_scale = generate_batch(im)
            scores, boxes, data_dict = im_detect(predictor, data_batch,
                                                 data_names, im_scale, config)
            det_ret = []
            # the labels.csv file does not include the background class
            for cls_index in sorted(classes_dict.keys()):
                cls_ind = cls_index
                cls_name = classes_dict.get(cls_ind)
                cls_boxes = (boxes[0][:, 4:8] if config.CLASS_AGNOSTIC
                             else boxes[0][:, 4 * cls_ind:4 * (cls_ind + 1)])
                cls_scores = scores[0][:, cls_ind, np.newaxis]
                threshold = float(threshold_dict[cls_ind])
                keep = np.where(cls_scores > threshold)[0]
                dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
                keep = nms(dets)
                det_ret.extend(
                    _build_result(det, cls_name, cls_ind, model['labels'])
                    for det in dets[keep, :])
            # get review value
            rets.append(dict(code=0, message='', result=dict(detections=det_ret)))
    except Exception as e:
        # print(traceback.format_exc())
        CTX.logger.info("inference error: %s" % traceback.format_exc())
        return [], 599, str(e)
    return rets, 0, ''
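
# The block below is a minimal usage sketch for net_inference, kept as a
# comment so it does not affect module imports. The request/response shapes
# are derived from the code above; the sample URI and the init-style loader
# that produces `model` are illustrative assumptions, not part of this file.
#
#   model = ...  # loaded by the repo's init routine ('predictor' + 'labels')
#   reqs = [{"data": {"uri": "http://example.com/test.jpg", "body": None}}]
#   rets, code, err = net_inference(model, reqs)
#   # a successful entry looks like:
#   #   {"code": 0, "message": "", "result": {"detections": [...]}}
#   # a failed entry (bad image or aspect ratio > 50) looks like:
#   #   {"code": 400, "message": "<reason>", "result": None}
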
def demo_net(cfg, predictor, dataset, image_set, root_path, dataset_path,
             thresh, vis=False, use_box_voting=False, test_file='test.txt',
             out_prefix='output', vis_image_dir='vis'):
    """
    generate data_batch -> im_detect -> post process for every image listed
    in test_file, then dump per-image JSON results and a pickled cache.
    :param predictor: Predictor
    :param test_file: text file; the first whitespace-separated field of each
                      line is the image index/URL
    :param out_prefix: prefix for the generated result files
    :return: None
    """
    # post-processing: NMS and optional box voting
    nms = py_nms_wrapper(config.TEST.NMS)
    box_voting = py_box_voting_wrapper(config.TEST.BOX_VOTING_IOU_THRESH,
                                       config.TEST.BOX_VOTING_SCORE_THRESH,
                                       with_nms=True)

    with open(test_file) as f:
        image_set_index = [x.strip().split(' ')[0] for x in f.readlines()]
    num_images = len(image_set_index)
    num_classes = len(CLASSES)
    all_boxes = [[[] for _ in xrange(num_images)] for _ in xrange(num_classes)]
    out_score_list, out_json_list = [], []
    jsonlistwithscore = [[] for _ in xrange(num_images)]
    jsonlistwithoutscore = [[] for _ in xrange(num_images)]

    predict_url(cfg, predictor, image_set_index, nms, box_voting, all_boxes,
                jsonlistwithscore, jsonlistwithoutscore, thresh,
                vis=False, use_box_voting=False, num_gpu=1)

    with open(out_prefix + '_vali.txt', 'w') as fout, \
            open(out_prefix + '_vali_score.txt', 'w') as fout_score:
        for i in range(num_images):
            fout.write(json.dumps(jsonlistwithoutscore[i]) + '\n')
            fout.flush()
            fout_score.write(json.dumps(jsonlistwithscore[i]) + '\n')
            fout_score.flush()

    print("num of images: detection:{}, gt:{}".format(len(all_boxes[0]), num_images))
    # assert len(all_boxes) == num_images, 'calculations not complete'

    # save results
    cache_folder = os.path.join(root_path, 'cache')
    if not os.path.exists(cache_folder):
        os.mkdir(cache_folder)
    cache_file = os.path.join(
        cache_folder,
        dataset + '_' + image_set + '_' + out_prefix + '_detections.pkl')
    with open(cache_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
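
# Notes on the batch-validation demo_net above (derived from the code; the
# file layout is stated as observed rather than guaranteed):
# - test_file: one image per line; only the first whitespace-separated field
#   is used and handed to predict_url as the image index/URL.
# - outputs: <out_prefix>_vali.txt (one JSON detection list per image, without
#   scores, as the variable names suggest), <out_prefix>_vali_score.txt (the
#   same with scores), and a pickled all_boxes cache under <root_path>/cache/.
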
def demo_net(predictor, dataset, image_set, root_path, dataset_path, thresh,
             vis=False, vis_image_dir='vis', use_box_voting=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param vis: if True, save images with detections drawn to vis_image_dir
    :return: None
    """
    # post-processing: NMS and optional box voting
    nms = py_nms_wrapper(config.TEST.NMS)
    box_voting = py_box_voting_wrapper(config.TEST.BOX_VOTING_IOU_THRESH,
                                       config.TEST.BOX_VOTING_SCORE_THRESH,
                                       with_nms=True)

    image_set_index_file = os.path.join(dataset_path, 'DET', 'ImageSets', 'DET',
                                        image_set + '.txt')
    assert os.path.exists(image_set_index_file), \
        image_set_index_file + ' not found'
    with open(image_set_index_file) as f:
        image_set_index = [x.strip().split(' ')[0] for x in f.readlines()]

    num_images = len(image_set_index)
    num_classes = len(CLASSES)
    all_boxes = [[[] for _ in xrange(num_images)] for _ in xrange(num_classes)]

    i = 0
    for index in image_set_index:
        image_file = image_path_from_index(index, dataset_path, image_set)
        print("processing {}/{} image:{}".format(i, num_images, image_file))
        im = cv2.imread(image_file)
        data_batch, data_names, im_scale = generate_batch(im)
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale, config)

        for cls in CLASSES:
            cls_ind = CLASSES.index(cls)
            # print cls_ind, 4 * cls_ind, 4 * (cls_ind + 1), boxes[0], boxes[0][:, 4 * cls_ind:4 * (cls_ind + 1)]
            # cls_boxes = boxes[0][:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_boxes = (boxes[0][:, 4:8] if config.CLASS_AGNOSTIC
                         else boxes[0][:, 4 * cls_ind:4 * (cls_ind + 1)])
            cls_scores = scores[0][:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= thresh)[0]
            cls_dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(cls_dets)

            # apply box voting after nms
            if use_box_voting:
                nms_cls_dets = cls_dets[keep, :]
                all_boxes[cls_ind][i] = box_voting(nms_cls_dets, cls_dets)
            else:
                all_boxes[cls_ind][i] = cls_dets[keep, :]

        # index 0 is the background class, so leave it empty
        boxes_this_image = [[]] + [all_boxes[j][i] for j in xrange(1, len(CLASSES))]
        i += 1

        if vis:
            # vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
            if not os.path.exists(vis_image_dir):
                os.mkdir(vis_image_dir)
            result_file = os.path.join(
                vis_image_dir,
                index.strip().split('/')[-1] + '_result' + '.JPEG')
            print('results saved to %s' % result_file)
            im = draw_all_detection(data_dict['data'].asnumpy(),
                                    boxes_this_image, CLASSES, im_scale)
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            cv2.imwrite(result_file, im)

    print("num of images: detection:{}, gt:{}".format(len(all_boxes[0]), num_images))
    # assert len(all_boxes) == num_images, 'calculations not complete'

    # save results
    cache_folder = os.path.join(root_path, 'cache')
    if not os.path.exists(cache_folder):
        os.mkdir(cache_folder)
    cache_file = os.path.join(cache_folder,
                              dataset + '_' + image_set + '_detections.pkl')
    with open(cache_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
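
# A minimal invocation sketch for the ImageSet-driven demo_net above, assuming
# a predictor has already been built by the repo's model-loading code and that
# dataset_path follows the DET/ImageSets/DET/<image_set>.txt layout read above.
# The concrete dataset name and paths are illustrative assumptions.
#
#   demo_net(predictor,
#            dataset='imagenet',
#            image_set='val',
#            root_path='./data',
#            dataset_path='./data/ILSVRC',
#            thresh=0.5,
#            vis=True,
#            vis_image_dir='vis',
#            use_box_voting=False)
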
def net_inference(model):
    """
    generate data_batch -> im_detect -> post process for the local debug
    images under /tmp/eval/init/images, dumping pre/post-NMS detections of
    one reference image to CSV.
    :param model: dict with 'predictor', 'classes', 'threshold', 'thresholds'
    :return: None
    """
    # datas = json.loads(args)
    predictor = model['predictor']
    classes = model['classes']
    threshold = model['threshold']
    thresholds = model['thresholds']
    rets = []

    nms = py_nms_wrapper(config.TEST.NMS)
    box_voting = py_box_voting_wrapper(config.TEST.BOX_VOTING_IOU_THRESH,
                                       config.TEST.BOX_VOTING_SCORE_THRESH,
                                       with_nms=True)
    try:
        time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        fileOp = "/tmp/eval/init/20170930_guns_1083-begin-" + time_str + '.csv'
        fileOp_op = open(fileOp, 'w')
        time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        fileOp_1 = "/tmp/eval/init/20170930_guns_1083-end-" + time_str + '.csv'
        fileOp_1_op = open(fileOp_1, 'w')
        fileOp_2 = "/tmp/eval/init/20170930_guns_1083-image-" + time_str + '.jpg'

        for i in sorted(os.listdir("/tmp/eval/init/images")):
            imageFile = os.path.join("/tmp/eval/init/images", i)
            try:
                im = load_image(imageFile, 50.0)
                # cv2.imwrite(fileOp_2, im)
                print(im[0, :])
                # np.savetxt(fileOp_2, im, delimiter=",")
            except ErrorBase as e:
                rets.append({"code": e.code, "message": e.message})
                continue

            data_batch, data_names, im_scale = generate_batch(im)
            print("*" * 100)
            scores, boxes, data_dict = im_detect(predictor, data_batch,
                                                 data_names, im_scale, config)
            det_ret = []
            for cls_index, cls in enumerate(classes[1:], start=1):
                if len(cls) > 1:
                    cls_ind = int(cls[0])
                    cls_name = cls[1]
                else:
                    cls_ind = cls_index
                    cls_name = cls[0]
                cls_boxes = (boxes[0][:, 4:8] if config.CLASS_AGNOSTIC
                             else boxes[0][:, 4 * cls_ind:4 * (cls_ind + 1)])
                cls_scores = scores[0][:, cls_ind, np.newaxis]
                # per-class threshold overrides the global one when available
                if len(classes) <= len(thresholds):
                    threshold = thresholds[cls_ind]
                keep = np.where(cls_scores >= threshold)[0]
                dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]

                if "20170930_guns_1083.jpg" in imageFile:
                    # dump raw (pre-NMS) detections of the reference image
                    np.savetxt(fileOp, dets, delimiter=",")

                keep = nms(dets)

                if "20170930_guns_1083.jpg" in imageFile:
                    # dump post-NMS detections of the reference image
                    np.savetxt(fileOp_1, dets[keep, :], delimiter=",")

                det_ret.extend(
                    _build_result(det, cls_name, cls_ind)
                    for det in dets[keep, :])

            rets.append(dict(code=0,
                             message=imageFile,
                             result=json.dumps(dict(detections=det_ret))))
    except Exception as e:
        print(traceback.format_exc())