def load_image(self, image_index):
    path = os.path.join(self.images_path, self.image_names[image_index] + '.jpg')
    return read_image_bgr(path)
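# Hedged usage sketch: load_image above is presumably a method of a keras-retinanet-style
# data generator. The helper below is illustrative only; check_generator_images, the
# min_side/max_side defaults, and the assumption that the generator exposes image_names
# are not taken from this repo, only read_image_bgr / preprocess_image / resize_image are.
def check_generator_images(generator, min_side=800, max_side=1333):
    for i in range(len(generator.image_names)):
        img = generator.load_image(i)                       # BGR ndarray
        img = preprocess_image(img.copy())                  # same preprocessing as at inference time
        img, scale = resize_image(img, min_side=min_side, max_side=max_side)
        print(i, img.shape, 'scale = {:.3f}'.format(scale))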
# test_img_list = test_img_list[:1000]
print('Dataset {} total images: {}'.format(current_config.voc_sub_dir, len(test_img_list)))
for idx, imgf in enumerate(test_img_list):
    # imgfp = os.path.join(config.test_images_path, imgf)
    imgfp = imgf['filepath']
    # if test_img_list[idx]['filename'] == '000227.jpg':
    #     print(idx, test_img_list[idx])
    #     exit()
    if not os.path.isfile(imgfp):
        continue
    try:
        img = read_image_bgr(imgfp)
    except Exception:
        continue

    img = preprocess_image(img.copy())
    img, scale = resize_image(img, min_side=config.img_min_size, max_side=config.img_max_size)
    orig_image = read_image_rgb(imgfp)

    _, _, detections = model.predict_on_batch(np.expand_dims(img, axis=0))

    # Clamp predicted box coordinates to the (resized) image bounds.
    detections[:, :, 0] = np.maximum(0, detections[:, :, 0])
    detections[:, :, 1] = np.maximum(0, detections[:, :, 1])
    detections[:, :, 2] = np.minimum(img.shape[1], detections[:, :, 2])
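# Equivalent, more compact clamping (illustrative sketch, not the repo's code):
# clip x1/x2 to [0, width] and y1/y2 to [0, height] of the resized image in one call.
h, w = img.shape[0], img.shape[1]
detections[:, :, :4] = np.clip(detections[:, :, :4], 0, [w, h, w, h])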
def inference():
    set_runtime_environment()
    start_time = time.time()

    config = Config('configRetinaNet.json')
    wpath = config.trained_weights_path
    result_path = config.test_result_path
    txt_path = result_path.replace('results', 'txt')
    classes = config.classes

    if not os.path.exists(result_path):
        os.makedirs(result_path)
    if not os.path.exists(txt_path):
        os.makedirs(txt_path)

    model, _ = resnet_retinanet(len(classes), backbone=config.type, weights='imagenet', nms=True, config=config)
    model.load_weights(wpath, by_name=True, skip_mismatch=True)

    files = sorted(os.listdir(config.test_images_path))
    for nimage, imgf in enumerate(files):
        # if imgf not in ['ship0201606110201801.jpg', 'ship0201606110201902.jpg', 'ship02016061102012014.jpg', 'ship0201711030202902.jpg']:
        #     continue
        # if nimage >= int(len(files) * 0.1):
        #     break
        imgfp = os.path.join(config.test_images_path, imgf)
        if not os.path.isfile(imgfp):
            continue
        try:
            img = read_image_bgr(imgfp)
        except Exception:
            continue

        img = preprocess_image(img.copy())
        img, scale = resize_image(img, min_side=config.img_min_size, max_side=config.img_max_size)

        _, _, detections = model.predict_on_batch(np.expand_dims(img, axis=0))

        # Clamp predicted box coordinates to the (resized) image bounds.
        detections[:, :, 0] = np.maximum(0, detections[:, :, 0])
        detections[:, :, 1] = np.maximum(0, detections[:, :, 1])
        detections[:, :, 2] = np.minimum(img.shape[1], detections[:, :, 2])
        detections[:, :, 3] = np.minimum(img.shape[0], detections[:, :, 3])

        # Undo the resize so boxes are in original-image coordinates.
        detections[0, :, :4] /= scale

        # Per-class confidence scores: keep everything above 0.05, then the top 100 overall.
        scores = detections[0, :, 4:]
        indices = np.where(detections[0, :, 4:] >= 0.05)
        scores = scores[indices]
        scores_sort = np.argsort(-scores)[:100]

        image_boxes = detections[0, indices[0][scores_sort], :4]
        image_scores = np.expand_dims(detections[0, indices[0][scores_sort], 4 + indices[1][scores_sort]], axis=1)
        image_detections = np.append(image_boxes, image_scores, axis=1)
        image_predicted_labels = indices[1][scores_sort]

        # Write one "<class> <xmin> <ymin> <xmax> <ymax> <score>" line per detection.
        txtfile = imgf.replace('.jpg', '.txt')
        realpath = os.path.join(txt_path, txtfile)
        with open(realpath, 'w', encoding='utf-8') as f:
            for i, box in enumerate(image_boxes):
                xmin, ymin, xmax, ymax = int(box[0]), int(box[1]), int(box[2]), int(box[3])
                f.write('{} {} {} {} {} {}\n'.format(
                    classes[image_predicted_labels[i]], xmin, ymin, xmax, ymax, image_scores[i][0]))

        print("Wrote txt '{}' time: {:.2f}s, boxes: {}".format(txtfile, time.time() - start_time, len(image_boxes)))
        start_time = time.time()
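# Minimal worked example of the score-filtering step in inference() above, on a toy
# detections array. Only the numpy calls mirror the function; the variable names
# (rows, cols, kept, order) and the toy numbers are illustrative.
import numpy as np

# 1 image, 3 boxes, 2 classes: each row is [x1, y1, x2, y2, score_class0, score_class1].
detections = np.array([[
    [10, 10, 50, 50, 0.90, 0.02],
    [20, 20, 60, 60, 0.01, 0.30],
    [ 5,  5, 40, 40, 0.04, 0.04],
]], dtype=np.float32)

scores = detections[0, :, 4:]              # (3, 2) per-class scores
rows, cols = np.where(scores >= 0.05)      # rows = box index, cols = class index
kept = scores[rows, cols]                  # [0.90, 0.30]; box 2 is filtered out
order = np.argsort(-kept)[:100]            # highest score first, keep at most 100

boxes  = detections[0, rows[order], :4]    # boxes of the surviving detections
labels = cols[order]                       # class index per surviving detection
confs  = detections[0, rows[order], 4 + cols[order]]
print(boxes, labels, confs)                # box 0 / class 0 (0.90), box 1 / class 1 (0.30)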