            # Inside the detection loop of mAP_CenterNet.detect_image:
            # write one detection per line as "class score left top right bottom".
            predicted_class = self.class_names[int(c)]
            score = str(top_conf[i])
            top, left, bottom, right = boxes[i]

            f.write("%s %s %s %s %s %s\n" % (
                predicted_class, score[:6],
                str(int(left)), str(int(top)), str(int(right)), str(int(bottom))))

        f.close()
        return


centernet = mAP_CenterNet()
image_ids = open('VOCdevkit/VOC2007/ImageSets/Main/test.txt').read().strip().split()

# Create the directory layout expected by the mAP tool.
if not os.path.exists("./input"):
    os.makedirs("./input")
if not os.path.exists("./input/detection-results"):
    os.makedirs("./input/detection-results")
if not os.path.exists("./input/images-optional"):
    os.makedirs("./input/images-optional")

for image_id in tqdm(image_ids):
    image_path = "./VOCdevkit/VOC2007/JPEGImages/" + image_id + ".jpg"
    image = Image.open(image_path)
    # Uncomment to also copy the images for visualization during mAP computation.
    # image.save("./input/images-optional/" + image_id + ".jpg")
    centernet.detect_image(image_id, image)

print("Conversion completed!")
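The mAP tool also needs matching ground-truth files under ./input/ground-truth, one text file per image. That step is not shown above; the following is a minimal sketch of how it could be generated from the standard VOC 2007 XML annotations (the paths and the simple "class left top right bottom" line format are assumptions based on the layout used above, not code from the original script).

import os
import xml.etree.ElementTree as ET
from tqdm import tqdm

image_ids = open('VOCdevkit/VOC2007/ImageSets/Main/test.txt').read().strip().split()

if not os.path.exists("./input/ground-truth"):
    os.makedirs("./input/ground-truth")

for image_id in tqdm(image_ids):
    with open("./input/ground-truth/" + image_id + ".txt", "w") as f:
        root = ET.parse("VOCdevkit/VOC2007/Annotations/" + image_id + ".xml").getroot()
        for obj in root.findall("object"):
            obj_name = obj.find("name").text
            bndbox = obj.find("bndbox")
            left = bndbox.find("xmin").text
            top = bndbox.find("ymin").text
            right = bndbox.find("xmax").text
            bottom = bndbox.find("ymax").text
            # One ground-truth box per line: "class left top right bottom".
            f.write("%s %s %s %s %s\n" % (obj_name, left, top, right, bottom))

print("Ground-truth conversion completed!")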
            # Tail of the COCO version of mAP_CenterNet.detect_image: clip the box
            # to the image and store one detection dict in COCO results format.
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

            result["image_id"] = int(image_id)
            result["category_id"] = clsid2catid[c]
            result["bbox"] = [float(left), float(top), float(right - left), float(bottom - top)]
            result["score"] = float(top_conf[i])
            results.append(result)

        return results


centernet = mAP_CenterNet()

jpg_names = os.listdir("./coco_dataset/val2017")

with open("./coco_dataset/eval_results.json", "w") as f:
    results = []
    for jpg_name in tqdm(jpg_names):
        if jpg_name.endswith("jpg"):
            image_path = "./coco_dataset/val2017/" + jpg_name
            image = Image.open(image_path)
            # If image saving is enabled here, the images can be visualized later when computing the mAP.
            results = centernet.detect_image(jpg_name.split(".")[0], image, results)
    json.dump(results, f)
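Once eval_results.json has been written, the COCO-style mAP can be computed with pycocotools. A minimal sketch, assuming the val2017 annotation file lives at ./coco_dataset/annotations/instances_val2017.json (that path is an assumption; adjust it to your dataset layout):

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# Load the ground truth and the detection results written above.
cocoGt = COCO("./coco_dataset/annotations/instances_val2017.json")  # assumed path
cocoDt = cocoGt.loadRes("./coco_dataset/eval_results.json")

cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()

summarize() prints the standard COCO metrics (AP@[.50:.95], AP50, AP75, and the size-specific AP/AR values) to stdout.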