def eval_list(cfgfile, weightfile, imglist):
    # m = TinyYoloFace14Net()
    # m.eval()
    # m.load_darknet_weights(tiny_yolo_weight)

    # Build the network from the cfg file and load the trained weights.
    m = Darknet(cfgfile)
    m.eval()
    m.load_weights(weightfile)
    eval_wid = m.width
    eval_hei = m.height

    use_cuda = 1
    if use_cuda:
        m.cuda()

    conf_thresh = 0.25
    nms_thresh = 0.4
    iou_thresh = 0.5
    min_box_scale = 8. / m.width

    with open(imglist) as fp:
        lines = fp.readlines()

    total = 0.0
    proposals = 0.0
    correct = 0.0
    lineId = 0
    avg_iou = 0.0
    for line in lines:
        img_path = line.rstrip()
        if img_path[0] == '#':
            continue
        lineId = lineId + 1
        # Map the image path to its ground-truth label file.
        lab_path = img_path.replace('images', 'labels')
        lab_path = lab_path.replace('JPEGImages', 'labels')
        lab_path = lab_path.replace('.jpg', '.txt').replace('.png', '.txt')
        # truths = read_truths(lab_path)
        truths = read_truths_args(lab_path, min_box_scale)
        # print(truths)

        img = Image.open(img_path).convert('RGB').resize((eval_wid, eval_hei))
        boxes = do_detect(m, img, conf_thresh, nms_thresh, use_cuda)
        if False:
            savename = "tmp/%06d.jpg" % (lineId)
            print("save %s" % savename)
            plot_boxes(img, boxes, savename)

        total = total + truths.shape[0]

        # Count detections above the confidence threshold as proposals.
        for i in range(len(boxes)):
            if boxes[i][4] > conf_thresh:
                proposals = proposals + 1

        # A ground-truth box counts as correct when its best-matching detection
        # exceeds the IoU threshold.
        for i in range(truths.shape[0]):
            box_gt = [truths[i][1], truths[i][2], truths[i][3], truths[i][4], 1.0]
            best_iou = 0
            for j in range(len(boxes)):
                iou = bbox_iou(box_gt, boxes[j], x1y1x2y2=False)
                best_iou = max(iou, best_iou)
            if best_iou > iou_thresh:
                avg_iou += best_iou
                correct = correct + 1

    precision = 1.0 * correct / proposals
    recall = 1.0 * correct / total
    fscore = 2.0 * precision * recall / (precision + recall)
    print("%d IOU: %f, Recall: %f, Precision: %f, Fscore: %f\n" % (lineId - 1, avg_iou / correct, recall, precision, fscore))
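
# Hedged usage sketch for eval_list: the helper name and paths below are
# illustrative placeholders, not part of this repo. The image-list file is
# expected to contain one image path per line, with ground-truth label files
# reachable through the 'images'->'labels' / 'JPEGImages'->'labels'
# substitutions performed above.
def _example_eval_list():
    # placeholder paths -- replace with your own cfg, trained weights and image list
    eval_list('cfg/yolo.cfg', 'backup/yolo.weights', 'data/2007_test.txt')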
def valid(datacfg, cfgfile, weightfile, outfile):
    options = read_data_cfg(datacfg)
    valid_images = options['valid']
    # backup = cfg.backup
    # Derive the results directory from the weight-file path: results/<backup>/e<ckpt>.
    backup = weightfile.split('/')[-2]
    ckpt = weightfile.split('/')[-1].split('.')[0]
    prefix = 'results/' + backup.split('/')[-1] + '/e' + ckpt
    print('saving to: ' + prefix)
    names = cfg.classes

    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    m = Darknet(cfgfile)
    m.print_network()
    m.load_weights(weightfile)
    m.cuda()
    m.eval()

    valid_dataset = dataset.listDataset(valid_images, shape=(m.width, m.height),
                                        shuffle=False,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                        ]))
    valid_batchsize = 2
    assert(valid_batchsize > 1)

    kwargs = {'num_workers': 4, 'pin_memory': True}
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=valid_batchsize, shuffle=False, **kwargs)

    # One output file per class; each line holds: fileId prob x1 y1 x2 y2.
    fps = [0] * m.num_classes
    if not os.path.exists(prefix):
        # os.mkdir(prefix)
        os.makedirs(prefix)
    for i in range(m.num_classes):
        buf = '%s/%s%s.txt' % (prefix, outfile, names[i])
        fps[i] = open(buf, 'w')

    lineId = -1

    conf_thresh = 0.005
    nms_thresh = 0.45
    for batch_idx, (data, target) in enumerate(valid_loader):
        data = data.cuda()
        data = Variable(data, volatile=True)
        output = m(data).data
        batch_boxes = get_region_boxes(output, conf_thresh, m.num_classes,
                                       m.anchors, m.num_anchors, 0, 1)
        for i in range(output.size(0)):
            lineId = lineId + 1
            fileId = os.path.basename(valid_files[lineId]).split('.')[0]
            width, height = get_image_size(valid_files[lineId])
            print(valid_files[lineId])
            boxes = batch_boxes[i]
            boxes = nms(boxes, nms_thresh)
            for box in boxes:
                # Convert normalized center-size boxes to pixel corner coordinates.
                x1 = (box[0] - box[2] / 2.0) * width
                y1 = (box[1] - box[3] / 2.0) * height
                x2 = (box[0] + box[2] / 2.0) * width
                y2 = (box[1] + box[3] / 2.0) * height

                det_conf = box[4]
                # import pdb
                # pdb.set_trace()
                # Each box may carry several (cls_conf, cls_id) pairs after index 4;
                # use integer division so this also works under Python 3.
                for j in range((len(box) - 5) // 2):
                    cls_conf = box[5 + 2 * j]
                    cls_id = box[6 + 2 * j]
                    prob = det_conf * cls_conf
                    fps[cls_id].write('%s %f %f %f %f %f\n' % (fileId, prob, x1, y1, x2, y2))
                    # fps[cls_id].write('%s %f %f %f %f %f %f\n' % (fileId, det_conf, cls_conf, x1, y1, x2, y2))

    for i in range(m.num_classes):
        fps[i].close()
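
# Hedged usage sketch for valid(): the helper name, paths, and output prefix
# below are illustrative assumptions, not repo defaults. The weight file is
# expected to live under a backup directory (e.g. backup/<run>/<ckpt>.weights),
# since valid() derives its results prefix from the last two path components;
# the final argument is the per-class output file name prefix.
def _example_valid():
    _datacfg = 'cfg/voc.data'                        # placeholder data cfg with a 'valid' entry
    _cfgfile = 'cfg/yolo-voc.cfg'                    # placeholder network cfg
    _weightfile = 'backup/yolo-voc/000100.weights'   # placeholder checkpoint path
    valid(_datacfg, _cfgfile, _weightfile, 'comp4_det_test_')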