Example #1
import os

import numpy as np

# `config`, `get_gt_bboxes_labels`, `get_pred`, and `metrics` are project
# helpers assumed to be importable from the surrounding package.
def deeptext_eval_test(result_path='', label_path='', img_path=''):
    eval_iter = 0

    print("\n========================================\n")
    print("Processing, please wait a moment.")
    max_num = 32

    pred_data = []
    files = os.listdir(label_path)
    for file in files:
        eval_iter = eval_iter + 1
        img_file = os.path.join(img_path,
                                file.split('gt_')[1].replace("txt", "jpg"))

        label_file = os.path.join(label_path, file)
        gt_bboxes, gt_labels = get_gt_bboxes_labels(label_file, img_file)

        gt_bboxes = np.array(gt_bboxes).astype(np.float32)

        all_bbox, all_label, all_mask = get_pred(file, result_path)
        # align predicted class ids with ground-truth labels, which start at 1
        all_label = all_label + 1

        for j in range(config.test_batch_size):
            all_bbox_squee = np.squeeze(all_bbox[j, :, :])
            all_label_squee = np.squeeze(all_label[j, :, :])
            all_mask_squee = np.squeeze(all_mask[j, :, :])

            all_bboxes_tmp_mask = all_bbox_squee[all_mask_squee, :]
            all_labels_tmp_mask = all_label_squee[all_mask_squee]

            # keep at most `max_num` detections, ranked by the confidence
            # score stored in the last column of each box
            if all_bboxes_tmp_mask.shape[0] > max_num:
                inds = np.argsort(-all_bboxes_tmp_mask[:, -1])
                inds = inds[:max_num]
                all_bboxes_tmp_mask = all_bboxes_tmp_mask[inds]
                all_labels_tmp_mask = all_labels_tmp_mask[inds]

            pred_data.append({
                "boxes": all_bboxes_tmp_mask,
                "labels": all_labels_tmp_mask,
                "gt_bboxes": gt_bboxes,
                "gt_labels": gt_labels
            })

    precisions, recalls = metrics(pred_data)
    print("\n========================================\n")
    for i in range(config.num_classes - 1):
        j = i + 1
        f1 = (2 * precisions[j] * recalls[j]) / (precisions[j] + recalls[j] +
                                                 1e-6)
        print("class {} precision is {:.2f}%, recall is {:.2f}%,"
              "F1 is {:.2f}%".format(j, precisions[j] * 100, recalls[j] * 100,
                                     f1 * 100))
        if config.use_ambigous_sample:
            break
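Example #1 assumes a get_pred helper that loads previously exported network outputs for one annotation file. A minimal sketch, assuming the outputs were dumped as raw .bin files in the style of Example #5; the file naming, array shapes, and a test batch size of 1 are illustrative assumptions, not from the source:

import os

import numpy as np

def get_pred(file, result_path):
    # Hypothetical loader: one boxes/labels/mask triple per image, with the
    # detection score stored in the last box column (shapes are assumptions).
    prefix = file.split('gt_')[1].replace('.txt', '')
    bbox = np.fromfile(os.path.join(result_path, prefix + '_0.bin'),
                       np.float32).reshape(1, -1, 5)  # x1, y1, x2, y2, score
    label = np.fromfile(os.path.join(result_path, prefix + '_1.bin'),
                        np.int32).reshape(1, -1, 1)
    mask = np.fromfile(os.path.join(result_path, prefix + '_2.bin'),
                       np.bool_).reshape(1, -1, 1)
    return bbox, label, mask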
Example #2
File: eval.py Project: xyg320/mindspore
import time

from mindspore import Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net

# `create_yolo_dataset`, `ConfigYOLOV3ResNet18`, `yolov3_resnet18`,
# `YoloWithEval`, and `metrics` are project helpers assumed to be importable.
def yolo_eval(dataset_path, ckpt_path):
    """Yolov3 evaluation."""

    ds = create_yolo_dataset(dataset_path, is_training=False)
    config = ConfigYOLOV3ResNet18()
    net = yolov3_resnet18(config)
    eval_net = YoloWithEval(net, config)
    print("Load Checkpoint!")
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, param_dict)

    eval_net.set_train(False)
    i = 1
    total = ds.get_dataset_size()
    start = time.time()
    pred_data = []
    print("\n========================================\n")
    print("total images num: ", total)
    print("Processing, please wait a moment.")
    for data in ds.create_dict_iterator():
        img_np = data['image']
        image_shape = data['image_shape']
        annotation = data['annotation']

        output = eval_net(Tensor(img_np), Tensor(image_shape))
        for batch_idx in range(img_np.shape[0]):
            pred_data.append({
                "boxes": output[0].asnumpy()[batch_idx],
                "box_scores": output[1].asnumpy()[batch_idx],
                # the full batch annotation is stored as-is, which is only
                # unambiguous when the eval batch size is 1
                "annotation": annotation
            })
        percent = round(i / total * 100, 2)

        print('    %s [%d/%d]' % (str(percent) + '%', i, total), end='\r')
        i += 1
    print('    %s [%d/%d] cost %d ms' %
          (str(100.0) + '%', total, total, int((time.time() - start) * 1000)),
          end='\n')

    precisions, recalls = metrics(pred_data)
    print("\n========================================\n")
    for i in range(config.num_classes):
        print("class {} precision is {:.2f}%, recall is {:.2f}%".format(
            i, precisions[i] * 100, recalls[i] * 100))
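A hypothetical invocation, with placeholder paths that are not from the source:

yolo_eval("/data/eval.mindrecord", "/ckpt/yolov3_resnet18.ckpt")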
Example #3
import time

import numpy as np
from mindspore.train.serialization import load_checkpoint, load_param_into_net

# `create_deeptext_dataset`, `Deeptext_VGG16`, `config`, and `metrics` are
# project helpers assumed to be importable from the surrounding package.
def Deeptext_eval_test(dataset_path='', ckpt_path=''):
    """Deeptext evaluation."""
    ds = create_deeptext_dataset(dataset_path, batch_size=config.test_batch_size,
                                 repeat_num=1, is_training=False)

    total = ds.get_dataset_size()
    net = Deeptext_VGG16(config)
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, param_dict)
    net.set_train(False)
    eval_iter = 0

    print("\n========================================\n")
    print("Processing, please wait a moment.")
    max_num = 32

    pred_data = []
    for data in ds.create_dict_iterator():
        eval_iter = eval_iter + 1

        img_data = data['image']
        img_metas = data['image_shape']
        gt_bboxes = data['box']
        gt_labels = data['label']
        gt_num = data['valid_num']

        start = time.time()
        # run net
        output = net(img_data, img_metas, gt_bboxes, gt_labels, gt_num)
        gt_bboxes = gt_bboxes.asnumpy()
        # keep only the valid (non-padded) ground-truth boxes and labels
        gt_bboxes = gt_bboxes[gt_num.asnumpy().astype(bool), :]
        gt_labels = gt_labels.asnumpy()
        gt_labels = gt_labels[gt_num.asnumpy().astype(bool)]
        end = time.time()
        print("Iter {} cost time {}".format(eval_iter, end - start))

        # output
        all_bbox = output[0]
        # align predicted class ids with ground-truth labels, which start at 1
        all_label = output[1] + 1
        all_mask = output[2]

        for j in range(config.test_batch_size):
            all_bbox_squee = np.squeeze(all_bbox.asnumpy()[j, :, :])
            all_label_squee = np.squeeze(all_label.asnumpy()[j, :, :])
            all_mask_squee = np.squeeze(all_mask.asnumpy()[j, :, :])

            all_bboxes_tmp_mask = all_bbox_squee[all_mask_squee, :]
            all_labels_tmp_mask = all_label_squee[all_mask_squee]

            if all_bboxes_tmp_mask.shape[0] > max_num:
                inds = np.argsort(-all_bboxes_tmp_mask[:, -1])
                inds = inds[:max_num]
                all_bboxes_tmp_mask = all_bboxes_tmp_mask[inds]
                all_labels_tmp_mask = all_labels_tmp_mask[inds]

            pred_data.append({"boxes": all_bboxes_tmp_mask,
                              "labels": all_labels_tmp_mask,
                              "gt_bboxes": gt_bboxes,
                              "gt_labels": gt_labels})

            percent = round(eval_iter / total * 100, 2)

            print('    %s [%d/%d]' % (str(percent) + '%', eval_iter, total), end='\r')

    precisions, recalls = metrics(pred_data)
    print("\n========================================\n")
    for i in range(config.num_classes - 1):
        j = i + 1
        f1 = (2 * precisions[j] * recalls[j]) / (precisions[j] + recalls[j] + 1e-6)
        print("class {} precision is {:.2f}%, recall is {:.2f}%, "
              "F1 is {:.2f}%".format(j, precisions[j] * 100, recalls[j] * 100, f1 * 100))
        if config.use_ambigous_sample:
            break
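For intuition: with precision 0.90 and recall 0.80 for a class, F1 = 2 * 0.90 * 0.80 / (0.90 + 0.80) ≈ 0.847; the 1e-6 term only guards against division by zero when a class has neither predictions nor ground truth.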
Example #4
                        logger.info(
                            "step: [{}/{}], epochs: [{}/{}], validation loss: {:.4f}".format(
                                step + 1,
                                len(validloader),
                                epoch + 1,
                                self.epochs,
                                valid_loss / 50,
                            )
                        )
                        valid_loss = 0.
                    # ==============================================
            
            # metric, e.g. a row-wise micro-averaged F1 score
            y_pred = np.asarray(y_pred, dtype=np.float32)
            y_true = np.asarray(y_true, dtype=np.float32)
            micro_avg_f1, cls_report, mAP, auc_score = metrics(
                y_true=y_true, y_pred=y_pred,
                show_report=self.show_report, threshold=self.threshold)
            
            scheduler.step()  # advance the learning-rate scheduler once per epoch

            if self.show_report:
                logger.info("==> Classification report: \n{}".format(cls_report))
            logger.info("==> mAP : {}".format(mAP))
            logger.info("==> AUC-ROC score : {}".format(auc_score))
            logger.info('==> epoch: [{}/{}], validation F1-score: {:.6f}'.format(epoch+1, self.epochs, micro_avg_f1))
            avg_valid_loss.append(np.mean(valid_epoch_loss)) # update average validation loss

            self.writer.add_scalar('loss/validation', avg_valid_loss[-1], epoch+1)
            self.writer.add_scalar('mAP', mAP, epoch+1)
            self.writer.add_scalar('AUC_ROC', auc_score, epoch+1)
            self.writer.add_scalar('valid F1', micro_avg_f1, epoch+1)
            # save the model if the validation F1 score has improved
            if micro_avg_f1 >= self.best_f1:
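The snippet above is truncated and its metrics() helper is not shown. As a rough, self-contained illustration of a thresholded micro-averaged F1 score of the kind logged above (scikit-learn is used here purely for illustration, not because the project uses it):

import numpy as np
from sklearn.metrics import f1_score

y_true = np.array([[1, 0, 1], [0, 1, 0]])               # multilabel targets
y_prob = np.array([[0.9, 0.2, 0.7], [0.1, 0.8, 0.4]])   # predicted probabilities
y_pred = (y_prob >= 0.5).astype(int)                    # apply a 0.5 threshold
print(f1_score(y_true, y_pred, average='micro'))        # 1.0: every cell matches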
Example #5
    # `args`, `batchsize`, `config`, and `metrics` are defined earlier in the
    # surrounding script; `anno_dict` maps image file names to annotations.
    anno_dict = {}
    for line in open(args.anno_path):
        line_list = line.split(' ')
        line_list[0] = line_list[0].split('/')[-1]
        anno_dict[line_list[0]] = line_list[1:]

    pred_data = []
    for key in anno_dict:
        result0 = os.path.join(args.result_path, key.split('.')[0] + '_0.bin')
        result1 = os.path.join(args.result_path, key.split('.')[0] + '_1.bin')
        # 13860 is the fixed number of candidate boxes produced by the
        # exported model: output0 holds box coordinates, output1 class scores
        output0 = np.fromfile(result0, np.float32).reshape(batchsize, 13860, 4)
        output1 = np.fromfile(result1, np.float32).reshape(batchsize, 13860, 2)

        anno_list = []
        for v in anno_dict[key]:
            v_list = v.split(',')
            anno_list.append(v_list)
        annotation = np.array(anno_list, np.int64)

        for batch_idx in range(batchsize):
            pred_data.append({
                "boxes": output0[batch_idx],
                "box_scores": output1[batch_idx],
                "annotation": annotation
            })

    precisions, recalls = metrics(pred_data)
    print("\n========================================\n")
    for i in range(config.num_classes):
        print("class {} precision is {:.2f}%, recall is {:.2f}%".format(
            i, precisions[i] * 100, recalls[i] * 100))
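The metrics() helper itself is not shown in any of these examples. As a rough illustration only, here is a minimal sketch of a per-class precision/recall computation compatible with the "boxes" / "box_scores" / "annotation" entries built above, assuming greedy IoU matching at a 0.5 threshold; the real projects' implementations may differ:

import numpy as np

def iou_one_to_many(box, boxes):
    # IoU between one [x1, y1, x2, y2] box and an (N, 4) array of boxes
    x1 = np.maximum(box[0], boxes[:, 0])
    y1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[2], boxes[:, 2])
    y2 = np.minimum(box[3], boxes[:, 3])
    inter = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
    area_a = (box[2] - box[0]) * (box[3] - box[1])
    area_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area_a + area_b - inter + 1e-9)

def metrics_sketch(pred_data, num_classes, score_thr=0.5, iou_thr=0.5):
    # per-class true positives, false positives, and ground-truth counts
    tp = np.zeros(num_classes)
    fp = np.zeros(num_classes)
    gt_count = np.zeros(num_classes)
    for sample in pred_data:
        anno = np.asarray(sample["annotation"]).reshape(-1, 5)  # x1,y1,x2,y2,cls
        for cls in range(num_classes):
            gt = anno[anno[:, 4] == cls][:, :4].astype(np.float32)
            gt_count[cls] += len(gt)
            scores = sample["box_scores"][:, cls]
            keep = scores > score_thr
            boxes = sample["boxes"][keep]
            order = np.argsort(-scores[keep])
            matched = np.zeros(len(gt), dtype=bool)
            # greedily match detections to unmatched ground truth, best first
            for box in boxes[order]:
                ious = iou_one_to_many(box, gt) if len(gt) else np.array([])
                best = int(ious.argmax()) if ious.size else -1
                if best >= 0 and ious[best] >= iou_thr and not matched[best]:
                    matched[best] = True
                    tp[cls] += 1
                else:
                    fp[cls] += 1
    precisions = tp / np.maximum(tp + fp, 1)
    recalls = tp / np.maximum(gt_count, 1)
    return precisions, recalls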