def Val_Dataset(self, root_dir, coco_dir, img_dir, set_dir):
        self.system_dict["dataset"]["val"]["status"] = True
        self.system_dict["dataset"]["val"]["root_dir"] = root_dir
        self.system_dict["dataset"]["val"]["coco_dir"] = coco_dir
        self.system_dict["dataset"]["val"]["img_dir"] = img_dir
        self.system_dict["dataset"]["val"]["set_dir"] = set_dir

        self.system_dict["local"]["val_params"] = {
            "batch_size": self.system_dict["params"]["batch_size"],
            "shuffle": False,
            "drop_last": False,
            "collate_fn": collater,
            "num_workers": self.system_dict["params"]["num_workers"]
        }

        self.system_dict["local"]["val_set"] = CocoDataset(
            root_dir=self.system_dict["dataset"]["val"]["root_dir"] + "/" +
            self.system_dict["dataset"]["val"]["coco_dir"],
            img_dir=self.system_dict["dataset"]["val"]["img_dir"],
            set_dir=self.system_dict["dataset"]["val"]["set_dir"],
            transform=transforms.Compose([Normalizer(),
                                          Resizer()]))

        self.system_dict["local"]["test_generator"] = DataLoader(
            self.system_dict["local"]["val_set"],
            **self.system_dict["local"]["val_params"])
Example #2
def test(opt):
    model = torch.load(opt.pretrained_model).module
    model.cuda()
    dataset = CocoDataset(opt.data_path,
                          set='val2017',
                          transform=transforms.Compose(
                              [Normalizer(), Resizer()]))

    if os.path.isdir(opt.output):
        shutil.rmtree(opt.output)
    os.makedirs(opt.output)

    for index in range(len(dataset)):
        data = dataset[index]
        scale = data['scale']
        image_info = dataset.coco.loadImgs(dataset.image_ids[index])[0]
        with torch.no_grad():
            tb = datetime.now()
            scores, labels, boxes = model(data['img'].cuda().permute(
                2, 0, 1).float().unsqueeze(dim=0))
            boxes /= scale
            te = datetime.now()
            print(te, image_info['file_name'] + " cost " + str(te - tb))

        if boxes.shape[0] > 0:
            #image_info = dataset.coco.loadImgs(dataset.image_ids[index])[0]
            #path = os.path.join(dataset.root_dir, 'images', dataset.set_name, image_info['file_name'])
            path = os.path.join(dataset.root_dir, dataset.set_name,
                                image_info['file_name'])
            print("read from ", path)
            output_image = cv2.imread(path)

            for box_id in range(boxes.shape[0]):
                pred_prob = float(scores[box_id])
                if pred_prob < opt.cls_threshold:
                    break
                pred_label = int(labels[box_id])
                xmin, ymin, xmax, ymax = boxes[box_id, :]
                color = colors[pred_label]
                cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), color,
                              2)
                text_size = cv2.getTextSize(
                    COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                    cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]

                cv2.rectangle(
                    output_image, (xmin, ymin),
                    (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color,
                    -1)
                cv2.putText(output_image,
                            COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                            (xmin, ymin + text_size[1] + 4),
                            cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

            cv2.imwrite(
                "{}/{}_prediction.jpg".format(opt.output,
                                              image_info["file_name"][:-4]),
                output_image)
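The test function above only reads four fields off opt: pretrained_model, data_path, output and cls_threshold. A minimal argparse namespace along these lines would drive it; the default values are illustrative assumptions rather than the original project's choices.

import argparse

parser = argparse.ArgumentParser("EfficientDet COCO inference")
parser.add_argument("--pretrained_model", type=str,
                    default="trained_models/signatrix_efficientdet_coco.pth")
parser.add_argument("--data_path", type=str, default="data/COCO")
parser.add_argument("--output", type=str, default="predictions")
parser.add_argument("--cls_threshold", type=float, default=0.5)
test(parser.parse_args())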
    def Train_Dataset(self,
                      root_dir,
                      coco_dir,
                      img_dir,
                      set_dir,
                      batch_size=8,
                      image_size=512,
                      use_gpu=True,
                      num_workers=3):
        self.system_dict["dataset"]["train"]["root_dir"] = root_dir
        self.system_dict["dataset"]["train"]["coco_dir"] = coco_dir
        self.system_dict["dataset"]["train"]["img_dir"] = img_dir
        self.system_dict["dataset"]["train"]["set_dir"] = set_dir

        self.system_dict["params"]["batch_size"] = batch_size
        self.system_dict["params"]["image_size"] = image_size
        self.system_dict["params"]["use_gpu"] = use_gpu
        self.system_dict["params"]["num_workers"] = num_workers

        if (self.system_dict["params"]["use_gpu"]):
            if torch.cuda.is_available():
                self.system_dict["local"][
                    "num_gpus"] = torch.cuda.device_count()
                torch.cuda.manual_seed(123)
            else:
                torch.manual_seed(123)

        self.system_dict["local"]["training_params"] = {
            "batch_size":
            self.system_dict["params"]["batch_size"] *
            self.system_dict["local"]["num_gpus"],
            "shuffle":
            True,
            "drop_last":
            True,
            "collate_fn":
            collater,
            "num_workers":
            self.system_dict["params"]["num_workers"]
        }

        self.system_dict["local"]["training_set"] = CocoDataset(
            root_dir=self.system_dict["dataset"]["train"]["root_dir"] + "/" +
            self.system_dict["dataset"]["train"]["coco_dir"],
            img_dir=self.system_dict["dataset"]["train"]["img_dir"],
            set_dir=self.system_dict["dataset"]["train"]["set_dir"],
            transform=transforms.Compose(
                [Normalizer(), Augmenter(),
                 Resizer()]))

        self.system_dict["local"]["training_generator"] = DataLoader(
            self.system_dict["local"]["training_set"],
            **self.system_dict["local"]["training_params"])
Example #4
def test(opt):
    #model = torch.load(opt.pretrained_model).module
    model_path = "./trained_models/best_75.pth"

    model = EfficientDet(num_classes=1)

    checkpoint = torch.load(model_path).module

    model.load_state_dict(checkpoint.state_dict())
    model.cuda()

    dataset = LGSVLDataset(opt.data_path,
                           image_sets='test',
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))

    if os.path.isdir(opt.output):
        shutil.rmtree(opt.output)
    os.makedirs(opt.output)

    total_time = 0.0
    #for index in range(len(dataset)):
    for index in range(1000):
        data = dataset[index]
        scale = data['scale']

        #start = time.time()
        #with torch.no_grad():
        #scores, labels, boxes = model(data['img'].cuda().permute(2, 0, 1).float().unsqueeze(dim=0))
        #end = time.time()
        #total_time += end - start

        #print("Total time", total_time)
        #print("Mean time", total_time / 1000.0)

        #if dataset.ids[index] != "1585019926_4100":
        #    continue

        with torch.no_grad():
            #scores, labels, boxes = model(data['img'].cuda().permute(2, 0, 1).float().unsqueeze(dim=0))
            classification, transformed_anchors, scores_over_thresh, scores = model(
                data['img'].cuda().permute(2, 0, 1).float().unsqueeze(dim=0))

            #print(scores_over_thresh.sum())

            if scores_over_thresh.sum() == 0:
                continue

            classification = classification[:, scores_over_thresh, :]
            #print(classification.size())
            transformed_anchors = transformed_anchors[:, scores_over_thresh, :]
            scores = scores[:, scores_over_thresh, :]

            anchors_nms_idx = nms(
                torch.cat([transformed_anchors, scores], dim=2)[0, :, :], 0.5)

            #print("adsgsadgsadg", classification[0, anchors_nms_idx, :])

            #print(classification[0, anchors_nms_idx, :].size())
            scores, labels = classification[0, anchors_nms_idx, :].max(dim=1)
            #print(scores.size())
            #print(labels.size())

            #print("klklklkllkgdlj", classification[0, anchors_nms_idx, :].max(dim=1))

            boxes = transformed_anchors[0, anchors_nms_idx, :]

            boxes /= scale

        #print(boxes.shape)
        if boxes.shape[0] > 0:
            #image_info = dataset.coco.loadImgs(dataset.image_ids[index])[0]
            #path = os.path.join(dataset.root_dir, 'images', dataset.set_name, image_info['file_name'])
            path = os.path.join(dataset.root, 'rgb',
                                dataset.ids[index] + '.png')
            output_image = cv2.imread(path)
            print(path)

            for box_id in range(boxes.shape[0]):
                pred_prob = float(scores[box_id])
                if pred_prob < opt.cls_threshold:
                    break
                pred_label = int(labels[box_id])
                xmin, ymin, xmax, ymax = boxes[box_id, :]
                color = colors[pred_label]
                cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), color,
                              2)
                text_size = cv2.getTextSize(
                    COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                    cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]

                cv2.rectangle(
                    output_image, (xmin, ymin),
                    (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color,
                    -1)
                cv2.putText(output_image,
                            COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                            (xmin, ymin + text_size[1] + 4),
                            cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

            cv2.imwrite(
                "{}/{}_prediction.png".format(opt.output, dataset.ids[index]),
                output_image)
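The nms call above is a project-local helper that takes boxes and scores packed into a single (N, 5) tensor plus an IoU threshold and returns the indices to keep. Assuming it implements plain hard NMS, torchvision's built-in would give the same result; this is only a sketch of that equivalence, not the project's own implementation.

from torchvision.ops import nms as tv_nms

def hard_nms(boxes_and_scores, iou_threshold=0.5):
    # boxes_and_scores: (N, 5) tensor of [x1, y1, x2, y2, score]
    boxes = boxes_and_scores[:, :4]
    scores = boxes_and_scores[:, 4]
    return tv_nms(boxes, scores, iou_threshold)  # indices of kept boxes, highest score first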
Example #5
def train(opt):
    num_gpus = 1
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)

    training_params = {
        "batch_size": opt.batch_size * num_gpus,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": collater,
        "num_workers": 12
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }

    training_set = CocoDataset(root_dir=opt.data_path,
                               set="train2017",
                               transform=transforms.Compose(
                                   [Normalizer(),
                                    Augmenter(),
                                    Resizer()]))
    training_generator = DataLoader(training_set, **training_params)

    test_set = CocoDataset(root_dir=opt.data_path,
                           set="val2017",
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    test_generator = DataLoader(test_set, **test_params)

    model = EfficientDet(num_classes=training_set.num_classes())

    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)

    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    writer = SummaryWriter(opt.log_path)
    if torch.cuda.is_available():
        model = model.cuda()
        model = nn.DataParallel(model)

    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           verbose=True)

    best_loss = 1e5
    best_epoch = 0
    model.train()

    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epochs):
        model.train()
        # if torch.cuda.is_available():
        #     model.module.freeze_bn()
        # else:
        #     model.freeze_bn()
        epoch_loss = []
        progress_bar = tqdm(training_generator)
        for iter, data in enumerate(progress_bar):
            try:
                optimizer.zero_grad()
                if torch.cuda.is_available():
                    cls_loss, reg_loss = model(
                        [data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    cls_loss, reg_loss = model(
                        [data['img'].float(), data['annot']])

                cls_loss = cls_loss.mean()
                reg_loss = reg_loss.mean()
                loss = cls_loss + reg_loss
                if loss == 0:
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))
                total_loss = np.mean(epoch_loss)

                progress_bar.set_description(
                    'Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Batch loss: {:.5f} Total loss: {:.5f}'
                    .format(epoch + 1, opt.num_epochs, iter + 1,
                            num_iter_per_epoch, cls_loss, reg_loss, loss,
                            total_loss))
                writer.add_scalar('Train/Total_loss', total_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Regression_loss', reg_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Classification_loss (focal loss)',
                                  cls_loss, epoch * num_iter_per_epoch + iter)

            except Exception as e:
                print(e)
                continue
        scheduler.step(np.mean(epoch_loss))

        if epoch % opt.test_interval == 0:
            model.eval()
            loss_regression_ls = []
            loss_classification_ls = []
            for iter, data in enumerate(test_generator):
                with torch.no_grad():
                    if torch.cuda.is_available():
                        cls_loss, reg_loss = model(
                            [data['img'].cuda().float(), data['annot'].cuda()])
                    else:
                        cls_loss, reg_loss = model(
                            [data['img'].float(), data['annot']])

                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()

                    loss_classification_ls.append(float(cls_loss))
                    loss_regression_ls.append(float(reg_loss))

            cls_loss = np.mean(loss_classification_ls)
            reg_loss = np.mean(loss_regression_ls)
            loss = cls_loss + reg_loss

            print(
                'Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Total loss: {:1.5f}'
                .format(epoch + 1, opt.num_epochs, cls_loss, reg_loss,
                        np.mean(loss)))
            writer.add_scalar('Test/Total_loss', loss, epoch)
            writer.add_scalar('Test/Regression_loss', reg_loss, epoch)
            writer.add_scalar('Test/Classification_loss (focal loss)', cls_loss,
                              epoch)

            if loss + opt.es_min_delta < best_loss:
                best_loss = loss
                best_epoch = epoch
                torch.save(
                    model,
                    os.path.join(opt.saved_path,
                                 "signatrix_efficientdet_coco.pth"))

                dummy_input = torch.rand(opt.batch_size, 3, 512, 512)
                if torch.cuda.is_available():
                    dummy_input = dummy_input.cuda()
                if isinstance(model, nn.DataParallel):
                    model.module.backbone_net.model.set_swish(
                        memory_efficient=False)

                    torch.onnx.export(model.module,
                                      dummy_input,
                                      os.path.join(
                                          opt.saved_path,
                                          "signatrix_efficientdet_coco.onnx"),
                                      verbose=False,
                                      opset_version=11)
                    model.module.backbone_net.model.set_swish(
                        memory_efficient=True)
                else:
                    model.backbone_net.model.set_swish(memory_efficient=False)

                    torch.onnx.export(model,
                                      dummy_input,
                                      os.path.join(
                                          opt.saved_path,
                                          "signatrix_efficientdet_coco.onnx"),
                                      verbose=False,
                                      opset_version=11)
                    model.backbone_net.model.set_swish(memory_efficient=True)

            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, loss))
                break
    writer.close()
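The early-stopping check epoch - best_epoch > opt.es_patience > 0 is a chained comparison, so it can only fire when es_patience is a positive number; es_patience <= 0 disables early stopping. Two concrete cases (the same check recurs in the other training loops below):

# epoch=12, best_epoch=5, es_patience=5: (12 - 5) > 5 and 5 > 0 -> True, training stops
# epoch=12, best_epoch=5, es_patience=0: (12 - 5) > 0 holds, but 0 > 0 is False -> never stops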
def test(opt):
    test_set = CocoDataset(opt.data_path,
                           set='val2017',
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    opt.num_classes = test_set.num_classes()
    opt.batch_size = opt.batch_size * 4
    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }
    test_generator = DataLoader(test_set, **test_params)

    model = EfficientDet(opt)
    model.load_state_dict(
        torch.load(os.path.join(opt.pretrained_model, opt.network + '.pth')))
    model.cuda()
    model.set_is_training(False)
    model.eval()

    if os.path.isdir(opt.prediction_dir):
        shutil.rmtree(opt.prediction_dir)
    os.makedirs(opt.prediction_dir)

    progress_bar = tqdm(test_generator)
    progress_bar.set_description_str(' Evaluating')
    IoU_scores = []
    for i, data in enumerate(progress_bar):
        scale = data['scale']
        with torch.no_grad():
            output_list = model(data['img'].cuda().float())

        for j, output in enumerate(output_list):
            scores, labels, boxes = output
            annot = data['annot'][j]
            annot = annot[annot[:, 4] != -1]
            # print(scores.size(), labels.size(), boxes.size(), annot.size())
            if boxes.shape[0] == 0:
                if annot.size(0) == 0:
                    IoU_scores.append(1.0)
                else:
                    IoU_scores.append(0.0)
                continue
            if annot.size(0) == 0:
                IoU_scores.append(0.0)
            else:
                classes = set(annot[:, 4].tolist())
                cat = torch.cat(
                    [scores.view(-1, 1),
                     labels.view(-1, 1).float(), boxes],
                    dim=1)
                cat = cat[cat[:, 0] >= opt.cls_threshold]
                iou_score = []
                for c in classes:
                    box = cat[cat[:, 1] == c][:, 2:]
                    if box.size(0) == 0:
                        iou_score.append(0.0)
                        continue
                    tgt = annot[annot[:, 4] == c][:, :4]
                    iou_s = iou(box, tgt.cuda())
                    iou_score.append(iou_s.cpu().numpy())
                classes_pre = set(cat[:, 1].tolist())
                for c in classes_pre:
                    if c not in classes:
                        iou_score.append(0)
                # print(classes_pre, classes ,iou_score)
                IoU_scores.append(sum(iou_score) / len(iou_score))

            if writePIC:
                annot /= scale[j]
                boxes /= scale[j]
                image_info = test_set.coco.loadImgs(
                    test_set.image_ids[i * opt.batch_size + j])[0]
                # print(image_info['file_name'])
                path = os.path.join(test_set.root_dir, 'images',
                                    test_set.set_name, image_info['file_name'])
                output_image = cv2.imread(path)
                # print(output_image.shape)
                for box_id in range(boxes.shape[0]):
                    pred_prob = float(scores[box_id])
                    if pred_prob < opt.cls_threshold:
                        break
                    pred_label = int(labels[box_id])
                    xmin, ymin, xmax, ymax = boxes[box_id, :]
                    color = colors[pred_label]
                    cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax),
                                  color, 1)
                    text_size = cv2.getTextSize(
                        COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                        cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]

                    cv2.rectangle(
                        output_image, (xmin, ymin),
                        (xmin + text_size[0] + 3, ymin + text_size[1] + 4),
                        color, -1)
                    cv2.putText(
                        output_image,
                        COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                        (xmin, ymin + text_size[1] + 4),
                        cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
                for box_id in range(annot.size(0)):
                    xmin, ymin, xmax, ymax = annot[box_id, :4]
                    cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax),
                                  (255, 0, 0), 1)

                cv2.imwrite(
                    "{}/{}_prediction.jpg".format(
                        opt.prediction_dir, image_info["file_name"][:-4]),
                    output_image)
    print(sum(IoU_scores) / len(IoU_scores))
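The iou(box, tgt) call above is another project-local helper; the snippet only tells us it maps the predicted boxes and ground-truth boxes of one class to a single score. One plausible definition, sketched here purely as an assumption and not the project's implementation, is the mean over ground-truth boxes of their best-matching prediction IoU:

import torch
from torchvision.ops import box_iou

def iou(pred_boxes, gt_boxes):
    # pred_boxes: (M, 4), gt_boxes: (K, 4), both xyxy and on the same device
    pairwise = box_iou(pred_boxes, gt_boxes)   # (M, K) IoU matrix
    best_per_gt, _ = pairwise.max(dim=0)       # best prediction for each ground-truth box
    return best_per_gt.mean()                  # single scalar in [0, 1]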
def train(opt):
    num_gpus = 1
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        raise Exception('no GPU')

    cudnn.benchmark = True

    training_params = {
        "batch_size": opt.batch_size * num_gpus,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": collater,
        "num_workers": 12
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }

    training_set = CocoDataset(root_dir=opt.data_path,
                               set="train2017",
                               transform=transforms.Compose(
                                   [Normalizer(),
                                    Augmenter(),
                                    Resizer()]))
    training_generator = DataLoader(training_set, **training_params)

    test_set = CocoDataset(root_dir=opt.data_path,
                           set="val2017",
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    test_generator = DataLoader(test_set, **test_params)

    opt.num_classes = training_set.num_classes()

    model = EfficientDet(opt)
    if opt.resume:
        print('Loading model...')
        model.load_state_dict(
            torch.load(os.path.join(opt.saved_path, opt.network + '.pth')))

    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    model = model.cuda()
    model = nn.DataParallel(model)

    optimizer = torch.optim.AdamW(model.parameters(), opt.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           verbose=True)

    best_loss = 1e5
    best_epoch = 0
    model.train()

    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epochs):
        print('Epoch: {}/{}:'.format(epoch + 1, opt.num_epochs))
        model.train()
        epoch_loss = []
        progress_bar = tqdm(training_generator)
        for iter, data in enumerate(progress_bar):
            try:
                optimizer.zero_grad()
                if torch.cuda.is_available():
                    cls_loss, cls_2_loss, reg_loss = model(
                        [data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    cls_loss, cls_2_loss, reg_loss = model(
                        [data['img'].float(), data['annot']])

                cls_loss = cls_loss.mean()
                reg_loss = reg_loss.mean()
                cls_2_loss = cls_2_loss.mean()
                loss = cls_loss + cls_2_loss + reg_loss
                if loss == 0:
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))
                total_loss = np.mean(epoch_loss)

                progress_bar.set_description(
                    'Epoch: {}/{}. Iteration: {}/{}'.format(
                        epoch + 1, opt.num_epochs, iter + 1,
                        num_iter_per_epoch))

                progress_bar.write(
                    'Cls loss: {:.5f}\tReg loss: {:.5f}\tCls+Reg loss: {:.5f}\tBatch loss: {:.5f}\tTotal loss: {:.5f}'
                    .format(cls_loss, reg_loss, cls_loss + reg_loss, loss,
                            total_loss))

            except Exception as e:
                print(e)
                continue
        scheduler.step(np.mean(epoch_loss))

        if epoch % opt.test_interval == 0:
            model.eval()
            loss_regression_ls = []
            loss_classification_ls = []
            loss_classification_2_ls = []
            progress_bar = tqdm(test_generator)
            progress_bar.set_description_str(' Evaluating')
            for iter, data in enumerate(progress_bar):
                with torch.no_grad():
                    if torch.cuda.is_available():
                        cls_loss, cls_2_loss, reg_loss = model(
                            [data['img'].cuda().float(), data['annot'].cuda()])
                    else:
                        cls_loss, cls_2_loss, reg_loss = model(
                            [data['img'].float(), data['annot']])

                    cls_loss = cls_loss.mean()
                    cls_2_loss = cls_2_loss.mean()
                    reg_loss = reg_loss.mean()

                    loss_classification_ls.append(float(cls_loss))
                    loss_classification_2_ls.append(float(cls_2_loss))
                    loss_regression_ls.append(float(reg_loss))

            cls_loss = np.mean(loss_classification_ls)
            cls_2_loss = np.mean(loss_classification_2_ls)
            reg_loss = np.mean(loss_regression_ls)
            loss = cls_loss + cls_2_loss + reg_loss

            print(
                'Epoch: {}/{}. \nClassification loss: {:1.5f}. \tClassification_2 loss: {:1.5f}. \tRegression loss: {:1.5f}. \tTotal loss: {:1.5f}'
                .format(epoch + 1, opt.num_epochs, cls_loss, cls_2_loss,
                        reg_loss, np.mean(loss)))

            if loss + opt.es_min_delta < best_loss:
                print('Saving model...')
                best_loss = loss
                best_epoch = epoch
                torch.save(model.module.state_dict(),
                           os.path.join(opt.saved_path, opt.network + '.pth'))
                # torch.save(model, os.path.join(opt.saved_path, opt.network+'.pth'))

            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, loss))
                break
        if not len(results):
            return

        # write output
        json.dump(results,
                  open('{}_bbox_results.json'.format(dataset.set_name), 'w'),
                  indent=4)

        # load results in COCO evaluation tool
        coco_true = dataset.coco
        coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
            dataset.set_name))

        # run COCO evaluation
        coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()


if __name__ == '__main__':
    efficientdet = torch.load(
        "trained_models/signatrix_efficientdet_coco.pth").module
    efficientdet.cuda()
    dataset_val = CocoDataset("data/COCO",
                              set='val2017',
                              transform=transforms.Compose(
                                  [Normalizer(), Resizer()]))
    evaluate_coco(dataset_val, efficientdet)
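The fragment above assumes a results list built earlier in evaluate_coco. For coco_true.loadRes to accept the dumped JSON, each entry has to follow the standard COCO detection-result schema; an illustrative entry (values made up) looks like:

results.append({
    "image_id": 397133,                  # COCO image id of the processed image
    "category_id": 1,                    # COCO category id, not the raw label index
    "score": 0.87,                       # detection confidence
    "bbox": [102.5, 48.0, 63.2, 81.7],   # [x, y, width, height] in pixels, not xyxy
})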
Example #9
    def Train_Dataset(self, root_dir, coco_dir, img_dir, set_dir,
                      batch_size=8, image_size=512, use_gpu=True,
                      num_workers=3):
        '''
        User function: Set training dataset parameters

        Dataset Directory Structure

            root_dir
               |------coco_dir
                         |----img_dir
                         |       |------<set_dir_train> (set_dir) (Train)
                         |                  |---------img1.jpg
                         |                  |---------img2.jpg
                         |                  |---------.......... (and so on)
                         |
                         |----annotations
                                 |------instances_Train.json  (instances_<set_dir_train>.json)
                                 |------classes.txt

            - instances_Train.json -> In proper COCO format
            - classes.txt          -> A list of classes in alphabetical order

            For TrainSet
            - root_dir = "../sample_dataset"
            - coco_dir = "kangaroo"
            - img_dir  = "images"
            - set_dir  = "Train"

            Note: The annotation file name must match set_dir, i.e. instances_<set_dir>.json.

        Args:
            root_dir (str): Path to the root directory containing coco_dir
            coco_dir (str): Name of the directory containing the image folder and the annotation folder
            img_dir (str): Name of the folder containing the training and validation set folders
            set_dir (str): Name of the folder containing all training images
            batch_size (int): Mini-batch size used during training
            image_size (int): Either 512 or 300
            use_gpu (bool): If True, run on the GPU, otherwise on the CPU
            num_workers (int): Number of parallel worker processes for the data loader

        Returns:
            None
        '''
        self.system_dict["dataset"]["train"]["root_dir"] = root_dir
        self.system_dict["dataset"]["train"]["coco_dir"] = coco_dir
        self.system_dict["dataset"]["train"]["img_dir"] = img_dir
        self.system_dict["dataset"]["train"]["set_dir"] = set_dir

        self.system_dict["params"]["batch_size"] = batch_size
        self.system_dict["params"]["image_size"] = image_size
        self.system_dict["params"]["use_gpu"] = use_gpu
        self.system_dict["params"]["num_workers"] = num_workers

        if self.system_dict["params"]["use_gpu"]:
            if torch.cuda.is_available():
                self.system_dict["local"]["num_gpus"] = torch.cuda.device_count()
                torch.cuda.manual_seed(123)
            else:
                torch.manual_seed(123)

        self.system_dict["local"]["training_params"] = {
            "batch_size": self.system_dict["params"]["batch_size"] *
                          self.system_dict["local"]["num_gpus"],
            "shuffle": True,
            "drop_last": True,
            "collate_fn": collater,
            "num_workers": self.system_dict["params"]["num_workers"]
        }

        self.system_dict["local"]["training_set"] = CocoDataset(
            root_dir=self.system_dict["dataset"]["train"]["root_dir"] + "/" +
                     self.system_dict["dataset"]["train"]["coco_dir"],
            img_dir=self.system_dict["dataset"]["train"]["img_dir"],
            set_dir=self.system_dict["dataset"]["train"]["set_dir"],
            transform=transforms.Compose([Normalizer(), Resizer()]))  # Augmenter(),

        self.system_dict["local"]["training_generator"] = DataLoader(
            self.system_dict["local"]["training_set"],
            **self.system_dict["local"]["training_params"])
Example #10
    def Val_Dataset(self, root_dir, coco_dir, img_dir, set_dir):
        '''
        User function: Set validation dataset parameters

        Dataset Directory Structure

            root_dir
               |------coco_dir
                         |----img_dir
                         |       |------<set_dir_val> (set_dir) (Validation)
                         |                  |---------img1.jpg
                         |                  |---------img2.jpg
                         |                  |---------.......... (and so on)
                         |
                         |----annotations
                                 |------instances_Val.json  (instances_<set_dir_val>.json)
                                 |------classes.txt

            - instances_Val.json -> In proper COCO format
            - classes.txt        -> A list of classes in alphabetical order

            For ValSet
            - root_dir = "../sample_dataset"
            - coco_dir = "kangaroo"
            - img_dir  = "images"
            - set_dir  = "Val"

            Note: The annotation file name must match set_dir, i.e. instances_<set_dir>.json.

        Args:
            root_dir (str): Path to the root directory containing coco_dir
            coco_dir (str): Name of the directory containing the image folder and the annotation folder
            img_dir (str): Name of the folder containing the training and validation set folders
            set_dir (str): Name of the folder containing all validation images

        Returns:
            None
        '''
        self.system_dict["dataset"]["val"]["status"] = True
        self.system_dict["dataset"]["val"]["root_dir"] = root_dir
        self.system_dict["dataset"]["val"]["coco_dir"] = coco_dir
        self.system_dict["dataset"]["val"]["img_dir"] = img_dir
        self.system_dict["dataset"]["val"]["set_dir"] = set_dir

        self.system_dict["local"]["val_params"] = {
            "batch_size": self.system_dict["params"]["batch_size"],
            "shuffle": False,
            "drop_last": False,
            "collate_fn": collater,
            "num_workers": self.system_dict["params"]["num_workers"]
        }

        self.system_dict["local"]["val_set"] = CocoDataset(
            root_dir=self.system_dict["dataset"]["val"]["root_dir"] + "/" +
                     self.system_dict["dataset"]["val"]["coco_dir"],
            img_dir=self.system_dict["dataset"]["val"]["img_dir"],
            set_dir=self.system_dict["dataset"]["val"]["set_dir"],
            transform=transforms.Compose([Normalizer(), Resizer()]))

        self.system_dict["local"]["test_generator"] = DataLoader(
            self.system_dict["local"]["val_set"],
            **self.system_dict["local"]["val_params"])
Example #11
def test(opt):
    if torch.cuda.is_available():
        model = torch.load(opt.model).module.cuda()
    else:
        model = torch.load(opt.model, map_location=torch.device('cpu')).module

    dataset = OpenImagesDataset(root_dir=opt.data_path,
                                set_name='val',
                                transform=transforms.Compose(
                                    [Normalizer(), Resizer()]))

    if os.path.isdir(opt.output):
        shutil.rmtree(opt.output)
    os.makedirs(opt.output)
    for c in dataset.class_names:
        os.makedirs(os.path.join(opt.output, c.lower()))

    for idx in tqdm(range(len(dataset))):
        data = dataset[idx]
        scale = data['scale']
        with torch.no_grad():
            if torch.cuda.is_available():
                scores, labels, boxes = model(data['img'].cuda().permute(
                    2, 0, 1).float().unsqueeze(dim=0))
            else:
                scores, labels, boxes = model(data['img'].permute(
                    2, 0, 1).float().unsqueeze(dim=0))
            boxes /= scale

        if boxes.shape[0] > 0:
            class_name = dataset.image_to_category_name[dataset.images[idx]]
            path = f'{opt.data_path}/val/{class_name}/images/{dataset.images[idx]}.jpg'
            output_image = cv2.imread(path)

            for box_id in range(boxes.shape[0]):
                pred_prob = float(scores[box_id])
                if pred_prob < opt.cls_threshold:
                    break
                pred_label = int(labels[box_id])
                x1, y1, x2, y2 = boxes[box_id, :]
                color = OPEN_IMAGES_COLORS[pred_label]
                start_point = (int(x1), int(y1))
                end_point = (int(x2), int(y2))

                cv2.rectangle(output_image,
                              start_point,
                              end_point,
                              color,
                              thickness=2)
                text_size = cv2.getTextSize(
                    f'{OPEN_IMAGES_CLASSES[pred_label]}: {pred_prob:.2f}',
                    cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]

                cv2.rectangle(
                    output_image,
                    start_point,
                    (int(x1 + text_size[0] + 3), int(y1 + text_size[1] + 4)),
                    color,
                    thickness=-1)
                cv2.putText(
                    output_image,
                    f'{OPEN_IMAGES_CLASSES[pred_label]}: {pred_prob:.2f}',
                    (int(x1), int(y1 + text_size[1] + 4)),
                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

            cv2.imwrite(
                f"{opt.output}/{class_name}/{dataset.images[idx]}_prediction.jpg",
                output_image)
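Every single-image test loop in this file feeds the network the same way: the dataset returns an H x W x C float tensor, which is permuted to channels-first and given a batch dimension. In isolation the shape change looks like this (512 is roughly what Resizer() produces):

import torch

img = torch.rand(512, 512, 3)                           # H x W x C image tensor
batch = img.permute(2, 0, 1).float().unsqueeze(dim=0)   # -> N x C x H x W
print(batch.shape)                                      # torch.Size([1, 3, 512, 512])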
Example #12
            # append image to list of processed images
            image_ids.append(dataset.image_ids[index])

            # print progress
            print('{}/{}'.format(index, len(dataset)), end='\r')

        if not len(results):
            return

        # write output
        json.dump(results, open('{}_bbox_results.json'.format(dataset.set_name), 'w'), indent=4)

        # load results in COCO evaluation tool
        coco_true = dataset.coco
        coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(dataset.set_name))

        # run COCO evaluation
        coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()


if __name__ == '__main__':
    efficientdet = torch.load("trained_models/signatrix_efficientdet_coco_latest.pth").module
    efficientdet.cuda()
    dataset_val = CocoDataset("data/coco", set='val2017',
                              transform=transforms.Compose([Normalizer(), Resizer()]))
    evaluate_coco(dataset_val, efficientdet)
Example #13
def train(opt):
    num_gpus = 1
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)

    training_params = {
        "batch_size": opt.batch_size * num_gpus,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": collater,
        "num_workers": 12
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }

    training_set = CocoDataset(root_dir=opt.data_path,
                               set="train2017",
                               transform=transforms.Compose(
                                   [Normalizer(),
                                    Augmenter(),
                                    Resizer()]))
    training_generator = DataLoader(training_set, **training_params)

    test_set = CocoDataset(root_dir=opt.data_path,
                           set="val2017",
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    test_generator = DataLoader(test_set, **test_params)

    channels_map = {
        'efficientnet-b0': [40, 80, 192],
        'efficientnet-b1': [40, 80, 192],
        'efficientnet-b2': [48, 88, 208],
        'efficientnet-b3': [48, 96, 232],
        'efficientnet-b4': [56, 112, 272],
        'efficientnet-b5': [64, 128, 304],
        'efficientnet-b6': [72, 144, 344],
        'efficientnet-b7': [80, 160, 384],
        'efficientnet-b8': [80, 160, 384]
    }

    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)

    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    writer = SummaryWriter(opt.log_path)

    if opt.resume:
        resume_path = os.path.join(opt.saved_path,
                                   'signatrix_efficientdet_coco_latest.pth')
        model = torch.load(resume_path).module
        print("model loaded from {}".format(resume_path))
    else:
        model = EfficientDet(
            num_classes=training_set.num_classes(),
            network=opt.backbone_network,
            remote_loading=opt.remote_loading,
            advprop=opt.advprop,
            conv_in_channels=channels_map[opt.backbone_network])
        print("model created with backbone {}, advprop {}".format(
            opt.backbone_network, opt.advprop))

    if torch.cuda.is_available():
        model = model.cuda()
        model = nn.DataParallel(model)

    if opt.resume:
        m = round(opt.start_epoch / 100)
        opt.lr = opt.lr * (0.1**m)
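    # Note on the resume logic above: the learning rate is decayed by 10x for roughly
    # every 100 epochs already completed, i.e. m = round(start_epoch / 100) and the
    # resumed lr is lr * 0.1 ** m. For example, start_epoch=120 gives m=1 (lr * 0.1),
    # and start_epoch=250 gives m=2 (round(2.5) rounds to the even 2), so lr * 0.01.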
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           verbose=True)

    best_loss = 1e5
    best_epoch = 0
    model.train()

    num_iter_per_epoch = len(training_generator)

    start_epoch = 0
    if opt.resume:
        start_epoch = opt.start_epoch
    for epoch in range(start_epoch, opt.num_epochs):
        model.train()
        # if torch.cuda.is_available():
        #     model.module.freeze_bn()
        # else:
        #     model.freeze_bn()
        epoch_loss = []
        progress_bar = tqdm(training_generator)
        for iter, data in enumerate(progress_bar):
            try:
                optimizer.zero_grad()
                if torch.cuda.is_available():
                    cls_loss, reg_loss = model(
                        [data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    cls_loss, reg_loss = model(
                        [data['img'].float(), data['annot']])

                cls_loss = cls_loss.mean()
                reg_loss = reg_loss.mean()
                loss = cls_loss + reg_loss
                if loss == 0:
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))
                total_loss = np.mean(epoch_loss)

                progress_bar.set_description(
                    '{} Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Batch loss: {:.5f} Total loss: {:.5f}'
                    .format(datetime.now(), epoch + 1, opt.num_epochs,
                            iter + 1, num_iter_per_epoch, cls_loss, reg_loss,
                            loss, total_loss))
                writer.add_scalar('Train/Total_loss', total_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Regression_loss', reg_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Classification_loss (focal loss)',
                                  cls_loss, epoch * num_iter_per_epoch + iter)

            except Exception as e:
                print(e)
                continue
        scheduler.step(np.mean(epoch_loss))

        if epoch % opt.test_interval == 0:
            model.eval()
            loss_regression_ls = []
            loss_classification_ls = []
            for iter, data in enumerate(test_generator):
                with torch.no_grad():
                    if torch.cuda.is_available():
                        cls_loss, reg_loss = model(
                            [data['img'].cuda().float(), data['annot'].cuda()])
                    else:
                        cls_loss, reg_loss = model(
                            [data['img'].float(), data['annot']])

                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()

                    loss_classification_ls.append(float(cls_loss))
                    loss_regression_ls.append(float(reg_loss))

            cls_loss = np.mean(loss_classification_ls)
            reg_loss = np.mean(loss_regression_ls)
            loss = cls_loss + reg_loss

            print(
                '{} Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Total loss: {:1.5f}'
                .format(datetime.now(), epoch + 1, opt.num_epochs, cls_loss,
                        reg_loss, np.mean(loss)))
            writer.add_scalar('Test/Total_loss', loss, epoch)
            writer.add_scalar('Test/Regression_loss', reg_loss, epoch)
            writer.add_scalar('Test/Classification_loss (focal loss)', cls_loss,
                              epoch)

            if loss + opt.es_min_delta < best_loss:
                best_loss = loss
                best_epoch = epoch
                torch.save(
                    model,
                    os.path.join(
                        opt.saved_path,
                        "signatrix_efficientdet_coco_best_epoch{}.pth".format(
                            epoch)))
                ''' 
                dummy_input = torch.rand(opt.batch_size, 3, 512, 512)
                if torch.cuda.is_available():
                    dummy_input = dummy_input.cuda()
                if isinstance(model, nn.DataParallel):
                    model.module.backbone_net.model.set_swish(memory_efficient=False)
                    
                    torch.onnx.export(model.module, dummy_input,
                                      os.path.join(opt.saved_path, "signatrix_efficientdet_coco.onnx"),
                                      verbose=False)
                    
                    model.module.backbone_net.model.set_swish(memory_efficient=True)
                else:
                    model.backbone_net.model.set_swish(memory_efficient=False)
                    
                    torch.onnx.export(model, dummy_input,
                                      os.path.join(opt.saved_path, "signatrix_efficientdet_coco.onnx"),
                                      verbose=False)
                    
                    model.backbone_net.model.set_swish(memory_efficient=True)
                '''
            print("epoch:", epoch, "best_epoch:", best_epoch,
                  "epoch - best_epoch=", epoch - best_epoch)
            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, loss))
                break
        if epoch % opt.save_interval == 0:
            torch.save(
                model,
                os.path.join(opt.saved_path,
                             "signatrix_efficientdet_coco_latest.pth"))
    writer.close()
Example #14
def train(opt):
    if not os.path.isdir(opt.data_path):
        print(f"Data for dataset not found at {opt.data_path}")
        return

    num_gpus = 1
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)

    training_params = {
        "batch_size": opt.batch_size * num_gpus,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": collater,
        "num_workers": 12
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }

    training_set = OpenImagesDataset(
        root_dir=opt.data_path,
        set_name="train",
        transform=transforms.Compose([Normalizer(),
                                      Augmenter(),
                                      Resizer()]))
    training_loader = DataLoader(training_set, **training_params)

    test_set = OpenImagesDataset(root_dir=opt.data_path,
                                 set_name="val",
                                 transform=transforms.Compose(
                                     [Normalizer(), Resizer()]))
    test_loader = DataLoader(test_set, **test_params)

    model = EfficientDet(num_classes=training_set.num_classes())

    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)

    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    writer = SummaryWriter(opt.log_path)
    if torch.cuda.is_available():
        model = model.cuda()
        model = nn.DataParallel(model)

    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           verbose=True)

    best_loss = 1e5
    best_epoch = 0
    model.train()

    num_iter_per_epoch = len(training_loader)
    for epoch in range(opt.num_epochs):
        model.train()
        epoch_loss = []
        progress_bar = tqdm(training_loader)

        for iter, data in enumerate(progress_bar):
            try:
                optimizer.zero_grad()
                if torch.cuda.is_available():
                    cls_loss, reg_loss = model(
                        [data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    cls_loss, reg_loss = model(
                        [data['img'].float(), data['annot']])

                cls_loss = cls_loss.mean()
                reg_loss = reg_loss.mean()
                loss = cls_loss + reg_loss
                if loss == 0:
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))
                total_loss = np.mean(epoch_loss)

                progress_bar.set_description(
                    f'Epoch: {epoch + 1}/{opt.num_epochs} | '
                    f'Iteration: {iter + 1}/{num_iter_per_epoch} | '
                    f'Cls loss: {cls_loss:.5f} | Reg loss: {reg_loss:.5f} | '
                    f'Batch loss: {loss:.5f} | Total loss: {total_loss:.5f}')

                writer.add_scalar('Train/Total_loss', total_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Regression_loss', reg_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Classification_loss (focal loss)',
                                  cls_loss, epoch * num_iter_per_epoch + iter)

            except Exception as e:
                print(e)
                continue
        scheduler.step(np.mean(epoch_loss))

        if epoch % opt.test_interval == 0:
            model.eval()
            loss_regression_ls = []
            loss_classification_ls = []
            for iter, data in enumerate(test_loader):
                with torch.no_grad():
                    if torch.cuda.is_available():
                        cls_loss, reg_loss = model(
                            [data['img'].cuda().float(), data['annot'].cuda()])
                    else:
                        cls_loss, reg_loss = model(
                            [data['img'].float(), data['annot']])

                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()

                    loss_classification_ls.append(float(cls_loss))
                    loss_regression_ls.append(float(reg_loss))

            cls_loss = np.mean(loss_classification_ls)
            reg_loss = np.mean(loss_regression_ls)
            loss = cls_loss + reg_loss

            print(
                f'Epoch: {epoch + 1}/{opt.num_epochs} | '
                f'Classification loss: {cls_loss:1.5f} | '
                f'Regression loss: {reg_loss:1.5f} | Total loss: {np.mean(loss):1.5f}'
            )

            writer.add_scalar('Test/Total_loss', loss, epoch)
            writer.add_scalar('Test/Regression_loss', reg_loss, epoch)
            writer.add_scalar('Test/Classification_loss (focal loss)',
                              cls_loss, epoch)

            if loss + opt.es_min_delta < best_loss:
                best_loss = loss
                best_epoch = epoch
                torch.save(
                    model, os.path.join(opt.saved_path,
                                        f'{opt.model_name}.pth'))

            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    f"Stop training at epoch {epoch}. The lowest loss achieved is {loss}"
                )
                break

    torch.save(model,
               os.path.join(opt.saved_path, f'{opt.model_name}-final.pth'))
    writer.flush()
    writer.close()
def test(opt):
    model = torch.load(opt.pretrained_model).module
    model.cuda()
    # dataset = CocoDataset(opt.data_path, set='val2017', transform=transforms.Compose([Normalizer(), Resizer()]))
    dataset = MaJiaDataset(root_dir="/home/pc/work/data/majia",
                           label_txt="/home/pc/work/data/majia/data_02.txt",
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    if os.path.isdir(opt.output):
        shutil.rmtree(opt.output)
    os.makedirs(opt.output)

    # fw = open('majia_pred.txt', 'w')
    box_count = 0

    for index in range(len(dataset)):
        data = dataset[index]
        scale = data['scale']
        with torch.no_grad():
            scores, labels, boxes, landmarks = model(
                data['img'].cuda().permute(2, 0, 1).float().unsqueeze(dim=0))
            boxes /= scale
            landmarks /= scale

        image_id = dataset.ids[index]
        # fw.write('# {}\n'.format(str(image_id)))
        # fw1 = open('detection-results/{}.txt'.format(os.path.basename(str(image_id))[:-4]), 'w')

        if boxes.shape[0] > 0:
            # image_info = dataset.coco.loadImgs(dataset.image_ids[index])[0]
            # path = os.path.join(dataset.root_dir, 'images', dataset.set_name, image_info['file_name'])
            # output_image = cv2.imread(path)
            image_id = dataset.ids[index]
            output_image = cv2.imread(str(image_id))

            for box_id in range(boxes.shape[0]):
                pred_prob = float(scores[box_id])
                if pred_prob < opt.cls_threshold:
                    continue
                pred_label = int(labels[box_id])
                xmin, ymin, xmax, ymax = np.int0(
                    boxes[box_id, :].cpu().numpy())
                color = colors[pred_label]
                cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), color,
                              2)
                text_size = cv2.getTextSize(
                    COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                    cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]

                cv2.rectangle(
                    output_image, (xmin, ymin),
                    (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color,
                    -1)
                cv2.putText(output_image,
                            COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                            (xmin, ymin + text_size[1] + 4),
                            cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
                for k in range(4):
                    cv2.circle(output_image, (landmarks[box_id][2 * k],
                                              landmarks[box_id][2 * k + 1]), 2,
                               (0, 255, 0), 2)
                # fw.write('{} {} {} {} {} {} {}\n'.format(xmin, ymin, xmax, ymax, pred_prob, 'majia', 1))
                # fw1.write('{} {} {} {} {} {}\n'.format('majia', pred_prob, xmin, ymin, xmax, ymax))
            # print("{}/{}_prediction.jpg".format(opt.output, image_id[:-4]))
            # cv2.imwrite("{}/{}_prediction.jpg".format(opt.output, image_id[-10:-4]), output_image)
            cv2.imshow('1', output_image)
            key = cv2.waitKey(0)
            if key == ord('q'):
                exit()