def Train_Dataset(self, root_dir, coco_dir, img_dir, set_dir, batch_size=8, image_size=512, use_gpu=True, num_workers=3):
        """Register the training dataset parameters and build the training DataLoader.

        Args:
            root_dir (str): Path to the directory containing ``coco_dir``.
            coco_dir (str): Folder holding the image folder and annotations.
            img_dir (str): Folder (inside ``coco_dir``) containing the image sets.
            set_dir (str): Folder of training images; the annotation file name
                is expected to match it (``instances_<set_dir>.json``).
            batch_size (int): Mini-batch size per GPU (scaled by GPU count below).
            image_size (int): Input image size (e.g. 512 or 300).
            use_gpu (bool): If True, seed and count CUDA devices when available.
            num_workers (int): Number of DataLoader worker processes.

        Returns:
            None
        """
        train_cfg = self.system_dict["dataset"]["train"]
        train_cfg["root_dir"] = root_dir
        train_cfg["coco_dir"] = coco_dir
        train_cfg["img_dir"] = img_dir
        train_cfg["set_dir"] = set_dir

        params = self.system_dict["params"]
        params["batch_size"] = batch_size
        params["image_size"] = image_size
        params["use_gpu"] = use_gpu
        params["num_workers"] = num_workers

        # Make sure the GPU count is always defined: the original code only
        # set it when use_gpu was True AND CUDA was available, so the
        # batch-size multiplication below could raise KeyError on CPU-only
        # runs. setdefault preserves any value set elsewhere (e.g. __init__).
        self.system_dict["local"].setdefault("num_gpus", 1)
        if params["use_gpu"]:
            if torch.cuda.is_available():
                self.system_dict["local"]["num_gpus"] = torch.cuda.device_count()
                torch.cuda.manual_seed(123)
            else:
                torch.manual_seed(123)

        # Effective batch size is per-GPU batch size times the device count
        # (DataParallel splits the batch across GPUs).
        self.system_dict["local"]["training_params"] = {"batch_size": params["batch_size"] * self.system_dict["local"]["num_gpus"],
                                                           "shuffle": True,
                                                           "drop_last": True,
                                                           "collate_fn": collater,
                                                           "num_workers": params["num_workers"]}

        self.system_dict["local"]["training_set"] = CocoDataset(root_dir=train_cfg["root_dir"] + "/" + train_cfg["coco_dir"],
                                                            img_dir=train_cfg["img_dir"],
                                                            set_dir=train_cfg["set_dir"],
                                                            transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))

        self.system_dict["local"]["training_generator"] = DataLoader(self.system_dict["local"]["training_set"],
                                                                    **self.system_dict["local"]["training_params"])
    def Val_Dataset(self, root_dir, coco_dir, img_dir, set_dir):
        """Register the validation dataset and build its DataLoader.

        Args:
            root_dir (str): Path to the directory containing ``coco_dir``.
            coco_dir (str): Folder holding the image folder and annotations.
            img_dir (str): Folder (inside ``coco_dir``) containing the image sets.
            set_dir (str): Folder of validation images.

        Returns:
            None
        """
        val_cfg = self.system_dict["dataset"]["val"]
        val_cfg["status"] = True
        val_cfg["root_dir"] = root_dir
        val_cfg["coco_dir"] = coco_dir
        val_cfg["img_dir"] = img_dir
        val_cfg["set_dir"] = set_dir

        params = self.system_dict["params"]
        loader_args = dict(batch_size=params["batch_size"],
                           shuffle=False,
                           drop_last=False,
                           collate_fn=collater,
                           num_workers=params["num_workers"])
        self.system_dict["local"]["val_params"] = loader_args

        dataset = CocoDataset(
            root_dir=val_cfg["root_dir"] + "/" + val_cfg["coco_dir"],
            img_dir=val_cfg["img_dir"],
            set_dir=val_cfg["set_dir"],
            transform=transforms.Compose([Normalizer(), Resizer()]))
        self.system_dict["local"]["val_set"] = dataset

        # NOTE(review): stored under "test_generator" (not "val_generator") —
        # presumably the key the validation loop reads; confirm downstream.
        self.system_dict["local"]["test_generator"] = DataLoader(
            dataset, **loader_args)
# Example #3
def test(opt):
    """Run an EfficientDet model over the COCO val set and dump annotated images.

    Loads a full serialized model (saved wrapped in DataParallel, hence
    ``.module``), runs inference per image, and writes each image with
    predicted boxes/labels drawn into ``opt.output``.

    Args:
        opt: Namespace with ``pretrained_model``, ``data_path``, ``output``
            and ``cls_threshold`` attributes.
    """
    model = torch.load(opt.pretrained_model).module
    model.cuda()
    dataset = CocoDataset(opt.data_path,
                          set='val2017',
                          transform=transforms.Compose(
                              [Normalizer(), Resizer()]))

    # Start from a clean output directory.
    if os.path.isdir(opt.output):
        shutil.rmtree(opt.output)
    os.makedirs(opt.output)

    for index in range(len(dataset)):
        data = dataset[index]
        scale = data['scale']
        image_info = dataset.coco.loadImgs(dataset.image_ids[index])[0]
        with torch.no_grad():
            tb = datetime.now()
            # HWC -> CHW, add batch dim; boxes come back in resized-image
            # coordinates, so divide by the resize scale.
            scores, labels, boxes = model(data['img'].cuda().permute(
                2, 0, 1).float().unsqueeze(dim=0))
            boxes /= scale
            te = datetime.now()
            print(te, image_info['file_name'] + " cost " + str(te - tb))

        if boxes.shape[0] > 0:
            path = os.path.join(dataset.root_dir, dataset.set_name,
                                image_info['file_name'])
            print("read from ", path)
            output_image = cv2.imread(path)

            for box_id in range(boxes.shape[0]):
                pred_prob = float(scores[box_id])
                if pred_prob < opt.cls_threshold:
                    # break (not continue): assumes detections are sorted by
                    # descending score — TODO confirm against the model head.
                    break
                pred_label = int(labels[box_id])
                # OpenCV drawing functions require integer pixel coordinates;
                # the raw values are float tensor scalars.
                xmin, ymin, xmax, ymax = (int(v) for v in boxes[box_id, :])
                color = colors[pred_label]
                cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), color,
                              2)
                text_size = cv2.getTextSize(
                    COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                    cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]

                # Filled label background, then the class/score text on top.
                cv2.rectangle(
                    output_image, (xmin, ymin),
                    (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color,
                    -1)
                cv2.putText(output_image,
                            COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                            (xmin, ymin + text_size[1] + 4),
                            cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

            cv2.imwrite(
                "{}/{}_prediction.jpg".format(opt.output,
                                              image_info["file_name"][:-4]),
                output_image)
# Example #4
def test(opt):
    """Run an SSD model over the COCO val set and dump annotated images.

    Rebuilds the SSD from a state-dict checkpoint, decodes predictions with
    the default-box encoder, filters by ``opt.cls_threshold`` and writes each
    image with its boxes drawn into ``opt.output``.

    Args:
        opt: Namespace with ``pretrained_model``, ``data_path``, ``output``,
            ``nms_threshold`` and ``cls_threshold`` attributes.
    """
    model = SSD(backbone=ResNet())
    checkpoint = torch.load(opt.pretrained_model)
    model.load_state_dict(checkpoint["model_state_dict"])
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    dboxes = generate_dboxes()
    test_set = CocoDataset(opt.data_path, 2017, "val",
                           SSDTransformer(dboxes, (300, 300), val=True))
    encoder = Encoder(dboxes)

    # Start from a clean output directory.
    if os.path.isdir(opt.output):
        shutil.rmtree(opt.output)
    os.makedirs(opt.output)

    for img, img_id, img_size, _, _ in test_set:
        if img is None:
            continue
        if torch.cuda.is_available():
            img = img.cuda()
        with torch.no_grad():
            ploc, plabel = model(img.unsqueeze(dim=0))
            # decode_batch returns one (loc, label, prob) triple per image;
            # keep only detections above the class-score threshold.
            result = encoder.decode_batch(ploc, plabel, opt.nms_threshold,
                                          20)[0]
            loc, label, prob = [r.cpu().numpy() for r in result]
            best = np.argwhere(prob > opt.cls_threshold).squeeze(axis=1)
            loc = loc[best]
            label = label[best]
            prob = prob[best]
            if len(loc) > 0:
                path = test_set.coco.loadImgs(img_id)[0]["file_name"]
                output_img = cv2.imread(
                    os.path.join(opt.data_path, "val2017", path))
                height, width, _ = output_img.shape
                # Boxes are normalized [0, 1]; scale to pixel coordinates.
                loc[:, 0::2] *= width
                loc[:, 1::2] *= height
                loc = loc.astype(np.int32)
                for box, lb, pr in zip(loc, label, prob):
                    category = test_set.label_info[lb]
                    color = colors[lb]
                    xmin, ymin, xmax, ymax = box
                    cv2.rectangle(output_img, (xmin, ymin), (xmax, ymax),
                                  color, 2)
                    text_size = cv2.getTextSize(category + " : %.2f" % pr,
                                                cv2.FONT_HERSHEY_PLAIN, 1,
                                                1)[0]
                    # Filled label background, then the class/score text.
                    cv2.rectangle(
                        output_img, (xmin, ymin),
                        (xmin + text_size[0] + 3, ymin + text_size[1] + 4),
                        color, -1)
                    cv2.putText(output_img, category + " : %.2f" % pr,
                                (xmin, ymin + text_size[1] + 4),
                                cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
                # Write once per image, after all boxes are drawn (the
                # original rewrote the same file inside the box loop).
                cv2.imwrite(
                    "{}/{}_prediction.jpg".format(opt.output, path[:-4]),
                    output_img)
# Example #5
def train(opt):
    """Train an EfficientDet on COCO, evaluating and exporting periodically.

    Trains with Adam + ReduceLROnPlateau, logs to TensorBoard, evaluates
    every ``opt.test_interval`` epochs, saves/exports (ONNX) the model when
    validation loss improves, and early-stops after ``opt.es_patience``
    epochs without improvement.

    Args:
        opt: Namespace with ``batch_size``, ``data_path``, ``log_path``,
            ``saved_path``, ``lr``, ``num_epochs``, ``test_interval``,
            ``es_min_delta`` and ``es_patience`` attributes.
    """
    # Fixed seeds for reproducibility; batch size scales with GPU count
    # because nn.DataParallel splits each batch across devices.
    num_gpus = 1
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)

    training_params = {
        "batch_size": opt.batch_size * num_gpus,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": collater,
        "num_workers": 12
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }

    training_set = CocoDataset(root_dir=opt.data_path,
                               set="train2017",
                               transform=transforms.Compose(
                                   [Normalizer(),
                                    Augmenter(),
                                    Resizer()]))
    training_generator = DataLoader(training_set, **training_params)

    test_set = CocoDataset(root_dir=opt.data_path,
                           set="val2017",
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    test_generator = DataLoader(test_set, **test_params)

    model = EfficientDet(num_classes=training_set.num_classes())

    # Fresh TensorBoard log directory each run; keep saved_path if present.
    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)

    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    writer = SummaryWriter(opt.log_path)
    if torch.cuda.is_available():
        model = model.cuda()
        model = nn.DataParallel(model)

    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           verbose=True)

    best_loss = 1e5
    best_epoch = 0
    model.train()

    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epochs):
        model.train()
        # if torch.cuda.is_available():
        #     model.module.freeze_bn()
        # else:
        #     model.freeze_bn()
        epoch_loss = []
        progress_bar = tqdm(training_generator)
        for iter, data in enumerate(progress_bar):
            # NOTE(review): the broad try/except swallows any per-batch error
            # (OOM, bad sample) and just prints it — deliberate best-effort.
            try:
                optimizer.zero_grad()
                # The model computes its own losses given [images, annots].
                if torch.cuda.is_available():
                    cls_loss, reg_loss = model(
                        [data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    cls_loss, reg_loss = model(
                        [data['img'].float(), data['annot']])

                cls_loss = cls_loss.mean()
                reg_loss = reg_loss.mean()
                loss = cls_loss + reg_loss
                # Skip batches with no usable targets (zero loss => no grads).
                if loss == 0:
                    continue
                loss.backward()
                # Gradient clipping stabilizes focal-loss training.
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))
                total_loss = np.mean(epoch_loss)

                progress_bar.set_description(
                    'Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Batch loss: {:.5f} Total loss: {:.5f}'
                    .format(epoch + 1, opt.num_epochs, iter + 1,
                            num_iter_per_epoch, cls_loss, reg_loss, loss,
                            total_loss))
                writer.add_scalar('Train/Total_loss', total_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Regression_loss', reg_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Classfication_loss (focal loss)',
                                  cls_loss, epoch * num_iter_per_epoch + iter)

            except Exception as e:
                print(e)
                continue
        # Plateau scheduler keyed on the mean training loss of the epoch.
        scheduler.step(np.mean(epoch_loss))

        if epoch % opt.test_interval == 0:
            model.eval()
            loss_regression_ls = []
            loss_classification_ls = []
            for iter, data in enumerate(test_generator):
                with torch.no_grad():
                    if torch.cuda.is_available():
                        cls_loss, reg_loss = model(
                            [data['img'].cuda().float(), data['annot'].cuda()])
                    else:
                        cls_loss, reg_loss = model(
                            [data['img'].float(), data['annot']])

                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()

                    loss_classification_ls.append(float(cls_loss))
                    loss_regression_ls.append(float(reg_loss))

            cls_loss = np.mean(loss_classification_ls)
            reg_loss = np.mean(loss_regression_ls)
            loss = cls_loss + reg_loss

            # NOTE(review): np.mean(loss) of a scalar is a no-op here.
            print(
                'Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Total loss: {:1.5f}'
                .format(epoch + 1, opt.num_epochs, cls_loss, reg_loss,
                        np.mean(loss)))
            writer.add_scalar('Test/Total_loss', loss, epoch)
            writer.add_scalar('Test/Regression_loss', reg_loss, epoch)
            writer.add_scalar('Test/Classfication_loss (focal loss)', cls_loss,
                              epoch)

            # Save + export only on sufficient improvement (es_min_delta).
            if loss + opt.es_min_delta < best_loss:
                best_loss = loss
                best_epoch = epoch
                torch.save(
                    model,
                    os.path.join(opt.saved_path,
                                 "signatrix_efficientdet_coco.pth"))

                dummy_input = torch.rand(opt.batch_size, 3, 512, 512)
                if torch.cuda.is_available():
                    dummy_input = dummy_input.cuda()
                # EfficientNet's memory-efficient Swish is not traceable, so
                # it is switched off for ONNX export and restored afterwards.
                if isinstance(model, nn.DataParallel):
                    model.module.backbone_net.model.set_swish(
                        memory_efficient=False)

                    torch.onnx.export(model.module,
                                      dummy_input,
                                      os.path.join(
                                          opt.saved_path,
                                          "signatrix_efficientdet_coco.onnx"),
                                      verbose=False,
                                      opset_version=11)
                    model.module.backbone_net.model.set_swish(
                        memory_efficient=True)
                else:
                    model.backbone_net.model.set_swish(memory_efficient=False)

                    torch.onnx.export(model,
                                      dummy_input,
                                      os.path.join(
                                          opt.saved_path,
                                          "signatrix_efficientdet_coco.onnx"),
                                      verbose=False,
                                      opset_version=11)
                    model.backbone_net.model.set_swish(memory_efficient=True)

            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, loss))
                break
    writer.close()
def test(opt):
    """Evaluate an EfficientDet on COCO val2017 with a per-image mean-IoU score.

    For every image, predictions above ``opt.cls_threshold`` are matched
    per-class against ground-truth boxes via the module-level ``iou`` helper;
    spurious predicted classes score 0. Prints the dataset-wide mean.
    Optionally (module-level ``writePIC`` flag — presumably a global toggle,
    TODO confirm) writes annotated prediction images to ``opt.prediction_dir``.

    Args:
        opt: Namespace with ``data_path``, ``batch_size``, ``num_classes``,
            ``pretrained_model``, ``network``, ``prediction_dir`` and
            ``cls_threshold`` attributes (``num_classes``/``batch_size`` are
            mutated here).
    """
    test_set = CocoDataset(opt.data_path,
                           set='val2017',
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    opt.num_classes = test_set.num_classes()
    # Inference-only, so a larger batch fits in memory.
    opt.batch_size = opt.batch_size * 4
    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }
    test_generator = DataLoader(test_set, **test_params)

    model = EfficientDet(opt)
    model.load_state_dict(
        torch.load(os.path.join(opt.pretrained_model, opt.network + '.pth')))
    model.cuda()
    model.set_is_training(False)
    model.eval()

    # Start from a clean prediction directory.
    if os.path.isdir(opt.prediction_dir):
        shutil.rmtree(opt.prediction_dir)
    os.makedirs(opt.prediction_dir)

    progress_bar = tqdm(test_generator)
    progress_bar.set_description_str(' Evaluating')
    IoU_scores = []
    for i, data in enumerate(progress_bar):
        scale = data['scale']
        with torch.no_grad():
            output_list = model(data['img'].cuda().float())

        # One (scores, labels, boxes) triple per image in the batch.
        for j, output in enumerate(output_list):
            scores, labels, boxes = output
            annot = data['annot'][j]
            # Drop padding rows (class id -1) added by the collater.
            annot = annot[annot[:, 4] != -1]
            # print(scores.size(), labels.size(), boxes.size(), annot.size())
            # Degenerate cases: no predictions and/or no ground truth.
            if boxes.shape[0] == 0:
                if annot.size(0) == 0:
                    IoU_scores.append(1.0)
                else:
                    IoU_scores.append(0.0)
                continue
            if annot.size(0) == 0:
                IoU_scores.append(0.0)
            else:
                classes = set(annot[:, 4].tolist())
                # Pack [score, label, x1, y1, x2, y2] rows, then filter by
                # the classification threshold.
                cat = torch.cat(
                    [scores.view(-1, 1),
                     labels.view(-1, 1).float(), boxes],
                    dim=1)
                cat = cat[cat[:, 0] >= opt.cls_threshold]
                iou_score = []
                # Per ground-truth class: IoU of predicted vs target boxes
                # (0 when the class was missed entirely).
                for c in classes:
                    box = cat[cat[:, 1] == c][:, 2:]
                    if box.size(0) == 0:
                        iou_score.append(0.0)
                        continue
                    tgt = annot[annot[:, 4] == c][:, :4]
                    iou_s = iou(box, tgt.cuda())
                    iou_score.append(iou_s.cpu().numpy())
                # Penalize predicted classes absent from the ground truth.
                classes_pre = set(cat[:, 1].tolist())
                for c in classes_pre:
                    if c not in classes:
                        iou_score.append(0)
                # print(classes_pre, classes ,iou_score)
                IoU_scores.append(sum(iou_score) / len(iou_score))

            if writePIC:
                # Undo the Resizer scaling before drawing on the raw image.
                annot /= scale[j]
                boxes /= scale[j]
                image_info = test_set.coco.loadImgs(
                    test_set.image_ids[i * opt.batch_size + j])[0]
                # print(image_info['file_name'])
                path = os.path.join(test_set.root_dir, 'images',
                                    test_set.set_name, image_info['file_name'])
                output_image = cv2.imread(path)
                # print(output_image.shape)
                for box_id in range(boxes.shape[0]):
                    pred_prob = float(scores[box_id])
                    # break assumes detections sorted by descending score —
                    # TODO confirm against the model head.
                    if pred_prob < opt.cls_threshold:
                        break
                    pred_label = int(labels[box_id])
                    xmin, ymin, xmax, ymax = boxes[box_id, :]
                    color = colors[pred_label]
                    cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax),
                                  color, 1)
                    text_size = cv2.getTextSize(
                        COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                        cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]

                    cv2.rectangle(
                        output_image, (xmin, ymin),
                        (xmin + text_size[0] + 3, ymin + text_size[1] + 4),
                        color, -1)
                    cv2.putText(
                        output_image,
                        COCO_CLASSES[pred_label] + ' : %.2f' % pred_prob,
                        (xmin, ymin + text_size[1] + 4),
                        cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
                # Ground-truth boxes in blue for visual comparison.
                for box_id in range(annot.size(0)):
                    xmin, ymin, xmax, ymax = annot[box_id, :4]
                    cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax),
                                  (255, 0, 0), 1)

                cv2.imwrite(
                    "{}/{}_prediction.jpg".format(
                        opt.prediction_dir, image_info["file_name"][:-4]),
                    output_image)
    print(sum(IoU_scores) / len(IoU_scores))
# Example #7 — file: train.py, project: uvipen/SSD-pytorch
def main(opt):
    """Train an SSD/SSDLite model on COCO 2017, resuming from a checkpoint if present.

    Uses NCCL DistributedDataParallel (one process per GPU, launched via
    torch.distributed) when CUDA is available, optionally with Apex AMP;
    falls back to single-process CPU training otherwise.

    Args:
        opt: Namespace with ``batch_size``, ``num_workers``, ``model``
            ("ssd"/"ssdlite"), ``data_path``, ``lr``, ``momentum``,
            ``weight_decay``, ``multistep``, ``amp``, ``log_path``,
            ``save_folder``, ``epochs`` and ``nms_threshold`` attributes.
    """
    if torch.cuda.is_available():
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        num_gpus = torch.distributed.get_world_size()
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)
        num_gpus = 1

    train_params = {
        "batch_size": opt.batch_size * num_gpus,
        "shuffle": True,
        "drop_last": False,
        "num_workers": opt.num_workers,
        "collate_fn": collate_fn
    }

    test_params = {
        "batch_size": opt.batch_size * num_gpus,
        "shuffle": False,
        "drop_last": False,
        "num_workers": opt.num_workers,
        "collate_fn": collate_fn
    }

    if opt.model == "ssd":
        dboxes = generate_dboxes(model="ssd")
        model = SSD(backbone=ResNet(), num_classes=len(coco_classes))
    else:
        dboxes = generate_dboxes(model="ssdlite")
        model = SSDLite(backbone=MobileNetV2(), num_classes=len(coco_classes))
    train_set = CocoDataset(opt.data_path, 2017, "train",
                            SSDTransformer(dboxes, (300, 300), val=False))
    train_loader = DataLoader(train_set, **train_params)
    test_set = CocoDataset(opt.data_path, 2017, "val",
                           SSDTransformer(dboxes, (300, 300), val=True))
    test_loader = DataLoader(test_set, **test_params)

    encoder = Encoder(dboxes)

    # Linear LR scaling with effective batch size (reference batch = 32).
    opt.lr = opt.lr * num_gpus * (opt.batch_size / 32)
    criterion = Loss(dboxes)

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=opt.lr,
                                momentum=opt.momentum,
                                weight_decay=opt.weight_decay,
                                nesterov=True)
    scheduler = MultiStepLR(optimizer=optimizer,
                            milestones=opt.multistep,
                            gamma=0.1)

    if torch.cuda.is_available():
        model.cuda()
        criterion.cuda()

        if opt.amp:
            from apex import amp
            from apex.parallel import DistributedDataParallel as DDP
            model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
        else:
            from torch.nn.parallel import DistributedDataParallel as DDP
        # It is recommended to use DistributedDataParallel, instead of DataParallel
        # to do multi-GPU training, even if there is only a single node.
        model = DDP(model)

    # Fresh TensorBoard log directory each run; keep save_folder if present.
    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)

    if not os.path.isdir(opt.save_folder):
        os.makedirs(opt.save_folder)
    checkpoint_path = os.path.join(opt.save_folder, "SSD.pth")

    writer = SummaryWriter(opt.log_path)

    if os.path.isfile(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        first_epoch = checkpoint["epoch"] + 1
        # On CPU the model was never wrapped in DDP, so it has no `.module`
        # attribute; unwrap defensively (the original crashed here on CPU).
        getattr(model, "module", model).load_state_dict(
            checkpoint["model_state_dict"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
    else:
        first_epoch = 0

    for epoch in range(first_epoch, opt.epochs):
        train(model, train_loader, epoch, writer, criterion, optimizer,
              scheduler, opt.amp)
        evaluate(model, test_loader, epoch, writer, encoder, opt.nms_threshold)

        checkpoint = {
            "epoch": epoch,
            # Same unwrap as above so CPU runs can checkpoint too.
            "model_state_dict": getattr(model, "module", model).state_dict(),
            "optimizer": optimizer.state_dict(),
            "scheduler": scheduler.state_dict()
        }
        torch.save(checkpoint, checkpoint_path)
def train(opt):
    """Train a two-classification-head EfficientDet variant on COCO (GPU only).

    Same overall structure as the single-head trainer: AdamW +
    ReduceLROnPlateau, periodic evaluation, best-loss checkpointing and
    early stopping — but the model returns (cls_loss, cls_2_loss, reg_loss).

    Args:
        opt: Namespace with ``batch_size``, ``data_path``, ``num_classes``,
            ``resume``, ``saved_path``, ``network``, ``lr``, ``num_epochs``,
            ``test_interval``, ``es_min_delta`` and ``es_patience``.

    Raises:
        Exception: If no CUDA device is available.
    """
    num_gpus = 1
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        raise Exception('no GPU')

    # Fixed input size, so cuDNN autotuning pays off.
    cudnn.benchmark = True

    training_params = {
        "batch_size": opt.batch_size * num_gpus,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": collater,
        "num_workers": 12
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }

    training_set = CocoDataset(root_dir=opt.data_path,
                               set="train2017",
                               transform=transforms.Compose(
                                   [Normalizer(),
                                    Augmenter(),
                                    Resizer()]))
    training_generator = DataLoader(training_set, **training_params)

    test_set = CocoDataset(root_dir=opt.data_path,
                           set="val2017",
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    test_generator = DataLoader(test_set, **test_params)

    opt.num_classes = training_set.num_classes()

    model = EfficientDet(opt)
    # Optionally resume from a previously saved state dict.
    if opt.resume:
        print('Loading model...')
        model.load_state_dict(
            torch.load(os.path.join(opt.saved_path, opt.network + '.pth')))

    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    model = model.cuda()
    model = nn.DataParallel(model)

    optimizer = torch.optim.AdamW(model.parameters(), opt.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           verbose=True)

    best_loss = 1e5
    best_epoch = 0
    model.train()

    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epochs):
        print('Epoch: {}/{}:'.format(epoch + 1, opt.num_epochs))
        model.train()
        epoch_loss = []
        progress_bar = tqdm(training_generator)
        for iter, data in enumerate(progress_bar):
            # NOTE(review): broad try/except keeps training alive through
            # per-batch failures — deliberate best-effort.
            try:
                optimizer.zero_grad()
                # Model returns two classification losses and a regression
                # loss given [images, annots].
                if torch.cuda.is_available():
                    cls_loss, cls_2_loss, reg_loss = model(
                        [data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    cls_loss, cls_2_loss, reg_loss = model(
                        [data['img'].float(), data['annot']])

                cls_loss = cls_loss.mean()
                reg_loss = reg_loss.mean()
                cls_2_loss = cls_2_loss.mean()
                loss = cls_loss + cls_2_loss + reg_loss
                # Skip batches with no usable targets (zero loss => no grads).
                if loss == 0:
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))
                total_loss = np.mean(epoch_loss)

                progress_bar.set_description(
                    'Epoch: {}/{}. Iteration: {}/{}'.format(
                        epoch + 1, opt.num_epochs, iter + 1,
                        num_iter_per_epoch))

                progress_bar.write(
                    'Cls loss: {:.5f}\tReg loss: {:.5f}\tCls+Reg loss: {:.5f}\tBatch loss: {:.5f}\tTotal loss: {:.5f}'
                    .format(cls_loss, reg_loss, cls_loss + reg_loss, loss,
                            total_loss))

            except Exception as e:
                print(e)
                continue
        # Plateau scheduler keyed on the mean training loss of the epoch.
        scheduler.step(np.mean(epoch_loss))

        if epoch % opt.test_interval == 0:
            model.eval()
            loss_regression_ls = []
            loss_classification_ls = []
            loss_classification_2_ls = []
            progress_bar = tqdm(test_generator)
            progress_bar.set_description_str(' Evaluating')
            for iter, data in enumerate(progress_bar):
                with torch.no_grad():
                    if torch.cuda.is_available():
                        cls_loss, cls_2_loss, reg_loss = model(
                            [data['img'].cuda().float(), data['annot'].cuda()])
                    else:
                        cls_loss, cls_2_loss, reg_loss = model(
                            [data['img'].float(), data['annot']])

                    cls_loss = cls_loss.mean()
                    cls_2_loss = cls_2_loss.mean()
                    reg_loss = reg_loss.mean()

                    loss_classification_ls.append(float(cls_loss))
                    loss_classification_2_ls.append(float(cls_2_loss))
                    loss_regression_ls.append(float(reg_loss))

            cls_loss = np.mean(loss_classification_ls)
            cls_2_loss = np.mean(loss_classification_2_ls)
            reg_loss = np.mean(loss_regression_ls)
            loss = cls_loss + cls_2_loss + reg_loss

            # NOTE(review): np.mean(loss) of a scalar is a no-op here.
            print(
                'Epoch: {}/{}. \nClassification loss: {:1.5f}. \tClassification_2 loss: {:1.5f}. \tRegression loss: {:1.5f}. \tTotal loss: {:1.5f}'
                .format(epoch + 1, opt.num_epochs, cls_loss, cls_2_loss,
                        reg_loss, np.mean(loss)))

            # Save only on sufficient improvement (es_min_delta).
            if loss + opt.es_min_delta < best_loss:
                print('Saving model...')
                best_loss = loss
                best_epoch = epoch
                torch.save(model.module.state_dict(),
                           os.path.join(opt.saved_path, opt.network + '.pth'))
                # torch.save(model, os.path.join(opt.saved_path, opt.network+'.pth'))

            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, loss))
                break
        if not len(results):
            return

        # write output
        json.dump(results,
                  open('{}_bbox_results.json'.format(dataset.set_name), 'w'),
                  indent=4)

        # load results in COCO evaluation tool
        coco_true = dataset.coco
        coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
            dataset.set_name))

        # run COCO evaluation
        coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()


if __name__ == '__main__':
    # Load a fully-serialized EfficientDet (saved via torch.save on a
    # DataParallel wrapper, hence `.module`) and run COCO evaluation on
    # val2017. Requires a CUDA device.
    efficientdet = torch.load(
        "trained_models/signatrix_efficientdet_coco.pth").module
    efficientdet.cuda()
    dataset_val = CocoDataset("data/COCO",
                              set='val2017',
                              transform=transforms.Compose(
                                  [Normalizer(), Resizer()]))
    evaluate_coco(dataset_val, efficientdet)
# Example #10
		def Train_Dataset(self, root_dir, coco_dir, img_dir, set_dir, batch_size=8, image_size=512, use_gpu=True, num_workers=3):
				'''
				User function: Set training dataset parameters

				Expected dataset directory structure::

						root_dir
						  |--coco_dir
						       |--img_dir
						       |    |--<set_dir>                 (training images: img1.jpg, img2.jpg, ...)
						       |--annotations
						            |--instances_<set_dir>.json  (annotations in proper COCO format)
						            |--classes.txt               (class names, one per line, alphabetical)

				Example (train set):
						root_dir = "../sample_dataset"; coco_dir = "kangaroo";
						img_dir = "images"; set_dir = "Train"

				Note: The annotation file name must match set_dir
				(instances_<set_dir>.json).

				Args:
						root_dir (str): Path to root directory containing coco_dir
						coco_dir (str): Name of coco_dir containing image folder and annotation folder
						img_dir (str): Name of folder containing all training and validation folders
						set_dir (str): Name of folder containing all training images
						batch_size (int): Mini batch sampling size for training epochs
						image_size (int): Either of [512, 300]
						use_gpu (bool): If True use GPU else run on CPU
						num_workers (int): Number of parallel processors for data loader

				Returns:
						None
				'''
				self.system_dict["dataset"]["train"]["root_dir"] = root_dir
				self.system_dict["dataset"]["train"]["coco_dir"] = coco_dir
				self.system_dict["dataset"]["train"]["img_dir"] = img_dir
				self.system_dict["dataset"]["train"]["set_dir"] = set_dir

				self.system_dict["params"]["batch_size"] = batch_size
				self.system_dict["params"]["image_size"] = image_size
				self.system_dict["params"]["use_gpu"] = use_gpu
				self.system_dict["params"]["num_workers"] = num_workers

				# Bug fix: "num_gpus" was previously assigned only on the CUDA branch
				# below, so CPU-only runs (use_gpu=False, or CUDA unavailable) could
				# raise KeyError when the effective batch size is computed. Default
				# to 1 without clobbering a value that may have been set elsewhere.
				self.system_dict["local"].setdefault("num_gpus", 1)

				if self.system_dict["params"]["use_gpu"]:
						if torch.cuda.is_available():
								self.system_dict["local"]["num_gpus"] = torch.cuda.device_count()
								torch.cuda.manual_seed(123)
						else:
								torch.manual_seed(123)

				# Effective batch size is the per-device batch size multiplied by the
				# number of detected GPUs.
				self.system_dict["local"]["training_params"] = {"batch_size": self.system_dict["params"]["batch_size"] * self.system_dict["local"]["num_gpus"],
																								"shuffle": True,
																								"drop_last": True,
																								"collate_fn": collater,
																								"num_workers": self.system_dict["params"]["num_workers"]}

				# NOTE(review): Augmenter() is left out of the transform pipeline here
				# (other variants of this code include it) — confirm that training
				# augmentation is intentionally disabled.
				self.system_dict["local"]["training_set"] = CocoDataset(root_dir=self.system_dict["dataset"]["train"]["root_dir"] + "/" + self.system_dict["dataset"]["train"]["coco_dir"],
																								img_dir=self.system_dict["dataset"]["train"]["img_dir"],
																								set_dir=self.system_dict["dataset"]["train"]["set_dir"],
																								transform=transforms.Compose([Normalizer(), Resizer()]))

				self.system_dict["local"]["training_generator"] = DataLoader(self.system_dict["local"]["training_set"],
																								**self.system_dict["local"]["training_params"])
Example #11
0
		def Val_Dataset(self, root_dir, coco_dir, img_dir, set_dir):
				'''
				User function: Set validation dataset parameters

				Expected dataset directory structure::

						root_dir
						  |--coco_dir
						       |--img_dir
						       |    |--<set_dir>                 (validation images: img1.jpg, img2.jpg, ...)
						       |--annotations
						            |--instances_<set_dir>.json  (annotations in proper COCO format)
						            |--classes.txt               (class names, one per line, alphabetical)

				Example (validation set):
						root_dir = "../sample_dataset"; coco_dir = "kangaroo";
						img_dir = "images"; set_dir = "Val"

				Note: The annotation file name must match set_dir
				(instances_<set_dir>.json).

				Args:
						root_dir (str): Path to root directory containing coco_dir
						coco_dir (str): Name of coco_dir containing image folder and annotation folder
						img_dir (str): Name of folder containing all training and validation folders
						set_dir (str): Name of folder containing all validation images

				Returns:
						None
				'''
				self.system_dict["dataset"]["val"]["status"] = True
				self.system_dict["dataset"]["val"]["root_dir"] = root_dir
				self.system_dict["dataset"]["val"]["coco_dir"] = coco_dir
				self.system_dict["dataset"]["val"]["img_dir"] = img_dir
				self.system_dict["dataset"]["val"]["set_dir"] = set_dir

				# Validation reuses the batch_size/num_workers stored in params (set
				# by Train_Dataset); no shuffling, and the final partial batch is kept.
				self.system_dict["local"]["val_params"] = {"batch_size": self.system_dict["params"]["batch_size"],
																						"shuffle": False,
																						"drop_last": False,
																						"collate_fn": collater,
																						"num_workers": self.system_dict["params"]["num_workers"]}

				# Validation images are only normalized and resized (no augmentation).
				self.system_dict["local"]["val_set"] = CocoDataset(root_dir=self.system_dict["dataset"]["val"]["root_dir"] + "/" + self.system_dict["dataset"]["val"]["coco_dir"],
																						img_dir=self.system_dict["dataset"]["val"]["img_dir"],
																						set_dir=self.system_dict["dataset"]["val"]["set_dir"],
																						transform=transforms.Compose([Normalizer(), Resizer()]))

				# Stored under the historical "test_generator" key; downstream code
				# reads this key, so it is kept for backward compatibility.
				self.system_dict["local"]["test_generator"] = DataLoader(self.system_dict["local"]["val_set"],
																						**self.system_dict["local"]["val_params"])
        if not len(results):
            return

        # write output
        json.dump(results,
                  open('{}_bbox_results.json'.format(dataset.set_name), 'w'),
                  indent=4)

        # load results in COCO evaluation tool
        coco_true = dataset.coco
        coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
            dataset.set_name))

        # run COCO evaluation
        coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()


if __name__ == '__main__':
    # Restore the serialized model (unwrapping the DataParallel .module)
    # and run COCO evaluation on the 2017 validation split.
    model = torch.load(
        "trained_models/signatrix_efficientdet_coco.pth").module
    model.cuda()
    val_dataset = CocoDataset(
        "/disk4t/data/coco/data/coco",
        set='val2017',
        transform=transforms.Compose([Normalizer(), Resizer()]))
    evaluate_coco(val_dataset, model)
Example #13
0
            return

        # write output
        json.dump(results,
                  open('{}_bbox_results.json'.format(dataset.set_name), 'w'),
                  indent=4)

        # load results in COCO evaluation tool
        coco_true = dataset.coco
        coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
            dataset.set_name))

        # run COCO evaluation
        coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()


if __name__ == '__main__':
    # Restore the trained network named on the command line (unwrapping the
    # DataParallel .module) and evaluate it on the COCO 2017 validation split.
    net = torch.load(args.model).module
    net.cuda()

    val_set = CocoDataset(
        args.dataset,
        set='val2017',
        transform=transforms.Compose([Normalizer(), Resizer()]))
    evaluate_coco(val_set, net)
Example #14
0
def train(opt):
    """Train EfficientDet on a COCO-format dataset with periodic evaluation.

    Builds train/val data loaders, creates or resumes the model, then runs
    the epoch loop with gradient clipping, LR scheduling on the epoch loss,
    best-model checkpointing and early stopping.

    Side effects: wipes and recreates ``opt.log_path`` for TensorBoard logs,
    and writes checkpoints under ``opt.saved_path``.

    Args:
        opt: parsed options namespace; fields read here include data_path,
            batch_size, log_path, saved_path, resume, start_epoch, lr,
            num_epochs, test_interval, save_interval, es_min_delta,
            es_patience, backbone_network, remote_loading, advprop.

    Returns:
        None
    """
    # Seed the RNGs; the train batch size below scales with the GPU count.
    num_gpus = 1
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)

    training_params = {
        "batch_size": opt.batch_size * num_gpus,  # total batch across GPUs
        "shuffle": True,
        "drop_last": True,
        "collate_fn": collater,
        "num_workers": 12
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": collater,
        "num_workers": 12
    }

    # Training applies Augmenter(); evaluation only normalizes and resizes.
    training_set = CocoDataset(root_dir=opt.data_path,
                               set="train2017",
                               transform=transforms.Compose(
                                   [Normalizer(),
                                    Augmenter(),
                                    Resizer()]))
    training_generator = DataLoader(training_set, **training_params)

    test_set = CocoDataset(root_dir=opt.data_path,
                           set="val2017",
                           transform=transforms.Compose(
                               [Normalizer(), Resizer()]))
    test_generator = DataLoader(test_set, **test_params)

    # Per-backbone channel widths forwarded as conv_in_channels (presumably
    # the feature-map channels of the three backbone stages fed to the
    # detection head — TODO confirm against the EfficientDet constructor).
    channels_map = {
        'efficientnet-b0': [40, 80, 192],
        'efficientnet-b1': [40, 80, 192],
        'efficientnet-b2': [48, 88, 208],
        'efficientnet-b3': [48, 96, 232],
        'efficientnet-b4': [56, 112, 272],
        'efficientnet-b5': [64, 128, 304],
        'efficientnet-b6': [72, 144, 344],
        'efficientnet-b7': [80, 160, 384],
        'efficientnet-b8': [80, 160, 384]
    }

    # NOTE: any previous logs at opt.log_path are deleted on every run.
    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)

    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    writer = SummaryWriter(opt.log_path)

    if opt.resume:
        # Resume from the "latest" checkpoint written every save_interval below.
        resume_path = os.path.join(opt.saved_path,
                                   'signatrix_efficientdet_coco_latest.pth')
        model = torch.load(resume_path).module
        print("model loaded from {}".format(resume_path))
    else:
        model = EfficientDet(
            num_classes=training_set.num_classes(),
            network=opt.backbone_network,
            remote_loading=opt.remote_loading,
            advprop=opt.advprop,
            conv_in_channels=channels_map[opt.backbone_network])
        print("model created with backbone {}, advprop {}".format(
            opt.backbone_network, opt.advprop))

    if torch.cuda.is_available():
        model = model.cuda()
        model = nn.DataParallel(model)

    if opt.resume:
        # Scale the LR down by 10x per ~100 already-completed epochs
        # (uses round(), not floor, so the step lands near each 100 mark).
        m = round(opt.start_epoch / 100)
        opt.lr = opt.lr * (0.1**m)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=3,
                                                           verbose=True)

    best_loss = 1e5
    best_epoch = 0
    model.train()

    num_iter_per_epoch = len(training_generator)

    start_epoch = 0
    if opt.resume:
        start_epoch = opt.start_epoch
    for epoch in range(start_epoch, opt.num_epochs):
        model.train()
        # if torch.cuda.is_available():
        #     model.module.freeze_bn()
        # else:
        #     model.freeze_bn()
        epoch_loss = []
        progress_bar = tqdm(training_generator)
        # NOTE(review): "iter" shadows the builtin of the same name.
        for iter, data in enumerate(progress_bar):
            try:
                optimizer.zero_grad()
                if torch.cuda.is_available():
                    cls_loss, reg_loss = model(
                        [data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    cls_loss, reg_loss = model(
                        [data['img'].float(), data['annot']])

                cls_loss = cls_loss.mean()
                reg_loss = reg_loss.mean()
                loss = cls_loss + reg_loss
                # Skip batches that produce no gradient signal.
                if loss == 0:
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))
                total_loss = np.mean(epoch_loss)

                progress_bar.set_description(
                    '{} Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Batch loss: {:.5f} Total loss: {:.5f}'
                    .format(datetime.now(), epoch + 1, opt.num_epochs,
                            iter + 1, num_iter_per_epoch, cls_loss, reg_loss,
                            loss, total_loss))
                writer.add_scalar('Train/Total_loss', total_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Regression_loss', reg_loss,
                                  epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Classfication_loss (focal loss)',
                                  cls_loss, epoch * num_iter_per_epoch + iter)

            # NOTE(review): the broad catch keeps training alive on bad
            # batches but can hide real bugs — consider narrowing it.
            except Exception as e:
                print(e)
                continue
        # Plateau scheduler is driven by the mean training loss of the epoch.
        scheduler.step(np.mean(epoch_loss))

        if epoch % opt.test_interval == 0:
            # Periodic evaluation on the validation split (no gradients).
            model.eval()
            loss_regression_ls = []
            loss_classification_ls = []
            for iter, data in enumerate(test_generator):
                with torch.no_grad():
                    if torch.cuda.is_available():
                        cls_loss, reg_loss = model(
                            [data['img'].cuda().float(), data['annot'].cuda()])
                    else:
                        cls_loss, reg_loss = model(
                            [data['img'].float(), data['annot']])

                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()

                    loss_classification_ls.append(float(cls_loss))
                    loss_regression_ls.append(float(reg_loss))

            cls_loss = np.mean(loss_classification_ls)
            reg_loss = np.mean(loss_regression_ls)
            loss = cls_loss + reg_loss

            # NOTE(review): np.mean(loss) below is redundant — loss is scalar.
            print(
                '{} Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Total loss: {:1.5f}'
                .format(datetime.now(), epoch + 1, opt.num_epochs, cls_loss,
                        reg_loss, np.mean(loss)))
            writer.add_scalar('Test/Total_loss', loss, epoch)
            writer.add_scalar('Test/Regression_loss', reg_loss, epoch)
            writer.add_scalar('Test/Classfication_loss (focal loss)', cls_loss,
                              epoch)

            # Checkpoint whenever validation loss improves by es_min_delta.
            if loss + opt.es_min_delta < best_loss:
                best_loss = loss
                best_epoch = epoch
                torch.save(
                    model,
                    os.path.join(
                        opt.saved_path,
                        "signatrix_efficientdet_coco_best_epoch{}.pth".format(
                            epoch)))
                # The triple-quoted block below is a no-op string literal —
                # a disabled ONNX-export experiment kept for reference.
                ''' 
                dummy_input = torch.rand(opt.batch_size, 3, 512, 512)
                if torch.cuda.is_available():
                    dummy_input = dummy_input.cuda()
                if isinstance(model, nn.DataParallel):
                    model.module.backbone_net.model.set_swish(memory_efficient=False)
                    
                    torch.onnx.export(model.module, dummy_input,
                                      os.path.join(opt.saved_path, "signatrix_efficientdet_coco.onnx"),
                                      verbose=False)
                    
                    model.module.backbone_net.model.set_swish(memory_efficient=True)
                else:
                    model.backbone_net.model.set_swish(memory_efficient=False)
                    
                    torch.onnx.export(model, dummy_input,
                                      os.path.join(opt.saved_path, "signatrix_efficientdet_coco.onnx"),
                                      verbose=False)
                    
                    model.backbone_net.model.set_swish(memory_efficient=True)
                '''
            print("epoch:", epoch, "best_epoch:", best_epoch,
                  "epoch - best_epoch=", epoch - best_epoch)
            # Early stopping: chained comparison requires es_patience > 0 to
            # be enabled at all.
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, loss))
                break
        if epoch % opt.save_interval == 0:
            torch.save(
                model,
                os.path.join(opt.saved_path,
                             "signatrix_efficientdet_coco_latest.pth"))
    writer.close()