Example #1
    # Read the run configuration from the parsed command-line arguments.
    # (The excerpt starts mid-function; num_classes is assumed to come from
    # the arguments as well.)
    num_classes = args.num_classes
    size = args.size
    coco_annotation = args.coco_annotation
    model_weights = args.model_weights
    fig_dir = args.fig_dir

    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    # Build the detection model, load the trained weights and switch to
    # evaluation mode.  map_location keeps the load working on CPU-only
    # machines, since the model is never moved to a GPU in this excerpt.
    model = get_model(num_classes=num_classes)
    model.load_state_dict(torch.load(model_weights, map_location="cpu"))
    model.eval()

    # Walk the dataset: run inference on every image and draw the
    # ground-truth boxes for visual inspection.
    pic_count = 0
    dataset = BreadDataset(coco_annotation,
                           size,
                           transforms=get_transform(False))
    for image, targets in dataset:
        bboxes = targets["boxes"]

        # Inference only, so gradients are not needed.
        with torch.no_grad():
            prediction = model([image])

        # Convert the CHW float tensor back to an 8-bit HWC PIL image
        # that can be drawn on.
        draw_image = Image.fromarray(
            image.mul(255).permute(1, 2, 0).byte().numpy())
        draw = ImageDraw.Draw(draw_image)

        # Draw every ground-truth box in green.
        for elem in range(len(bboxes)):
            draw.rectangle([(bboxes[elem][0], bboxes[elem][1]),
                            (bboxes[elem][2], bboxes[elem][3])],
                           outline="green",
                           width=3)
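        # --- Hypothetical continuation (not part of the original excerpt) ---
        # Assuming get_model() returns a torchvision-style detector, each
        # prediction is a dict with "boxes", "labels" and "scores"; a natural
        # next step is to overlay the predicted boxes in red and save the
        # figure to fig_dir.  The 0.5 confidence cut-off is an assumption.
        for box, score in zip(prediction[0]["boxes"], prediction[0]["scores"]):
            if score < 0.5:
                continue
            x0, y0, x1, y1 = box.tolist()
            draw.rectangle([(x0, y0), (x1, y1)], outline="red", width=3)
        draw_image.save(
            os.path.join(fig_dir, "prediction_{}.png".format(pic_count)))
        pic_count += 1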
Example #2
                  ]  # Bounding boxes
    pred_score = list(pred[0]['scores'].detach().numpy())
    # Scores come back sorted in descending order, so the last index whose
    # score exceeds the threshold marks the cut-off.  Guard against the case
    # where no detection passes the threshold.
    keep = [i for i, score in enumerate(pred_score) if score > threshold]
    if not keep:
        return [], [], []
    pred_t = keep[-1]
    pred_boxes = pred_boxes[:pred_t + 1]
    pred_class = pred_class[:pred_t + 1]
    pred_score = pred_score[:pred_t + 1]
    return pred_boxes, pred_class, pred_score
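
# Hypothetical usage sketch: the helper whose tail is shown above returns the
# boxes, class labels and scores that survive the score threshold.  Its name
# and signature are cut off in this excerpt, so "get_prediction" and its
# arguments below are assumptions rather than the original API:
#
#     boxes, classes, scores = get_prediction('validating_data/img.png',
#                                             threshold=0.8)
#     for box, cls, score in zip(boxes, classes, scores):
#         print('{}: {:.2f} at {}'.format(cls, score, box))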


if __name__ == "__main__":
    # Validation dataset and loader (batch size 1, deterministic order).
    dataset_test = SatelliteDataset('validating_data/',
                                    get_transform(train=False))
    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=4,
                                                   collate_fn=utils.collate_fn)
    model = init_model()
    imgs = glob.glob(os.path.join('validating_data/', "*"))
    # Collect ground-truth (and later predicted) boxes for metric computation.
    myBoundingBoxes = BoundingBoxes()
    for idx in range(len(dataset_test)):
        img, target = dataset_test[idx]
        # Register every ground-truth box of this image.
        for i in range(len(target['boxes'])):
            gt_boundingBox = BoundingBox(
                imageName=target["img_name"],
Example #3
            # Label 1: green contours, red box and caption (image is RGB here).
            cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 5)
            cv2.putText(img, 'mark_type_1', (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0))
        elif label == 2:
            # Label 2: blue contours, green box and caption.
            cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 5)
            cv2.putText(img, 'mark_type_2', (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0))

    # plt.figure(figsize=(20, 15))
    # cv2.namedWindow("test", cv2.WINDOW_NORMAL)
    # cv2.imshow("test", img)
    # cv2.waitKey(0)
    # Convert back to BGR so the result can be saved or shown with OpenCV.
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return img


if __name__ == "__main__":
    num_class = 2
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Build the instance-segmentation model and load the trained weights;
    # map_location keeps the load working on CPU-only machines.
    model = get_model_instance_segmentation(num_class)
    model.load_state_dict(torch.load("test.pth", map_location=device))
    model.eval()
    model.to(device)

    dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))

    # Visualise the predictions for one test image.
    img, _ = dataset_test[1]
    result = showbbox(model, img)
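    # Hypothetical follow-up (not part of the original example): write the
    # annotated BGR frame returned by showbbox to disk; the filename is an
    # assumption.
    cv2.imwrite("showbbox_result.png", result)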

Example #4
def main(args):
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    dataset, num_classes = get_dataset(args.dataset, "train", get_transform(train=True), args.data_path)
    dataset_test, _ = get_dataset(args.dataset, "val", get_transform(train=False), args.data_path)

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    if args.aspect_ratio_group_factor >= 0:
        group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(
            train_sampler, args.batch_size, drop_last=True)

    data_loader = torch.utils.data.DataLoader(
        dataset, batch_sampler=train_batch_sampler, num_workers=args.workers,
        collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=5,
        sampler=test_sampler, num_workers=args.workers,
        collate_fn=utils.collate_fn)

    print("Creating model")
    model = torchvision.models.detection.__dict__[args.model](num_classes=num_classes,
                                                              pretrained=args.pretrained)
    model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(
        params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        evaluate(model, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)
        lr_scheduler.step()
        if args.output_dir:
            utils.save_on_master({
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'args': args,
                'epoch': epoch},
                os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

        # evaluate after every epoch
        evaluate(model, data_loader_test, device=device)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
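

# A minimal sketch of the argparse.Namespace that main() above expects.
# Every field below is read somewhere in main(), but the values are assumed
# defaults rather than the settings of the original run, and
# utils.init_distributed_mode(args) may add or override the distributed
# fields (e.g. args.gpu).
import argparse

example_args = argparse.Namespace(
    device='cuda', dataset='coco', data_path='data/coco',
    model='fasterrcnn_resnet50_fpn', pretrained=False,
    distributed=False, workers=4, batch_size=2,
    aspect_ratio_group_factor=3,
    lr=0.02, momentum=0.9, weight_decay=1e-4,
    lr_steps=[16, 22], lr_gamma=0.1,
    start_epoch=0, epochs=26, print_freq=20,
    resume='', test_only=False, output_dir='.')
# main(example_args)
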
def get_dataset(args):
    """ Returns train and test datasets and a user group which is a dict where
    the keys are the user index and the values are the corresponding data for
    each of those users.
    """
    if args.dataset == 'coco':
        if args.data == 'val2017':
            path2data = os.path.join(args.root, 'data/coco/val2017')
            path2ann = os.path.join(
                args.root, 'data/coco/annotations/instances_val2017.json')
        elif args.data == 'train2017':
            path2data = os.path.join(args.root, 'data/coco/train2017')
            path2ann = os.path.join(
                args.root, 'data/coco/annotations/instances_train2017.json')

        # path2data = r"C:\Users\cgong002\Google Drive\data\coco\val2017" #local
        # path2ann = r"C:\Users\cgong002\Google Drive\data\coco\annotations\instances_val2017.json" #local

        # Restrict the annotations to the selected category IDs.
        if args.num_classes == 81:
            catIds = random_n_classes(args.num_classes)
        elif args.num_classes == 21:
            catIds = [
                0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20,
                63, 7, 72
            ]
        elif args.num_classes == 2:
            catIds = [0, 1]
        else:
            raise ValueError(
                'Unsupported num_classes: {}'.format(args.num_classes))
        # Use data augmentation for training unless args.dp is set.
        augmentation = not args.dp
        train_dataset = datasets.CocoDetection(
            path2data,
            path2ann,
            transforms=Compose([
                FilterAndRemapCocoCategories(catIds, remap=True),
                ConvertCocoPolysToMask(),
                get_transform(train=augmentation)
            ]))
        test_dataset = datasets.CocoDetection(path2data,
                                              path2ann,
                                              transforms=Compose([
                                                  FilterAndRemapCocoCategories(
                                                      catIds, remap=True),
                                                  ConvertCocoPolysToMask(),
                                                  get_transform(train=False)
                                              ]))
        # Split the (sub-sampled) indices into train and test parts (80/20).
        n = len(train_dataset)
        torch.manual_seed(args.seed)
        idxs = torch.randperm(n).tolist()
        idxs = idxs[:int(n * args.sample_rate)]
        split_idx = len(idxs) // 5 * 4
        train_dataset = torch.utils.data.Subset(train_dataset,
                                                idxs[:split_idx])
        test_dataset = torch.utils.data.Subset(test_dataset, idxs[split_idx:])

        # Partition the training data among users.
        if args.iid:
            # Sample IID user data from COCO.
            user_groups = coco_iid(train_dataset, args.num_users)
        else:
            # Sample non-IID user data from COCO.
            if args.unequal:
                # Choose unequal splits for every user.
                user_groups = coco_noniid_unequal(train_dataset,
                                                  args.num_users, args.data)
            else:
                # Choose equal splits for every user.
                user_groups = coco_noniid(train_dataset, args.num_users,
                                          args.data)

    else:
        exit('Unrecognized dataset')

    return train_dataset, test_dataset, user_groups
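

# Hypothetical usage sketch (not part of the original file): build one
# DataLoader per federated user from the groups returned by get_dataset().
# The user-group values are assumed to be collections of sample indices, as
# in typical federated-averaging code, and "parsed_args" stands for an
# argparse.Namespace carrying the fields get_dataset() reads (dataset, data,
# root, num_classes, dp, seed, sample_rate, iid, unequal, num_users).
train_dataset, test_dataset, user_groups = get_dataset(parsed_args)
user_loaders = {
    user: torch.utils.data.DataLoader(
        torch.utils.data.Subset(train_dataset, list(idxs)),
        batch_size=2, shuffle=True, collate_fn=utils.collate_fn)
    for user, idxs in user_groups.items()
}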