import os

# Assumes the torchvision detection reference helpers are importable here:
# `T` is the references/detection transforms module, and ConvertCocoPolysToMask,
# CocoDetection, _coco_remove_images_without_annotations come from coco_utils.
def get_coco(root, image_set, transforms, mode='instances'):
    anno_file_template = "{}_{}2017.json"
    PATHS = {
        "train": ("train2017",
                  os.path.join("annotations",
                               anno_file_template.format(mode, "train"))),
        "val": ("val2017",
                os.path.join("annotations",
                             anno_file_template.format(mode, "val"))),
        # "train": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val")))
    }

    t = [ConvertCocoPolysToMask()]

    if transforms is not None:
        t.append(transforms)
    transforms = T.Compose(t)

    img_folder, ann_file = PATHS[image_set]
    img_folder = os.path.join(root, img_folder)
    ann_file = os.path.join(root, ann_file)

    dataset = CocoDetection(img_folder, ann_file, transforms=transforms)

    if image_set == "train":
        dataset = _coco_remove_images_without_annotations(dataset)

    # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])

    return dataset
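A minimal usage sketch (hypothetical paths; `get_transform` is defined as in the examples below):

# Assumes the standard COCO layout under root:
#   /data/coco/train2017/*.jpg
#   /data/coco/annotations/instances_train2017.json
dataset = get_coco(root="/data/coco", image_set="train",
                   transforms=get_transform(train=True))
print(len(dataset))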
Example #2
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        # augmentations would go here; none are applied for now
        pass
        # transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
Example #3
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    # Note: with the stock torchvision transforms, placing `.ToTensor()` before
    # `.RandomHorizontalFlip()` raises
    # `TypeError: img should be PIL Image. Got <class 'torch.Tensor'>`;
    # the detection-reference `T` used here flips tensors (and targets) directly.
    return T.Compose(transforms)
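The ordering note above refers to the stock torchvision transforms, where (in older releases) `RandomHorizontalFlip` accepted PIL Images only. The detection reference scripts instead define a pair-aware flip that takes the tensor produced by `ToTensor()` and mirrors the targets with it; a sketch along those lines (not the verbatim reference code):

import random

class RandomHorizontalFlip:
    """Flip an image tensor and its detection targets together."""
    def __init__(self, prob):
        self.prob = prob

    def __call__(self, image, target):
        if random.random() < self.prob:
            _, _, width = image.shape  # C, H, W tensor from ToTensor()
            image = image.flip(-1)     # mirror the image horizontally
            boxes = target["boxes"]
            # Mirror x-coordinates: new_x1 = W - old_x2, new_x2 = W - old_x1
            boxes[:, [0, 2]] = width - boxes[:, [2, 0]]
            target["boxes"] = boxes
            if "masks" in target:
                target["masks"] = target["masks"].flip(-1)
        return image, target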
Example #4
def get_transform(train):
    transforms = []
    # converts the image, a PIL image, into a PyTorch Tensor
    transforms.append(T.ToTensor())
    if train:
        # during training, randomly flip the training images
        # and ground-truth for data augmentation
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
Example #5
import numpy as np
from torch.utils.data import DataLoader

def extract_bboxes(mask):
    """Compute tight [x1, y1, x2, y2] boxes from a [H, W, N] instance mask.
    (Function name assumed; the snippet starts mid-function in the source.)"""
    boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
    for i in range(mask.shape[-1]):
        m = mask[:, :, i]
        # Bounding box.
        horizontal_indicies = np.where(np.any(m, axis=0))[0]
        vertical_indicies = np.where(np.any(m, axis=1))[0]
        if horizontal_indicies.shape[0]:
            x1, x2 = horizontal_indicies[[0, -1]]
            y1, y2 = vertical_indicies[[0, -1]]
            # x2 and y2 should not be part of the box. Increment by 1.
            x2 += 1
            y2 += 1
        else:
            # No mask for this instance. Might happen due to
            # resizing or cropping. Set bbox to zeros
            x1, x2, y1, y2 = 0, 0, 0, 0

        # Alternative [y1, x1, y2, x2] ordering, kept commented out:
        # boxes[i] = np.array([y1, x1, y2, x2])
        boxes[i] = np.array([x1, y1, x2, y2])
    return boxes.astype(np.int32, copy=False)

if __name__ == "__main__":
    # PennFudanDataset and the transforms module T are assumed to be
    # defined/imported elsewhere in this project.
    dataset = PennFudanDataset(root='./data-ins/train/',
                               transforms=T.Compose([T.ToTensor()]))

    # Detection samples have varying image sizes, so the default collate
    # cannot stack them; tuple(zip(*batch)) keeps samples separate.
    loader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=0,
                        collate_fn=lambda batch: tuple(zip(*batch)))

    for i, (images, targets) in enumerate(loader):
        print(images[0].shape)
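A quick sanity check of the box extraction (toy data, using the `extract_bboxes` name assumed above): a blob spanning rows 1-2 and columns 1-3 of a 5x5 mask should give [x1, y1, x2, y2] = [1, 1, 4, 3], with the exclusive +1 on the far edges:

m = np.zeros((5, 5, 1), dtype=bool)
m[1:3, 1:4, 0] = True
print(extract_bboxes(m))  # -> [[1 1 4 3]]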
Example #6
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    # transforms.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
    return T.Compose(transforms)
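If normalization is wanted, it can be appended after `ToTensor()`; a sketch with the commented-out values (hypothetical variant, assuming `T` is the stock `torchvision.transforms` here; note that torchvision's detection models already normalize inputs internally, in GeneralizedRCNNTransform, which is a common reason to leave this line out):

import torchvision.transforms as T

def get_transform_with_normalize(train):  # hypothetical variant name
    transforms = [T.ToTensor(),
                  T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]
    return T.Compose(transforms)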