# Example no. 1 (score: 0)
    def initialize(self, source, target, batch_size1, batch_size2, scale=32):
        """Build source/target datasets and pair their loaders for training.

        Args:
            source: dict with 'imgs' and 'labels' entries for the source domain.
            target: dict with 'imgs' and 'labels' entries for the target domain.
            batch_size1: batch size for the source-domain loader.
            batch_size2: batch size for the target-domain loader.
            scale: edge size images are resized to before tensor conversion
                (default 32).

        Side effects:
            Sets ``self.dataset_s``, ``self.dataset_t`` and ``self.paired_data``.
        """
        transform = transforms.Compose([
            # BUG FIX: transforms.Scale was deprecated and later removed from
            # torchvision; transforms.Resize is the drop-in replacement.
            transforms.Resize(scale),
            transforms.ToTensor(),
            # Map [0, 1] tensors to [-1, 1] per channel.
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        dataset_source = Dataset(source['imgs'],
                                 source['labels'],
                                 transform=transform)
        dataset_target = Dataset(target['imgs'],
                                 target['labels'],
                                 transform=transform)
        data_loader_s = torch.utils.data.DataLoader(dataset_source,
                                                    batch_size=batch_size1,
                                                    shuffle=True,
                                                    num_workers=4)
        data_loader_t = torch.utils.data.DataLoader(dataset_target,
                                                    batch_size=batch_size2,
                                                    shuffle=True,
                                                    num_workers=4)
        self.dataset_s = dataset_source
        self.dataset_t = dataset_target
        # NOTE(review): float("inf") presumably means PairedData never caps
        # the number of paired iterations — confirm in PairedData.
        self.paired_data = PairedData(data_loader_s, data_loader_t,
                                      float("inf"))
    def initialize(self, source, target, batch_size1, batch_size2, scale=32):
        """Prepare raw (untransformed) paired loaders for the two domains.

        Unlike the transform-based variant, images are consumed as-is and the
        loaders run in the main process (``num_workers=0``).

        Args:
            source: dict with 'imgs' and 'labels' for the source domain.
            target: dict with 'imgs' and 'labels' for the target domain.
            batch_size1: source-loader batch size.
            batch_size2: target-loader batch size.
            scale: unused here; kept for signature compatibility.

        Side effects:
            Sets ``self.dataset_s``, ``self.dataset_t`` and ``self.paired_data``.
        """
        self.dataset_s = Dataset(source['imgs'], source['labels'])
        self.dataset_t = Dataset(target['imgs'], target['labels'])

        loader_s = torch.utils.data.DataLoader(self.dataset_s,
                                               batch_size=batch_size1,
                                               shuffle=True,
                                               num_workers=0)
        loader_t = torch.utils.data.DataLoader(self.dataset_t,
                                               batch_size=batch_size2,
                                               shuffle=True,
                                               num_workers=0)

        # NOTE(review): float("inf") presumably means PairedData never caps
        # the number of paired iterations — confirm in PairedData.
        self.paired_data = PairedData(loader_s, loader_t, float("inf"))
# Example no. 3 (score: 0)

if __name__ == "__main__":
    from config import config
    from datasets.datasets import Dataset
    import matplotlib.patches as patches
    import matplotlib.pyplot as plt
    import torchvision.transforms as transforms

    # Smoke test: push one image from the UCAS list through the detector.
    ii = r'/home/wei/Deep_learning_pytorch/Data/UCAS/ucas_train.txt'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    transform = transforms.Compose([
        transforms.ToTensor(),
    ])

    img_size = 256
    da = Dataset(ii, transform=transform, img_size=img_size, train=False)
    dataloader = torch.utils.data.DataLoader(da, batch_size=1, shuffle=False)
    f = FCOS(config)
    # To resume from a saved checkpoint instead of random weights:
    # checkpoint = torch.load('./checkpoint/ckpt.pth')
    # f.load_state_dict(checkpoint['weights'])

    # BUG FIX: the model must be switched to eval mode for inference,
    # otherwise BatchNorm/Dropout layers keep their training behavior.
    f.eval()

    for batch_i, (_, imgs, targets) in enumerate(dataloader):
        # no_grad avoids building the autograd graph during inference.
        with torch.no_grad():
            detections = f(imgs)
        break