Example #1
import pickle

import numpy as np
import nrrd
import torch

import dataset  # project-local modules referenced below
import model
import utils


def eval(labelled_list, masks, scans, nb_classes, start_filters):

    # Use CUDA
    device = torch.device("cuda:0")

    # Open labelled list
    with open(labelled_list, "rb") as f:
        list_scans = pickle.load(f)

    # Parse the list of CT-scans and skip the first 30 (presumably the training split)
    ct_scans = [s.split('/')[1] for s in list_scans]
    ct_scans = ct_scans[30:]

    # Build dataset
    data = dataset.Dataset(ct_scans, scans, masks, mode="3d")

    # Load the trained model and switch it to inference mode
    criterion = utils.dice_loss
    unet = model.UNet(1, nb_classes, start_filters).to(device)
    unet.load_state_dict(torch.load("./model", map_location=device))
    unet.eval()

    # Apply model to a known scan
    x, y = data.__getitem__(15, verbose=True)
    x = torch.Tensor(np.array([x.astype(np.float16)])).to(device)
    y = torch.Tensor(np.array([y.astype(np.float16)])).to(device)
    # Run inference without building the autograd graph
    with torch.no_grad():
        logits = unet(x)
        loss = criterion(logits, y)
    print(loss.item())
    mask = logits.cpu().detach().numpy()
    nrrd.write("lung_mask2.nrrd", mask[0][0])
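The criterion above comes from the project's utils.dice_loss, which is not shown here; a minimal sketch of a soft Dice loss with the same calling convention (logits first, target second) could look like this (an assumption, not the project's implementation):

import torch

def dice_loss(logits, target, eps=1e-6):
    # soft Dice on sigmoid probabilities, averaged over the batch
    probs = torch.sigmoid(logits)
    dims = tuple(range(1, target.dim()))
    intersection = (probs * target).sum(dim=dims)
    union = probs.sum(dim=dims) + target.sum(dim=dims)
    dice = (2.0 * intersection + eps) / (union + eps)
    return 1.0 - dice.mean()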
Example #2
    def __init__(self, Dataset, resize):
        # Pair each folder's un-numbered "center" image with its focus-0 "background"
        # image, stack the two crops along the channel axis, and label the pair
        # 0 (real) or 1 (fake) from the grandparent folder name.
        resultList = []
        transform = transforms.Compose([
            transforms.RandomCrop(int(resize * 0.99)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        befor_folder_name = ''
        for obj in range(len(Dataset)):
            data = Dataset[obj]
            is_concat = False
            index_count = 0
            for i in range(len(data)):
                img = data.__getitem__(i)
                data_name = data.imgs[i]
                if len(data_name[0].split('/')[-1].split('_')) == 1:
                    center = transform(img[0])
                else:
                    # focus index encoded in the filename suffix, e.g. "name_0.jpg"
                    focus_num = int(data_name[0].split('_')[-1].split('.jpg')[0])
                    if focus_num == 0:
                        background = transform(img[0])
                    elif focus_num == 1:
                        continue

                index_count += 1

                if befor_folder_name == data_name[0].split('/')[-2]:
                    is_concat = True

                if is_concat:
                    if not befor_folder_name == data_name[0].split('/')[-2]:
                        print(befor_folder_name, data_name[0].split('/')[-2])
                    result = torch.cat([center, background])
                    is_concat = False
                    index_count = 0
                    r_f = data_name[0].split('/')[-3]
                    if r_f == 'real':
                        result = (result, 0)
                    else:
                        result = (result, 1)
                    # print(result[0].shape, result[1])
                    resultList.append(result)

                befor_folder_name = data_name[0].split('/')[-2]
        self.len = len(resultList)
        self.resultData = resultList
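Only __init__ is shown above; assuming the class is indexed like a standard torch Dataset, the matching accessors would look roughly like this (a sketch, not the repository's code):

    def __getitem__(self, index):
        # each entry built in __init__ is a (stacked-crop tensor, real/fake label) pair
        return self.resultData[index]

    def __len__(self):
        return self.len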
Example #3
        point_set = point_set[choice]
        seg = seg[choice]

        point_set = torch.from_numpy(point_set)
        seg = torch.from_numpy(seg)
        cls = torch.from_numpy(np.array([cls]).astype(np.int64))
        if self.classification:
            return point_set, cls
        else:
            return point_set, seg

    def __len__(self):
        return len(self.datapath)


### test script
print('start running')
args = Arguments().parser().parse_args()

args.device = torch.device(
    'cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
if args.device.type == 'cuda':
    torch.cuda.set_device(args.device)
data = BenchmarkDataset(root=args.dataset_path,
                        npoints=args.point_num,
                        uniform=None,
                        class_choice='Chair')
for i in range(10):
    data.__getitem__(i)  # exercise __getitem__; this appears to populate data.num_points_list
print(len(data.num_points_list))
print(data.num_points_list)
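The __getitem__ shown above begins after choice has already been drawn; in PointNet-style part-segmentation datasets this is typically a random resampling of the cloud to a fixed size, for example (an assumption, not necessarily this repository's code):

    choice = np.random.choice(len(seg), self.npoints, replace=True)

which is then applied as point_set[choice] and seg[choice] at the top of the snippet.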
Example #4
        video = self.videofiles[index]
        video_index = self.videoindex[video]
        imagefile1 = self.imagefiles[index]
        image = cv2.imread(imagefile1)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        samples['content{}'.format(0)] = image

        # second frame of the pair: the next index, clamped to the last frame of this video
        index2 = min(index + 1, video_index[1] - 1)
        imagefile2 = self.imagefiles[index2]
        image2 = cv2.imread(imagefile2)
        image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
        samples['content{}'.format(1)] = image2

        if self.augment_transform is not None:
            samples = self.augment_transform(samples)

        samples['seq_name'] = video
        return samples


if __name__ == '__main__':
    data = DAVISLoader(data_root='/home/lwq/sdb1/xiaoxin/data/YoutubeVOS',
                       num_sample=3,
                       Training=True)
    # data = DAVISLoader(data_root='/home/lwq/sdb1/xiaoxin/data/DAVIS', num_sample=3, Training=False)
    samples = data.__getitem__(0)
    for i in samples:
        print(i)
    print(samples['content0'])
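    # For batched training the dataset would normally go through a DataLoader; a small
    # sketch, assuming augment_transform brings both frames to a common shape so the
    # default collate can stack them (batch_size / num_workers are arbitrary here):
    from torch.utils.data import DataLoader
    loader = DataLoader(data, batch_size=2, shuffle=True, num_workers=4)
    batch = next(iter(loader))
    print(batch['seq_name'], batch['content0'].shape)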
Example #5
def _test_gcd():
    with open("cfg/gcd_mlp.yaml") as f:
        cfg = yaml.load(f)
    data = gcd(cfg)
    feature, label = data.__getitem__(0)
    print(feature.dtype)
Example #6
        point_id = np.random.randint(len(pixel_ids[0]))
        px, py = pixel_ids[0][point_id], pixel_ids[1][point_id]

        y = np.reshape(y, [h, w, 1])
        # two-channel input: channel 0 holds the (h, w) array s, channel 1 is a
        # one-hot mask marking the randomly chosen pixel (px, py)
        x = np.zeros([h, w, 2])
        x[:, :, 0] = s
        x[px, py, 1] = 1.0
        return x, y
    def __getitem__(self, idx):
        # always draws from paths[0]; each call picks a fresh random pixel in _preprocess_path
        p = self.paths[0]
        x, y = self._preprocess_path(p, self.w, self.h)
        x = torch.from_numpy(x)
        x = x.permute(2, 0, 1).float()
        y = torch.from_numpy(y)
        y = y.permute(2, 0, 1).float()
        return x, y, p + str(idx)
    def __len__(self):
        # return len(self.paths)
        return 128
if __name__ == "__main__":
    data = PathDataset("/content/kanji/val/", 64, 64)
    x, y, _ = data.__getitem__(0)
    pad = torch.zeros(1, 64, 64)
    path = torch.cat((x, pad), dim=0)
    path = path.permute(1, 2, 0)
    path = np.uint8(path.cpu() * 255)
    print(path.shape)
    y = y.squeeze(0)
    y = np.uint8(y * 255)
    cv2.imwrite('y.png', y)
    cv2.imwrite('x.png', path)
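    # The second input channel is a single one-hot pixel; a small follow-up sketch
    # (same conventions as above) to dump it for inspection:
    point = np.uint8(x[1].cpu() * 255)  # channel 1 of the network input
    cv2.imwrite('point.png', point)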
Example #7
        # random horizontal flip applied jointly to image and label
        if random.random() < 0.5:
            img = np.fliplr(img)
            label = np.fliplr(label)
        return img, label

    def resize(self, img, label, s):
        # print(s, img.shape)
        img = cv2.resize(img, (s, s), interpolation=cv2.INTER_LINEAR)
        # nearest-neighbour for the label map so class ids are not blended
        label = cv2.resize(label, (s, s), interpolation=cv2.INTER_NEAREST)
        return img, label

    def randomCrop(self, img, label):
        h, w, _ = img.shape
        short_size = min(w, h)
        rand_size = random.randrange(int(0.7 * short_size), short_size)
        x = random.randrange(0, w - rand_size)
        y = random.randrange(0, h - rand_size)

        return (img[y:y + rand_size, x:x + rand_size],
                label[y:y + rand_size, x:x + rand_size])

    def __len__(self):
        return len(self.data)


if __name__ == '__main__':

    data = VOCClassSeg(root='../des/', split='train.txt', transform=True)

    x, y = data.__getitem__(4)
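    # A quick sanity check of the pair returned above (a sketch; it assumes x is the
    # transformed image and y the per-pixel class map, as the helpers above suggest):
    print(x.shape, y.shape)
    print(np.unique(np.asarray(y)))  # class ids present in this label map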