예제 #1
0
    def load_picture(self, fname, nbits=16):
        """Load an image and extract the lateral/medial patch pair.

        :param fname: str or numpy.ndarray
            Full path to the image, or the image itself as a numpy array.
        :param nbits: int
            Bit depth when *fname* is a numpy array: 16-bit data is
            rescaled down to 8-bit; 8-bit data must already be uint8.
            Ignored when *fname* is a path (the file is opened as-is).
        :return: tuple of (center-cropped image, lateral patch viewed as
            (1, 1, 128, 128), medial patch viewed as (1, 1, 128, 128))
        :raises TypeError: on an unsupported input type, bit depth, or
            dtype mismatch.
        """
        if isinstance(fname, str):
            img = Image.open(fname)
        elif isinstance(fname, np.ndarray):
            img = fname
            if nbits == 16:
                # Rescale the 16-bit dynamic range down to 8 bits.
                img = Image.fromarray(np.uint8(255 * (img / 65535.)))
            elif nbits == 8:
                if img.dtype != np.uint8:
                    raise TypeError(
                        'nbits=8 requires a uint8 array, got dtype '
                        '{}'.format(img.dtype))
                img = Image.fromarray(img)
            else:
                raise TypeError('Unsupported nbits: {}'.format(nbits))
        else:
            raise TypeError('fname must be str or numpy.ndarray, got '
                            '{}'.format(type(fname)))

        cropper = CenterCrop(300)
        # Crop once and reuse (the original cropped twice; CenterCrop is
        # deterministic, so the result is identical).
        cropped = cropper(img)

        l, m = get_pair(cropped)

        l = self.patch_transform(l)
        m = self.patch_transform(m)

        return cropped, l.view(1, 1, 128, 128), m.view(1, 1, 128, 128)
예제 #2
0
def load_picture16bit(fname, crop_size=300):
    """Load a 16-bit grayscale image as an 8-bit 3-channel center crop.

    :param fname: str
        Full path to the 16-bit image file.
    :param crop_size: int
        Side of the square center crop (default 300, the original
        hard-coded value, kept for backward compatibility).
    :return: center-cropped 3-channel 8-bit PIL image.
    """
    img = Image.open(fname)
    # We will use 8bit: rescale the 16-bit dynamic range down to 8 bits.
    img = np.array(img, dtype=float)
    img = np.uint8(255 * (img / 65535.))
    # Replicate the single gray channel into 3 channels (RGB-like input).
    img = Image.fromarray(np.repeat(img[:, :, np.newaxis], 3, axis=2))

    return CenterCrop(crop_size)(img)
예제 #3
0
    # --- Fragment: dataset split + mean/std estimation (truncated below) ---
    train_files = np.array(train_files)
    np.random.shuffle(train_files)
    val_files = np.array(os.listdir(os.path.join(args.dataset, 'val')))

    # Reuse a previously cached mean/std pair if one exists on disk;
    # otherwise estimate it with a pass over the training set.
    if os.path.isfile(os.path.join(args.snapshots, 'mean_std.npy')):
        tmp = np.load(os.path.join(args.snapshots, 'mean_std.npy'))
        mean_vector, std_vector = tmp
    else:

        # Minimal transform for statistics: to tensor, cast to float
        # (spatial augmentation is only the CenterCrop passed below).
        transf_tens = transforms.Compose(
            [transforms.ToTensor(), lambda x: x.float()])

        train_ds = KneeGradingDataset(args.dataset,
                                      train_files.tolist(),
                                      transform=transf_tens,
                                      augment=CenterCrop(300),
                                      stage='train')

        train_loader = data.DataLoader(train_ds,
                                       batch_size=args.bs,
                                       num_workers=args.n_threads)

        # Single-channel (grayscale) statistics accumulators.
        mean_vector = np.zeros(1)
        std_vector = np.zeros(1)

        print(colored('==> ', 'green') + 'Estimating the mean')
        pbar = tqdm(total=len(train_loader))
        for entry in train_loader:
            # NOTE(review): both variables read entry[0]; batch_m was
            # likely meant to be entry[1] — confirm against the dataset's
            # __getitem__. Loop body is truncated in this fragment.
            batch_l = entry[0]
            batch_m = entry[0]
            for j in range(mean_vector.shape[0]):
예제 #4
0
파일: train.py 프로젝트: bofei5675/KneeNet
from augmentation import CenterCrop, CorrectGamma, Jitter, Rotate, CorrectBrightness, CorrectContrast
import pandas as pd
# --- Fragment: per-channel mean/std estimation script (truncated) ---
if __name__ == '__main__':
    USE_CUDA = torch.cuda.is_available()
    device = torch.device("cuda" if USE_CUDA else "cpu")
    # Hard-coded cluster paths to the train/val split CSVs.
    train_dir = '/gpfs/data/denizlab/Users/bz1030/KneeNet/KneeProject/project_oulu/train.csv'
    val_dir = '/gpfs/data/denizlab/Users/bz1030/KneeNet/KneeProject/project_oulu/val.csv'
    # NOTE(review): sample(n=20) looks like a debugging subsample — confirm
    # before relying on the statistics computed from it.
    dataset = pd.read_csv(train_dir).sample(n=20).reset_index()
    dataset_val = pd.read_csv(val_dir).sample(n=20).reset_index()
    # Defining the transforms
    # This is the transformation for each patch
    saved = '/gpfs/data/denizlab/Users/bz1030/KneeNet/KneeProject/project_oulu/resnet_codes/saved'
    # Statistics are only (re)estimated when the snapshot dir is absent.
    if not os.path.exists(saved):
        os.mkdir(saved)
        # Crop, resize to the 224x224 network input size, cast to float.
        scale_tensor_transform = transforms.Compose([
            CenterCrop(300),
            transforms.Resize(224),
            transforms.ToTensor(),
            lambda x: x.float(),
        ])
        train_ds = KneeGradingDataset(dataset,
                                      transform=scale_tensor_transform,
                                      stage='train')

        train_loader = data.DataLoader(train_ds, batch_size=256)

        # 3-channel (RGB) statistics accumulators.
        mean_vector = np.zeros(3)
        std_vector = np.zeros(3)

        print(colored('==> ', 'green') + 'Estimating the mean')
        # Fragment is truncated here, mid-script.
        pbar = tqdm(total=len(train_loader))
                    # NOTE(review): tail of a parser.add_argument(...) call
                    # whose opening line is outside this fragment.
                    action='store',
                    dest='summary_path',
                    default=None,
                    type=str,
                    help='Path of dataloader file train.csv/val.csv/test.csv')
# --- Fragment: test-set inference setup ---
if __name__ == '__main__':
    args = parser.parse_args()
    USE_CUDA = torch.cuda.is_available()
    device = torch.device("cuda" if USE_CUDA else "cpu")
    HOME_PATH = args.home_path
    summary_path = args.summary_path
    test = pd.read_csv(summary_path)

    start_test = 0
    # Large 896x896 center crop; cast the image tensor to float.
    tensor_transform_test = transforms.Compose([
        CenterCrop(896),
        transforms.ToTensor(),
        lambda x: x.float(),
    ])
    dataset_test = KneeGradingDatasetNew(test, HOME_PATH,
                                         tensor_transform_test, 'float')

    test_loader = data.DataLoader(dataset_test, batch_size=8)
    print('Test data:', len(dataset_test))
    # Network
    # NOTE(review): torch.load unpickles arbitrary code — only load
    # checkpoints from a trusted source.
    if USE_CUDA:
        net = torch.load(args.load_model)
    else:
        # No GPU: remap all tensors onto the CPU while loading.
        net = torch.load(args.load_model, map_location='cpu')
    if USE_CUDA:
        net = nn.DataParallel(net)
예제 #6
0
        # Put each loaded model in eval mode before adding it to the ensemble.
        tmp.eval()
        models.append(tmp)

    # Normalize patches with the dataset mean/std estimated earlier.
    normTransform = transforms.Normalize(mean_vector, std_vector)

    patch_transform = transforms.Compose([
        transforms.ToTensor(),
        lambda x: x.float(),
        normTransform,
    ])

    imgs_fnames = glob.glob(os.path.join(args.dataset, '**', '*.png'))
    sm = torch.nn.Softmax(1)
    preds = []
    for fname in tqdm(imgs_fnames, total=len(imgs_fnames)):
        # inp[0]/inp[1] are the lateral/medial patch tensors.
        inp = load_img(fname, CenterCrop(300), patch_transform)

        # NOTE(review): Variable(volatile=True) is the pre-0.4 PyTorch
        # inference idiom; modern code would wrap this in torch.no_grad().
        lateral = Variable(inp[0].unsqueeze(0), volatile=True)
        medial = Variable(inp[1].unsqueeze(0), volatile=True)
        # Sum the raw outputs of all ensemble members, then softmax the
        # sum to obtain class probabilities.
        res = 0
        for m in models:
            res += m(lateral, medial)
        res = sm(res).data.squeeze().numpy()

        # NOTE(review): fname.split('/') is not portable —
        # os.path.basename would also work on Windows.
        preds.append([
            fname.split('/')[-1],
        ] + res.tolist())

    # Fragment is truncated inside this write loop.
    with open(args.save_results, 'w') as f:
        for pred in preds:
            f.write(