Example no. 1
def __init__(self, input_size, color_mean, color_std):
    self.data_transform = {
        "train": Compose([
            Scale(scale=[0.5, 1.5]),            # random rescale
            RandomRotation(angle=[-10, 10]),    # random rotation in degrees
            RandomMirror(),                     # random horizontal mirror
            Resize(input_size),
            Normalize_Tensor(color_mean, color_std),
        ]),
        "val": Compose([
            Resize(input_size),
            Normalize_Tensor(color_mean, color_std),
        ]),
    }
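The class enclosing this __init__ isn't shown. A common companion is a __call__ that dispatches on the phase key; below is a minimal sketch reusing the snippet's own transform classes (the class name DataTransform and the single-argument __call__ are assumptions, not the repo's API):

class DataTransform:
    """Hypothetical enclosing class; __init__ body as in the snippet above."""

    def __init__(self, input_size, color_mean, color_std):
        self.data_transform = {
            "train": Compose([Resize(input_size),
                              Normalize_Tensor(color_mean, color_std)]),
            "val": Compose([Resize(input_size),
                            Normalize_Tensor(color_mean, color_std)]),
        }

    def __call__(self, phase, img):
        # phase is "train" or "val"; applies the matching pipeline
        return self.data_transform[phase](img)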
Example no. 2

import glob
import os

import nibabel as nib
import numpy as np

# Resize, NiiImagesLoader, normalize_data, weighted_dice_coefficient and the
# config dict are project-level helpers defined elsewhere in this repo.


def predict_one_case(subject_folder, mean, std, caseID, model):
    # load data
    # prepare the resizing preprocessor
    preprocessor = [Resize(config["input_size"] + config["nr_slices"])]
    data, label = NiiImagesLoader(preprocessor).load(
        subject_folder, config["training_modalities"])  # load data
    print("input shape: ", data.shape)
    # normalize input image
    data = normalize_data(data, mean, std)
    # test-time augmentation: create a mirrored copy of the input
    data2 = np.flip(data, axis=2)
    # expand dimension of batch size
    data = np.expand_dims(data, axis=0)
    data2 = np.expand_dims(data2, axis=0)
    # predict output
    prediction = model.predict(data)[0, 0]
    prediction2 = model.predict(data2)[0, 0]
    # mirror the output back
    prediction2 = np.flip(prediction2, axis=1)
    # load the CT image to get the SMIR ID, original size, header and affine
    MTT_path = glob.glob(os.path.join(
        subject_folder, "*MTT.*.nii"))[0]  # get right ID for SMIR
    CT_path = glob.glob(os.path.join(
        subject_folder, "*CT.*.nii"))[0]  # get right header for SMIR
    CT = nib.load(CT_path)
    # average the original and mirrored predictions, then threshold to a mask
    prediction = np.mean(np.array([prediction, prediction2]), axis=0)
    label_map_data = np.zeros(prediction.shape, np.int8)
    label_map_data[prediction > config["threshhold"]] = 1
    # resize the mask back to the original CT resolution
    prediction = Resize(CT.shape,
                        interpolation="nearest").preprocess(label_map_data)
    predNifti = nib.Nifti1Image(prediction, CT.affine, CT.header)
    print("Output prediction: ", prediction.shape)
    # predNifti.set_data_dtype('short')
    # write the prediction as a NIfTI image into the prediction_path folder
    os.makedirs(config["prediction_path"], exist_ok=True)
    prediction_path = os.path.join(
        config["prediction_path"],
        "SMIR.prediction" + config["output_foder"].split("/")[-1] + "_case" +
        caseID + "." + MTT_path.split(".")[-2] + ".nii")
    predNifti.to_filename(prediction_path)
    if config["test_data"] != True:
        # evaluate dice coeficient
        dice = weighted_dice_coefficient(label, prediction)
        print("Dice: ", dice)
        return dice
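weighted_dice_coefficient comes from this project's utils and isn't shown here. For reference, a plain (unweighted) Dice coefficient for binary masks looks roughly like this; a sketch, not the repo's implementation, which may weight labels or average over channels:

import numpy as np


def dice_coefficient(y_true, y_pred, eps=1e-7):
    """Plain Dice for binary masks: 2|A∩B| / (|A| + |B|)."""
    y_true = np.asarray(y_true).astype(bool)
    y_pred = np.asarray(y_pred).astype(bool)
    intersection = np.logical_and(y_true, y_pred).sum()
    return (2.0 * intersection + eps) / (y_true.sum() + y_pred.sum() + eps)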
Example no. 3

import os

from torch.utils.data import DataLoader
from torchvision import transforms


def get_3dshapes_dataloader(args, path_to_data='3dshapes'):
    """3dshapes dataloader with images rescaled to (28,28,3)"""

    name = '{}/3dshapes.h5'.format(path_to_data)
    if not os.path.exists(name):
        print("Data at the given path doesn't exist; downloading.")
        # use path_to_data so the download target matches the path checked above
        os.makedirs(path_to_data, exist_ok=True)
        os.system(
            "wget -O {} https://storage.googleapis.com/3d-shapes/3dshapes.h5".format(name)
        )

    transform = transforms.Compose([transforms.Resize(28), transforms.ToTensor()])

    d3shapes_data = d3shapesDataset(name, transform=transform)
    d3shapes_loader = DataLoader(d3shapes_data,
                                 batch_size=args.mb_size,
                                 shuffle=args.shuffle,
                                 pin_memory=True,
                                 num_workers=args.workers)
    _, c, x, y = next(iter(d3shapes_loader))[0].size()
    return d3shapes_loader, c * x * y, c
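d3shapesDataset isn't shown above. A minimal sketch of what it might look like, assuming the official 3dshapes.h5 layout (uint8 images stored under the 'images' key); names and details are illustrative:

import h5py
import numpy as np
from PIL import Image
from torch.utils.data import Dataset


class d3shapesDataset(Dataset):
    """Illustrative sketch; loads the whole array into RAM for simplicity."""

    def __init__(self, h5_path, transform=None):
        with h5py.File(h5_path, 'r') as f:
            self.images = np.array(f['images'])  # (N, 64, 64, 3) uint8
        self.transform = transform

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img = Image.fromarray(self.images[idx])
        if self.transform is not None:
            img = self.transform(img)
        return img, 0  # dummy label so default collate yields (images, labels)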
Example no. 4

opt = CycleMcdTrainOptions().parse()

# set model

model = createModel(opt)  # create a new model
model.setup(opt)  # set model

# set dataloader

if opt.augment:
    print("with data augmentation")
    # geometric augmentations on the PIL image first, then tensor conversion;
    # the horizontal flip is moved before ToTensor (it commutes with Normalize)
    transformList = [
        RandomRotation(10),
        RandomResizedCrop(),
        RandomHorizontalFlip(),
        Resize(opt.loadSize),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),  # ImageNet stats
    ]
else:
    print("without data augmentation")
    transformList = [
        Resize(opt.loadSize),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),  # ImageNet stats
    ]

transform = Compose(transformList)

supervisedADataset = createDataset([opt.supervisedADataset],
Example no. 5
import os

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms, models

from focal_loss import FocalLoss
from model import get_model
from utils import (ChestXrayDataset, ToTensor, LeftToRightFlip, RandomCrop,
                   Resize, ColorJitter, RandomRotation)

transform = {
    'train':
    transforms.Compose([
        LeftToRightFlip(0.5),
        RandomRotation(angle=3, p=0.5),
        Resize(224),
        ColorJitter(p=0.5,
                    color=0.1,
                    contrast=0.1,
                    brightness=0.1,
                    sharpness=0.1),
        RandomCrop(scale=210, p=0.5),
        Resize(224),
        ToTensor()
    ]),
    'test':
    transforms.Compose([ToTensor()])
}
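LeftToRightFlip, RandomRotation, ColorJitter, RandomCrop, and Resize above are custom transforms from this project's utils, not the torchvision versions (note the extra p arguments). Purely as an illustration, a probabilistic flip of this shape might look like the following; the real utils implementation may differ:

import random
from PIL import Image


class LeftToRightFlip:
    """Illustrative sketch: flips a PIL image left-to-right with probability p."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img):
        if random.random() < self.p:
            return img.transpose(Image.FLIP_LEFT_RIGHT)
        return img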

datasets = {
    x: ChestXrayDataset(csv_file=os.path.join('dataset', x, x + '.csv'),
Example no. 6
    ret1, frame1 = cap1.read()
    ret2, frame2 = cap2.read()
    if ret1 and ret2:
        boxes1 = face_detector.DetectFaces(frame1)
        boxes2 = face_detector.DetectFaces(frame2)
        if boxes1 and boxes2:
            # convert (x1, y1, x2, y2) detections to (x, y, w, h) boxes,
            # expand by 1.5x around the face, then square them
            exp_box1 = Box(boxes1[0][0], boxes1[0][1],
                           boxes1[0][2] - boxes1[0][0],
                           boxes1[0][3] - boxes1[0][1]).ExpandBox(1.5)
            exp_box2 = Box(boxes2[0][0], boxes2[0][1],
                           boxes2[0][2] - boxes2[0][0],
                           boxes2[0][3] - boxes2[0][1]).ExpandBox(1.5)
            sqr_box1 = exp_box1.SquareBox(1)
            sqr_box2 = exp_box2.SquareBox(1)
            crop1, _, _, _, _ = SmartCrop(frame1, sqr_box1)
            crop2, _, _, _, _ = SmartCrop(frame2, sqr_box2)

            crop1 = cv2.cvtColor(crop1, cv2.COLOR_BGR2GRAY)
            crop2 = cv2.cvtColor(crop2, cv2.COLOR_BGR2GRAY)
            crop1 = Resize(crop1, dst_size, dst_size)
            crop2 = Resize(crop2, dst_size, dst_size)

            resize_coef1 = dst_size / sqr_box1.width
            resize_coef2 = dst_size / sqr_box2.width
            resized_width = int(max(resize_coef1 * exp_box1.width, resize_coef2 * exp_box2.width))

            crop1 = exclude_face(crop1, resized_width)
            crop2 = exclude_face(crop2, resized_width)

            diff = cv2.absdiff(crop1, crop2)
            diff_sum = diff.sum()
            diff_sum_norm = diff_sum / 1000000
            print(f"Diff sum: {diff_sum_norm}")
            if diff_sum_norm > 0.85:
                result = "Fake"
Example no. 7
]  # std of each modality in the dataset
config["labels"] = 1  # number of labels in the input image (excluding background)
config["threshhold"] = 0.5  # threshold for turning the output heat map into a 0/1 mask (>thresh => 1)

# training settings
config["batch_size"] = 4
config["n_epochs"] = 2000  # cutoff the training after this many epochs
config[
    "test_size"] = 0.2  # portion of the data that will be used for validation

# load the dataset from disk
from utils import NiiDatasetLoader, Resize
preprocessor = [Resize(config["input_size"] + config["nr_slices"])]
(data, labels) = NiiDatasetLoader(preprocessor).load(config["data_folder"],
                                                     config["modalities"])


# normalize data
def normalize_data(data, mean, std):
    """Z-score normalization in place: data[i][j] is case i, modality j."""
    for i in range(len(data)):
        for j in range(len(mean)):
            data[i][j] -= mean[j]
            data[i][j] /= std[j]
    return data


data = normalize_data(data, config["mean"], config["std"])
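config["mean"] and config["std"] are expected to hold one value per modality, matching the data[i][j] indexing above. One way such per-modality statistics could be computed from the loaded, un-normalized dataset; a sketch, since the project may derive them elsewhere:

import numpy as np


def compute_mean_std(data):
    """Per-modality mean/std over all cases; data[i][j] is case i, modality j."""
    n_modalities = len(data[0])
    means, stds = [], []
    for j in range(n_modalities):
        voxels = np.concatenate([np.asarray(case[j]).ravel() for case in data])
        means.append(float(voxels.mean()))
        stds.append(float(voxels.std()))
    return means, stds

# e.g. config["mean"], config["std"] = compute_mean_std(data)  # before normalizing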