Example #1
def preprocess():
    """Preprocess raw images from GCS and write the transformed copies back."""
    print("Transform images")
    client = storage.Client(PROJECT)
    read_bucket = client.get_bucket(RAW_BUCKET)
    write_bucket = client.get_bucket(BUCKET)

    transformations = transforms.Compose([Rescale(256), RandomCrop(224)])

    metadata_df = (
        pd.read_csv(f"gs://{RAW_BUCKET}/{RAW_DATA_DIR}/metadata.csv",
                    usecols=["filename", "view"])
        .query("view == 'PA'")  # keep only the PA view
    )

    start_time = time.time()
    for filename in metadata_df["filename"].tolist():
        image = gcs_imread(read_bucket,
                           os.path.join(RAW_DATA_DIR, "images", filename))

        proc_image = transformations(image)
        proc_image = (proc_image * 255).astype(np.uint8)

        gcs_imwrite(write_bucket,
                    os.path.join(BASE_DIR, PREPROCESSED_DIR, filename),
                    filename, proc_image)

    print(f"  Time taken = {time.time() - start_time:.2f}s")
    print(f"  Number of images processed = {metadata_df.shape[0]}")
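The snippet relies on gcs_imread and gcs_imwrite helpers that are defined elsewhere in the project. Below is a minimal sketch of what such helpers could look like, inferred only from the call sites above; the use of imageio and the exact encoding logic are assumptions, not the original implementation.

import io

import imageio


def gcs_imread(bucket, blob_path):
    # Assumed helper: download a blob from GCS and decode it into a NumPy array.
    data = bucket.blob(blob_path).download_as_bytes()
    return imageio.imread(io.BytesIO(data))


def gcs_imwrite(bucket, blob_path, filename, image):
    # Assumed helper: encode the array using the extension of `filename`
    # and upload it to the target blob.
    buffer = io.BytesIO()
    imageio.imwrite(buffer, image, format="." + filename.split(".")[-1])
    bucket.blob(blob_path).upload_from_string(buffer.getvalue())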
Example #2
def process_image(image):
    """Process image."""
    seed_torch(seed=42)
    proc_image = transforms.Compose([
        Rescale(256),
        RandomCrop(224),
        ToTensor(),
    ])(image)
    proc_image = proc_image.unsqueeze(0).to(DEVICE)
    return proc_image
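A possible way process_image might be invoked; the input file name and the model variable below are purely illustrative and not part of the original example.

import imageio
import torch

# Illustrative usage only: the file name and `model` are assumptions.
image = imageio.imread("chest_xray.png")   # hypothetical input image as an ndarray
batch = process_image(image)               # (1, C, H, W) float tensor on DEVICE
with torch.no_grad():
    prediction = model(batch)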
Example #3
    def __init__(self):
        self.cfg_json = "config/openi_progressive_hiach_test.json"
        self.cfg = self.pare_cfg(self.cfg_json)
        self.exp_name = self.cfg["EXPER_NAME"]
        self.test_csv = self.cfg["TEST_CSV"]
        self.encoder_resume = self.cfg["RESUME_ENCODER"]
        self.decoder_resume_F = self.cfg["RESUME_DECODER_F"]
        self.decoder_resume_L = self.cfg["RESUME_DECODER_L"]
        self.word_dict = self.cfg["DICTIONARY"]
        self.text_csv = self.cfg["TEXT_CSV"]
        self.img_csv = self.cfg["IMG_CSV"]

        self.data_root = self.cfg["DATA_ROOT"]
        self.image_size = tuple(self.cfg["IMAGE_SIZE"])
        self.name = self.cfg["EXPER_NAME"]
        self.save_img_dir = './save_image/OPENI_HIAv3'
        if not os.path.exists(self.save_img_dir):
            os.makedirs(self.save_img_dir)

        self.ENCODERS = {"HAttnEncoder": HAttnEncoder}
        self.DECODERS = {
            "baseDECODER": baseDecoder,
            "baseDECODERv2": baseDecoderv2,
            "baseDECODERv3": baseDecoderv3
        }
        self.P_DECODER = {
            "PDECODER": PDecoder,
            "PDECODERv2": PDecoderv2,
            "PDECODERv3": PDecoderv3
        }
        self.DISCRIMINATOR = {
            "baseDISCRIMINATOR": baseDiscriminator,
            "noconDISCRIMINATOR": noCon_Discriminator,
            "Patch": PatchDiscriminator,
            "SNDiscriminator": SNDiscriminator,
            "ResDISCRIMINATOR": ResDiscriminator,
            "PDISCRIMINATOR": PDiscriminator
        }
        self.dataset = {
            "OPENI": OpeniDataset2_Hiachy,
            "MIMIC-CXR": MIMICDataset2_Hiachy
        }

        ##################################################
        ################# Dataset Setup ##################
        ##################################################
        self.t2i_dataset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.text_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.testset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.test_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        # Note: the testset built above is replaced by the full t2i dataset.
        self.testset = self.t2i_dataset
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        s_gpus = self.cfg["GPU_ID"].split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)

        self.test_dataloader = DataLoader(self.testset,
                                          batch_size=12,
                                          shuffle=False,
                                          num_workers=8,
                                          drop_last=True)

        self.base_size = 32
        self.P_ratio = int(np.log2(self.image_size[0] // self.base_size))
        self.base_ratio = int(np.log2(self.base_size))
        print("Number of Decoders", self.P_ratio + 1)
        print("Number of Discriminators", self.P_ratio + 1)

        #########################################
        ############ Network Init ###############
        #########################################
        self.define_nets()
        if self.num_gpus > 1:
            self.encoder = nn.DataParallel(self.encoder, device_ids=self.gpus)
            self.decoder_L = nn.DataParallel(self.decoder_L,
                                             device_ids=self.gpus)
            self.decoder_F = nn.DataParallel(self.decoder_F,
                                             device_ids=self.gpus)
Example #4
    def __init__(self):
        self.cfg_json = "config/MIMIC_wgan.json"
        self.cfg = self.pare_cfg(self.cfg_json)
        self.exp_name = self.cfg["EXPER_NAME"]
        self.max_epoch = self.cfg["MAX_EPOCH"]

        self.encoder_checkpoint = self.cfg["CHECKPOINT_ENCODER"]
        self.decoder_checkpoint = self.cfg["CHECKPOINT_DECODER"]

        self.D_checkpoint = self.cfg["CHECKPOINT_D"]
        self.check_create_checkpoint()

        self.encoder_resume = self.cfg["RESUME_ENCODER"]
        self.decoder_resume_F = self.cfg["RESUME_DECODER_F"]
        self.decoder_resume_L = self.cfg["RESUME_DECODER_L"]
        self.D_resume = self.cfg["RESUME_D"]

        self.train_csv = self.cfg["TRAIN_CSV"]
        self.val_csv = self.cfg["VAL_CSV"]
        self.test_csv = self.cfg["TEST_CSV"]

        self.img_csv = self.cfg["IMG_CSV"]
        self.data_root = self.cfg["DATA_ROOT"]
        self.batch_size = self.cfg["BATCH_SIZE"]
        self.image_size = tuple(self.cfg["IMAGE_SIZE"])
        self.name = self.cfg["EXPER_NAME"]
        self.pix_loss_ratio = self.cfg["PIXEL_LOSS_RATIO"]
        self.adv_loss_ratio = self.cfg["ADV_LOSS_RATIO"]
        self.checkpoint_epoch = self.cfg["CHECKPOINT_EPOCH"]

        self.beta1 = self.cfg["beta1"]
        self.word_dict = self.cfg["DICTIONARY"]
        self.writer = SummaryWriter(os.path.join("runs", self.exp_name))
        self.ENCODERS = {
            "HAttnEncoder": HAttnEncoder,
        }
        self.DECODERS = {
            "baseDECODER": baseDecoder,
            "baseDECODERv2": baseDecoderv2,
            "baseDECODERv3": baseDecoderv3
        }

        self.DISCRIMINATOR = {
            "baseDISCRIMINATOR": baseDiscriminator,
            "noconDISCRIMINATOR": noCon_Discriminator,
            "Patch": PatchDiscriminator,
            "SNDiscriminator": SNDiscriminator,
            "ResDISCRIMINATOR": ResDiscriminator,
            "PDISCRIMINATOR": PDiscriminator
        }
        self.dataset = {"OPENI": OpeniDataset2, "MIMIC-CXR": MIMICDataset2}

        ##################################################
        ################# Dataset Setup ##################
        ##################################################
        self.trainset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.train_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.valset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.val_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        # As defined above, self.dataset maps the dataset name to the class
        # itself, so it is instantiated directly here as well.
        self.sia_dataset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.train_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        s_gpus = self.cfg["GPU_ID"].split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)

        #########################################
        ############ Loss Function ##############
        #########################################
        content_losses = {"L2": nn.MSELoss(), "L1": nn.L1Loss()}
        self.G_criterion = content_losses[self.cfg["CONTENT_LOSS"]].to(
            self.device)

        #########################################
        ############ Network Init ###############
        #########################################

        self.base_size = self.image_size[0]
        self.base_ratio = int(np.log2(self.base_size))

        self.define_nets()
        if self.num_gpus > 1:
            self.encoder = nn.DataParallel(self.encoder, device_ids=self.gpus)
            self.decoder_L = nn.DataParallel(self.decoder_L,
                                             device_ids=self.gpus)
            self.decoder_F = nn.DataParallel(self.decoder_F,
                                             device_ids=self.gpus)
Example #5
        f.write('\n')

    cfg = dict()
    cfg['batch_size'] = 64

    cfg['scale'] = 0.5
    if cfg['scale'] == 0.5:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]
    else:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]

    train_set = OpenEDS(root_path=root_path + 'train',
                        transform=transforms.Compose([
                            Rescale(cfg['scale']),
                            ToTensor(),
                            Normalize(mnet_v2_mean, mnet_v2_std)
                        ]))
    val_set = OpenEDS(root_path=root_path + 'validation',
                      transform=transforms.Compose([
                          Rescale(cfg['scale']),
                          ToTensor(),
                          Normalize(mnet_v2_mean, mnet_v2_std)
                      ]))

    test_set = OpenEDS(root_path=root_path + 'test',
                       transform=transforms.Compose([
                           Rescale(cfg['scale']),
                           ToTensor(),
                           Normalize(mnet_v2_mean, mnet_v2_std)
                       ]))

    loaders = {'train': torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True,
                                                    num_workers=args.num_workers),
               'val': torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False,
                                                  num_workers=args.num_workers, pin_memory=False),
               'test': torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False,
                                                   num_workers=args.num_workers, pin_memory=False)
               }
Example #6
    def __init__(self):
        self.cfg_json = "config/MIMIC_StackGANtest.json"
        self.cfg = self.pare_cfg(self.cfg_json)
        self.exp_name = self.cfg["EXPER_NAME"]

        self.encoder_resume = self.cfg["RESUME_ENCODER"]
        self.decoder_resume_F = self.cfg["RESUME_DECODER_F"]
        self.decoder_resume_L = self.cfg["RESUME_DECODER_L"]
        self.word_dict = self.cfg["DICTIONARY"]
        self.text_csv = self.cfg["TEXT_CSV"]
        self.img_csv = self.cfg["IMG_CSV"]
        self.data_root = self.cfg["DATA_ROOT"]
        self.image_size = tuple(self.cfg["IMAGE_SIZE"])
        self.name = self.cfg["EXPER_NAME"]
        self.test_csv = self.cfg["TEST_CSV"]
        self.save_img_dir = './save_image/MIMIC_Stack256'
        if not os.path.exists(self.save_img_dir):
            os.makedirs(self.save_img_dir)

        self.ENCODERS = {
            "baseENCODER": baseEncoder,
            "baseENCODERv2": baseEncoderv2,
            "harchyENCODER": harchyEncoder
        }

        self.dataset = {
            "OPENI": OpeniDataset2,
            "MIMIC-CXR": MIMICDataset2
        }

        ##################################################
        ################# Dataset Setup ##################
        ##################################################
        self.t2i_dataset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.text_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose([
                Rescale(self.image_size),
                Equalize(),
                ToTensor()
            ]))
        self.testset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.test_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose([
                Rescale(self.image_size),
                Equalize(),
                ToTensor()
            ]))

        # Note: the testset built above is replaced by the full t2i dataset.
        self.testset = self.t2i_dataset
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        s_gpus = self.cfg["GPU_ID"].split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)


        self.test_dataloader = DataLoader(self.testset,
                                          batch_size=12,
                                          shuffle=False,
                                          num_workers=8,
                                          drop_last=True)

        self.base_size = self.image_size[0]
        self.base_ratio = int(np.log2(self.base_size))

        #########################################
        ############ Network Init ###############
        #########################################
        self.define_nets()

        self.decoder_L = self.define_nets()
        self.decoder_F = self.define_nets()
        self.encoder = self.ENCODERS[self.cfg["ENCODER"]](vocab_size=self.t2i_dataset.vocab_size,
                                                          embed_size=self.cfg["E_EMBED_SIZE"],
                                                          hidden_size=self.cfg["E_HIDEN_SIZE"],
                                                          max_len=[self.t2i_dataset.max_len_finding,
                                                                   self.t2i_dataset.max_len_impression],
                                                          unit=self.cfg["RNN_CELL"],
                                                          feature_base_dim=self.cfg["D_CHANNEL_SIZE"]
                                                          ).to(self.device)

        self.encoder = nn.DataParallel(self.encoder, device_ids=self.gpus)
Example #7
    def setup(self):
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']
        print("pkl_dir: ", pkl_dir)
        print("tr_keys: ", tr_keys)
        print("val_keys: ", val_keys)
        print("test_keys: ", test_keys)
        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")
        task = self.config.dataset_name
        self.train_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="train",
                           keys=tr_keys,
                           taskname=task),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.val_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="val",
                           keys=val_keys,
                           taskname=self.config.dataset_name),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.test_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="test",
                           keys=test_keys,
                           taskname=self.config.dataset_name),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)
        #self.model = UNet()
        self.model.to(self.device)
        self.bce_weight = 0.5
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print(
                    'checkpoint_dir is empty, please provide directory to load checkpoint.'
                )
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir,
                                     save_types=("model",))

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')
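setup() loads a splits.pkl file whose layout is not shown here. From the way it is indexed (splits[fold]['train'] and so on), it is presumably a sequence of folds, each mapping 'train'/'val'/'test' to lists of case keys. Below is a sketch of how such a file could be written, with made-up keys; the structure is an assumption based only on the indexing above.

import os
import pickle

# Hypothetical layout: splits[fold]["train" | "val" | "test"] -> list of case ids.
splits = [
    {"train": ["case_000", "case_001"],
     "val": ["case_002"],
     "test": ["case_003"]},
]

with open(os.path.join("split_dir", "splits.pkl"), "wb") as f:
    pickle.dump(splits, f)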
Example #8
For now, focus on whether two signatures match or not.

Afterwards, add forgery detection to the network.

"""
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from utils import SignatureDataset, Rescale, ToTensor
from model import SigNet
import argparse

DATADIR = './data/sign_data/'

rescale = Rescale((100, 100))

# TODO Speed up data loader & add cuda support
train_set = SignatureDataset(root=DATADIR,
                             split='train',
                             transforms=transforms.Compose(
                                 [rescale, ToTensor()]))
test_set = SignatureDataset(root=DATADIR,
                            split='test',
                            transforms=transforms.Compose(
                                [rescale, ToTensor()]))

if __name__ == '__main__':
    # TODO: Add argparser
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
Example #9
# encoding: utf-8
'''
@author: andy
@contact: [email protected]
@github: https://github.com/AndyandViky
@csdn: https://blog.csdn.net/AndyViky
@file: compose-transforms.py
@time: 2019/5/25 下午8:31
@desc: Use Compose to transform the image format, using the helper classes from utils
'''

import torchvision.transforms as transforms
from utils import Rescale, RandomCrop, ToTensor
from dataclass import MdataSample

data = MdataSample("",
                   "",
                   transform=transforms.Compose(
                       [Rescale(256),
                        RandomCrop(224),
                        ToTensor()]))
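Every example in this collection composes Rescale, RandomCrop, and ToTensor classes imported from a local utils module. Below is a rough sketch of what such callable transforms typically look like, loosely following the PyTorch data-loading tutorial; each repository's actual implementation (in particular whether it operates on bare arrays or on sample dicts with 'image'/'label' keys, or interprets the argument as a size versus a scale factor) may differ.

import numpy as np
import torch
from skimage import transform


class Rescale:
    """Rescale an (H, W, C) ndarray; an int keeps the aspect ratio on the shorter side."""

    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, image):
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size
        return transform.resize(image, (int(new_h), int(new_w)))


class RandomCrop:
    """Crop a random patch of the given size."""

    def __init__(self, output_size):
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        self.output_size = output_size

    def __call__(self, image):
        h, w = image.shape[:2]
        new_h, new_w = self.output_size
        top = np.random.randint(0, h - new_h)
        left = np.random.randint(0, w - new_w)
        return image[top:top + new_h, left:left + new_w]


class ToTensor:
    """Convert an (H, W, C) ndarray to a (C, H, W) float tensor."""

    def __call__(self, image):
        return torch.from_numpy(image.transpose((2, 0, 1))).float()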
Example #10

######################################
############### Main #################
######################################
# default settings
learning_rate = 0.001
batch_size = 4
num_classes = 2
log_schedule = 10
epochCount = 5
num_workers = 1
print_every = batch_size

transform = T.Compose([
    Rescale(32),
    T.ToTensor(),
    T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

train_data = MammogramDataset("Mini_DDSM_Upload", "train", transform=transform)
test_data = MammogramDataset("Mini_DDSM_Upload", "test")

VAL_RATIO = 0.2
NUM_VAL = int(len(train_data) * VAL_RATIO)
NUM_TRAIN = len(train_data) - NUM_VAL
NUM_TEST = len(test_data)
BATCH_SIZE = batch_size

loader_train = DataLoader(train_data,
                          batch_size=BATCH_SIZE,
Example #11
def train(epochs, batch_size, learning_rate):

    torch.manual_seed(1234)

    train_loader = torch.utils.data.DataLoader(SegThorDataset(
        "/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data/train",
        phase='train',
        transform=transforms.Compose([Rescale(1.0),
                                      Normalize(),
                                      ToTensor()])),
                                               batch_size=batch_size,
                                               shuffle=True)
    '''
    # Loading validation data
    val_set = SegThorDataset("/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_val", phase='val',
                                   transform=transforms.Compose([
                                       Rescale(0.5),
                                       Normalize(),
                                       ToTensor2()
                                   ]))

    val_loader = torch.utils.data.DataLoader(dataset=val_set,
                                             batch_size=1,
                                             shuffle=False)
    '''

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    model.apply(weight_init)
    #optimizer = optim.Adam(model.parameters(), lr=learning_rate)    #learning rate to 0.001 for initial stage
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=0.00001)
    #optimizer = adabound.AdaBound(params = model.parameters(), lr = 0.001, final_lr = 0.1)

    for epoch in range(epochs):
        f = open('train_output.log', 'a')
        f.write('Epoch {}/{}\n'.format(epoch + 1, epochs))
        f.write('-' * 10)

        running_loss = 0.0
        running_loss_label = np.zeros(5)
        for batch_idx, sample in enumerate(train_loader):
            train_data, labels = sample['image'].to(
                device,
                dtype=torch.float), sample['label'].to(device,
                                                       dtype=torch.uint8)

            optimizer.zero_grad()
            output = model(train_data)

            loss_label, loss = dice_loss2(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            for i in range(5):
                running_loss_label[i] += loss_label[i]

        epoch_loss = running_loss / len(train_loader)
        writer.add_scalar('Train/Loss', epoch_loss, epoch)
        f.write("\n Total Dice Loss: {:.4f}\n".format(epoch_loss))
        epoch_loss_class = np.true_divide(running_loss_label,
                                          len(train_loader))
        f.write(
            "Dice per class: Background = {:.4f} Esophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
            .format(epoch_loss_class[0], epoch_loss_class[1],
                    epoch_loss_class[2], epoch_loss_class[3],
                    epoch_loss_class[4]))
        #f.write("Dice per class: Background = {:.4f} Esophagus = {:.4f}\n".format(epoch_loss_class[0], epoch_loss_class[1]))
        f.close()

        if epoch % 4 == 0:
            os.makedirs("models", exist_ok=True)
            torch.save(model, "models/model.pt")

    # export scalar data to JSON for external processing
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
    os.makedirs("models", exist_ok=True)
    torch.save(model, "models/model.pt")
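The train function above logs through a writer that is not created inside the snippet; given the call to export_scalars_to_json, it is presumably a module-level tensorboardX SummaryWriter. A minimal sketch of how the surrounding script might set it up and start training; the log directory and hyperparameter values are illustrative assumptions.

from tensorboardX import SummaryWriter

# Assumed module-level writer used by train(); the log directory is illustrative.
writer = SummaryWriter("runs/segthor")

if __name__ == '__main__':
    # Hyperparameter values are placeholders, not taken from the original script.
    train(epochs=50, batch_size=4, learning_rate=0.01)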
Example #12
def test():
    test_path = '/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_sub/test'
    for patient in tqdm(os.listdir(test_path)): 
        count = 0
        area = 0
        
        file = patient
        x = file.split(".")
        filename = x[0] + '.' + x[1]

        print("patient = ", patient)
        test_set = SegThorDataset(test_path,
                                  patient=patient, 
                                  phase='test',
                                  transform=transforms.Compose([
                                         Rescale(1.0, labeled=False),
                                         Normalize(labeled=False),
                                         ToTensor(labeled=False)
                                  ]))

        test_loader = torch.utils.data.DataLoader(dataset=test_set, 
                                                  batch_size=1, 
                                                  shuffle=False)


        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        model = torch.load("models/model.pt")
        model.eval()

        '''
        with torch.no_grad():
            for batch_idx, sample in enumerate(test_loader):     
                images = sample['image'].to(device, dtype=torch.float)        
                outputs = model(images)

                print("tensor: {} and {} ".format(images.size(), outputs.size()))
                images = tensor_to_numpy(images)            
                max_idx = torch.argmax(outputs, 1, keepdim=True)
                max_idx = tensor_to_numpy(max_idx)
                print("numpy: {} and {} ".format(images.shape, max_idx.shape))

                fig=plt.figure()
                fig.add_subplot(1,2,1)
                plt.imshow(max_idx)
                fig.add_subplot(1,2,2)
                plt.imshow(images)
                plt.show()
#                fig.close()
                count = count + 1
                if count==150:
                    break
        '''
#        '''
        seg_vol_2d = zeros([len(test_set),  512, 512])
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        model = torch.load("models/model.pt")
        model.eval()
        model.to(device)
        
        with torch.no_grad():
            for batch_idx, sample in enumerate(test_loader):     
                images = sample['image'].to(device, dtype=torch.float)        
                outputs = model(images)

                images = tensor_to_numpy(images)            
                max_idx = torch.argmax(outputs, 1, keepdim=True)
                max_idx = tensor_to_numpy(max_idx)
                          
              #  for k in range(outputs.size(0)): 
              #  print(max_idx.shape)
                slice_v = max_idx[:,:]   
                slice_v = slice_v.astype(float32)
                slice_v = ndimage.interpolation.zoom(slice_v, zoom=1, order=0, mode='nearest', prefilter=True)
                seg_vol_2d[count,:,:] = slice_v
                count = count + 1
               
            segmentation = sitk.GetImageFromArray(seg_vol_2d, isVector=False)
            print(segmentation.GetSize())
            sitk.WriteImage(sitk.Cast( segmentation, sitk.sitkUInt8 ), filename, True) 
Example #13
    cfg = dict()
    cfg['batch_size'] = 64

    cfg['scale'] = 0.5
    # Original, mean 0.4679, std 0.2699
    # Gamma correction: mean 0.3977, std 0.2307
    if cfg['scale'] == 0.5:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]
    else:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]

    train_set = OpenEDS(root_path=root_path + 'train',
                        transform=transforms.Compose([
                            Rescale(cfg['scale']),
                            Brightness(brightness=(0.5, 2.75)),
                            ToTensor(),
                            Normalize(mnet_v2_mean, mnet_v2_std)
                        ]))

    val_set = OpenEDS(root_path=root_path + 'validation',
                      transform=transforms.Compose([
                          Rescale(cfg['scale']),
                          ToTensor(),
                          Normalize(mnet_v2_mean, mnet_v2_std)
                      ]))  #

    test_set = OpenEDS(root_path=root_path + 'test',
                       transform=transforms.Compose([
                           Rescale(cfg['scale']),
Example #14
    def __init__(self):
        self.cfg_json = "config/MIMIC_StackGAN.json"
        self.cfg = self.pare_cfg(self.cfg_json)
        self.exp_name = self.cfg["EXPER_NAME"]
        self.max_epoch = self.cfg["MAX_EPOCH"]

        self.encoder_checkpoint = self.cfg["CHECKPOINT_ENCODER"]
        self.decoder_checkpoint = self.cfg["CHECKPOINT_DECODER"]

        self.D_checkpoint = self.cfg["CHECKPOINT_D"]
        self.check_create_checkpoint()

        self.encoder_resume = self.cfg["RESUME_ENCODER"]
        self.decoder_resume_F = self.cfg["RESUME_DECODER_F"]
        self.decoder_resume_L = self.cfg["RESUME_DECODER_L"]
        self.D_resume_F, self.D_resume_L = self.cfg["RESUME_D"]
        self.train_csv = self.cfg["TRAIN_CSV"]
        self.val_csv = self.cfg["VAL_CSV"]
        self.test_csv = self.cfg["TEST_CSV"]
        self.text_csv = self.cfg["TEXT_CSV"]
        self.img_csv = self.cfg["IMG_CSV"]
        self.data_root = self.cfg["DATA_ROOT"]
        self.batch_size = self.cfg["BATCH_SIZE"]
        self.image_size = tuple(self.cfg["IMAGE_SIZE"])
        self.name = self.cfg["EXPER_NAME"]
        self.pix_loss_ratio = self.cfg["PIXEL_LOSS_RATIO"]
        self.adv_loss_ratio = self.cfg["ADV_LOSS_RATIO"]
        self.checkpoint_epoch = self.cfg["CHECKPOINT_EPOCH"]

        self.beta1 = self.cfg["beta1"]
        self.word_dict = self.cfg["DICTIONARY"]
        self.writer = SummaryWriter(os.path.join("runs", self.exp_name))
        self.dataset = {"OPENI": OpeniDataset2, "MIMIC-CXR": MIMICDataset2}

        ##################################################
        ################# Dataset Setup ##################
        ##################################################
        self.t2i_dataset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.text_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))
        # The dataset mapping above stores the dataset class itself, so it is
        # instantiated directly.
        self.trainset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.train_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.valset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.val_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        s_gpus = self.cfg["GPU_ID"].split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)

        #########################################
        ############ Loss Function ##############
        #########################################
        content_losses = {"L2": nn.MSELoss(), "L1": nn.L1Loss()}
        self.G_criterion = content_losses[self.cfg["CONTENT_LOSS"]].to(
            self.device)

        #########################################
        ############ Network Init ###############
        #########################################

        self.decoder_L, self.D_L = self.define_nets()
        self.decoder_F, self.D_F = self.define_nets()
        self.encoder = harchyEncoder(
            vocab_size=self.t2i_dataset.vocab_size,
            embed_size=self.cfg["E_EMBED_SIZE"],
            hidden_size=self.cfg["E_HIDEN_SIZE"],
            max_len=[
                self.t2i_dataset.max_len_finding,
                self.t2i_dataset.max_len_impression
            ],
            unit=self.cfg["RNN_CELL"],
            feature_base_dim=self.cfg["D_CHANNEL_SIZE"]).to(self.device)

        self.encoder = nn.DataParallel(self.encoder, device_ids=self.gpus)
Example #15
    for i in range(len(segthor_dataset)):
        sample = segthor_dataset[i]
        
    #    print(i, sample['image'].size())
        plt.imshow(sample['image'])
        plt.show()
        if i == 50:
            break

    '''
    #    '''
    ## Loading data for training phase
    segthor_dataset = SegThorDataset(
        datapath=
        "/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_sub/train",
        phase='train',
        transform=transforms.Compose([
            Rescale(1.0, labeled=True),
            Normalize(labeled=True),
            JointTransform2D(crop=(288, 288), p_flip=0.5),
            ToTensor(labeled=True)
        ]))

    for i in range(len(segthor_dataset)):
        sample = segthor_dataset[i]

        print(i, sample['image'].size(), sample['label'].size())
        if i == 5:
            break
#    '''