Example #1
    def __init__(self, datapath='../data', mode="train", transform=None):
        assert mode in ["train", "test"]
        self.data_path = os.path.join(datapath, mode)
        self.mode = mode
        self.labels, self.data, self.length = self.getDataIndex()
        # self.mean, self.std = self.getStat()
        self.transform = transform if transform is not None else ToTensor()
Example #2
def process_image(image):
    """Process image."""
    seed_torch(seed=42)
    proc_image = transforms.Compose([
        Rescale(256),
        RandomCrop(224),
        ToTensor(),
    ])(image)
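    # add a batch dimension and move the image tensor to the target device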
    proc_image = proc_image.unsqueeze(0).to(DEVICE)
    return proc_image
Example #3
def train():
    """Train"""
    client = storage.Client(PROJECT)
    raw_bucket = client.get_bucket(RAW_BUCKET)
    bucket = client.get_bucket(BUCKET)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"  Device found = {device}")

    metadata_df = (
        pd.read_csv(f"gs://{RAW_BUCKET}/{RAW_DATA_DIR}/metadata.csv").query(
            "view == 'PA'")  # taking only PA view
    )

    print("Split train and validation data")
    proc_data = ImageDataset(
        root_dir=BASE_DIR,
        image_dir=PREPROCESSED_DIR,
        df=metadata_df,
        bucket=bucket,
        transform=ToTensor(),
    )
    seed_torch(seed=42)
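    # hold out 20% of the processed dataset for validation; the rest is used for training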
    valid_size = int(len(proc_data) * 0.2)
    train_data, valid_data = torch.utils.data.random_split(
        proc_data, [len(proc_data) - valid_size, valid_size])
    train_loader = DataLoader(train_data,
                              batch_size=CFG.batch_size,
                              shuffle=True,
                              drop_last=True)
    valid_loader = DataLoader(valid_data,
                              batch_size=CFG.batch_size,
                              shuffle=False)

    print("Train model")
    se_model_blob = raw_bucket.blob(CFG.pretrained_weights)
    model = CustomSEResNeXt(
        BytesIO(se_model_blob.download_as_string()),
        device,
        CFG.n_classes,
        save=CFG.pretrained_model_path,
    )
    train_fn(model, train_loader, valid_loader, device)

    print("Evaluate")
    y_probs, y_val = predict(model, valid_loader, device)
    y_preds = y_probs.argmax(axis=1)

    compute_log_metrics(y_val, y_probs[:, 1], y_preds)
Example #4
    def __init__(self):
        self.cfg_json = "config/MIMIC_StackGANtest.json"
        self.cfg = self.pare_cfg(self.cfg_json)
        self.exp_name = self.cfg["EXPER_NAME"]

        self.encoder_resume = self.cfg["RESUME_ENCODER"]
        self.decoder_resume_F = self.cfg["RESUME_DECODER_F"]
        self.decoder_resume_L = self.cfg["RESUME_DECODER_L"]
        self.word_dict = self.cfg["DICTIONARY"]
        self.text_csv = self.cfg["TEXT_CSV"]
        self.img_csv = self.cfg["IMG_CSV"]
        self.data_root = self.cfg["DATA_ROOT"]
        self.image_size = tuple(self.cfg["IMAGE_SIZE"])
        self.name = self.cfg["EXPER_NAME"]
        self.test_csv = self.cfg["TEST_CSV"]
        self.save_img_dir = './save_image/MIMIC_Stack256'
        if not os.path.exists(self.save_img_dir):
            os.mkdir(self.save_img_dir)

        self.ENCODERS = {
            "baseENCODER": baseEncoder,
            "baseENCODERv2": baseEncoderv2,
            "harchyENCODER": harchyEncoder
        }

        self.dataset = {
            "OPENI": OpeniDataset2,
            "MIMIC-CXR": MIMICDataset2
        }

        ##################################################
        ################# Dataset Setup ##################
        ##################################################
        self.t2i_dataset = self.dataset[self.cfg["DATASET"]](csv_txt=self.text_csv,
                                                             csv_img=self.img_csv,
                                                             root=self.data_root,
                                                             word_dict=self.word_dict,
                                                             transform=transforms.Compose([
                                                                 Rescale(self.image_size),
                                                                 Equalize(),
                                                                 ToTensor()
                                                             ]))
        self.testset = self.dataset[self.cfg["DATASET"]](csv_txt=self.test_csv,
                                                             csv_img=self.img_csv,
                                                             root=self.data_root,
                                                             word_dict=self.word_dict,
                                                             transform=transforms.Compose([
                                                                 Rescale(self.image_size),
                                                                 Equalize(),
                                                                 ToTensor()
                                                             ]))

        self.testset = self.t2i_dataset
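        # note: this overrides the testset built above, so the test dataloader iterates the full t2i dataset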
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        s_gpus = self.cfg["GPU_ID"].split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)


        self.test_dataloader = DataLoader(self.testset,
                                         batch_size=12,
                                         shuffle=False,
                                         num_workers=8,
                                         drop_last=True)

        self.base_size = self.image_size[0]
        self.base_ratio = int(np.log2(self.base_size))

        #########################################
        ############ Network Init ###############
        #########################################
        self.define_nets()

        self.decoder_L = self.define_nets()
        self.decoder_F = self.define_nets()
        self.encoder = self.ENCODERS[self.cfg["ENCODER"]](vocab_size=self.t2i_dataset.vocab_size,
                                                          embed_size=self.cfg["E_EMBED_SIZE"],
                                                          hidden_size=self.cfg["E_HIDEN_SIZE"],
                                                          max_len=[self.t2i_dataset.max_len_finding,
                                                                   self.t2i_dataset.max_len_impression],
                                                          unit=self.cfg["RNN_CELL"],
                                                          feature_base_dim=self.cfg["D_CHANNEL_SIZE"]
                                                          ).to(self.device)

        self.encoder = nn.DataParallel(self.encoder, device_ids=self.gpus)
Example #5
    def setup(self):
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']
        print("pkl_dir: ", pkl_dir)
        print("tr_keys: ", tr_keys)
        print("val_keys: ", val_keys)
        print("test_keys: ", test_keys)
        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")
        task = self.config.dataset_name
        self.train_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="train",
                           keys=tr_keys,
                           taskname=task),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.val_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="val",
                           keys=val_keys,
                           taskname=self.config.dataset_name),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.test_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="test",
                           keys=test_keys,
                           taskname=self.config.dataset_name),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)
        #self.model = UNet()
        self.model.to(self.device)
        self.bce_weight = 0.5
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate)
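        # lower the learning rate when the monitored loss stops improving ('min' mode)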
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print(
                    'checkpoint_dir is empty, please provide directory to load checkpoint.'
                )
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir,
                                     save_types=("model"))

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')
Example #6
from model import get_model

transform = {
    'train':
    transforms.Compose([
        LeftToRightFlip(0.5),
        RandomRotation(angle=3, p=0.5),
        Resize(224),
        ColorJitter(p=0.5,
                    color=0.1,
                    contrast=0.1,
                    brightness=0.1,
                    sharpness=0.1),
        RandomCrop(scale=210, p=0.5),
        Resize(224),
        ToTensor()
    ]),
    'test':
    transforms.Compose([ToTensor()])
}

datasets = {
    x: ChestXrayDataset(csv_file=os.path.join('dataset', x, x + '.csv'),
                        root_dir=os.path.join('dataset', x),
                        transform=transform[x])
    for x in ['train', 'test']
}
dataloaders = {
    x: DataLoader(datasets[x], batch_size=32, shuffle=True)
    for x in ['train', 'test']
}
Example #7
    def __init__(self):
        self.cfg_json = "config/MIMIC_StackGAN.json"
        self.cfg = self.pare_cfg(self.cfg_json)
        self.exp_name = self.cfg["EXPER_NAME"]
        self.max_epoch = self.cfg["MAX_EPOCH"]

        self.encoder_checkpoint = self.cfg["CHECKPOINT_ENCODER"]
        self.decoder_checkpoint = self.cfg["CHECKPOINT_DECODER"]

        self.D_checkpoint = self.cfg["CHECKPOINT_D"]
        self.check_create_checkpoint()

        self.encoder_resume = self.cfg["RESUME_ENCODER"]
        self.decoder_resume_F = self.cfg["RESUME_DECODER_F"]
        self.decoder_resume_L = self.cfg["RESUME_DECODER_L"]
        self.D_resume_F, self.D_resume_L = self.cfg["RESUME_D"]
        self.train_csv = self.cfg["TRAIN_CSV"]
        self.val_csv = self.cfg["VAL_CSV"]
        self.test_csv = self.cfg["TEST_CSV"]
        self.text_csv = self.cfg["TEXT_CSV"]
        self.img_csv = self.cfg["IMG_CSV"]
        self.data_root = self.cfg["DATA_ROOT"]
        self.batch_size = self.cfg["BATCH_SIZE"]
        self.image_size = tuple(self.cfg["IMAGE_SIZE"])
        self.name = self.cfg["EXPER_NAME"]
        self.pix_loss_ratio = self.cfg["PIXEL_LOSS_RATIO"]
        self.adv_loss_ratio = self.cfg["ADV_LOSS_RATIO"]
        self.checkpoint_epoch = self.cfg["CHECKPOINT_EPOCH"]

        self.beta1 = self.cfg["beta1"]
        self.word_dict = self.cfg["DICTIONARY"]
        self.writer = SummaryWriter(os.path.join("runs", self.exp_name))
        self.dataset = {"OPENI": OpeniDataset2, "MIMIC-CXR": MIMICDataset2}

        ##################################################
        ################# Dataset Setup ##################
        ##################################################
        self.t2i_dataset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.text_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))
        self.trainset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.train_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.valset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.val_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        s_gpus = self.cfg["GPU_ID"].split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)

        #########################################
        ############ Loss Function ##############
        #########################################
        content_losses = {"L2": nn.MSELoss(), "L1": nn.L1Loss()}
        self.G_criterion = content_losses[self.cfg["CONTENT_LOSS"]].to(
            self.device)

        #########################################
        ############ Network Init ###############
        #########################################

        self.decoder_L, self.D_L = self.define_nets()
        self.decoder_F, self.D_F = self.define_nets()
        self.encoder = harchyEncoder(
            vocab_size=self.t2i_dataset.vocab_size,
            embed_size=self.cfg["E_EMBED_SIZE"],
            hidden_size=self.cfg["E_HIDEN_SIZE"],
            max_len=[
                self.t2i_dataset.max_len_finding,
                self.t2i_dataset.max_len_impression
            ],
            unit=self.cfg["RNN_CELL"],
            feature_base_dim=self.cfg["D_CHANNEL_SIZE"]).to(self.device)

        self.encoder = nn.DataParallel(self.encoder, device_ids=self.gpus)
Example #8
    def __init__(self):
        self.cfg_json = "config/openi_progressive_hiach_test.json"
        self.cfg = self.pare_cfg(self.cfg_json)
        self.exp_name = self.cfg["EXPER_NAME"]
        self.test_csv = self.cfg["TEST_CSV"]
        self.encoder_resume = self.cfg["RESUME_ENCODER"]
        self.decoder_resume_F = self.cfg["RESUME_DECODER_F"]
        self.decoder_resume_L = self.cfg["RESUME_DECODER_L"]
        self.word_dict = self.cfg["DICTIONARY"]
        self.text_csv = self.cfg["TEXT_CSV"]
        self.img_csv = self.cfg["IMG_CSV"]

        self.data_root = self.cfg["DATA_ROOT"]
        self.image_size = tuple(self.cfg["IMAGE_SIZE"])
        self.name = self.cfg["EXPER_NAME"]
        self.save_img_dir = './save_image/OPENI_HIAv3'
        if not os.path.exists(self.save_img_dir):
            os.mkdir(self.save_img_dir)

        self.ENCODERS = {"HAttnEncoder": HAttnEncoder}
        self.DECODERS = {
            "baseDECODER": baseDecoder,
            "baseDECODERv2": baseDecoderv2,
            "baseDECODERv3": baseDecoderv3
        }
        self.P_DECODER = {
            "PDECODER": PDecoder,
            "PDECODERv2": PDecoderv2,
            "PDECODERv3": PDecoderv3
        }
        self.DISCRIMINATOR = {
            "baseDISCRIMINATOR": baseDiscriminator,
            "noconDISCRIMINATOR": noCon_Discriminator,
            "Patch": PatchDiscriminator,
            "SNDiscriminator": SNDiscriminator,
            "ResDISCRIMINATOR": ResDiscriminator,
            "PDISCRIMINATOR": PDiscriminator
        }
        self.dataset = {
            "OPENI": OpeniDataset2_Hiachy,
            "MIMIC-CXR": MIMICDataset2_Hiachy
        }

        ##################################################
        ################# Dataset Setup ##################
        ##################################################
        self.t2i_dataset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.text_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.testset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.test_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.testset = self.t2i_dataset
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        s_gpus = self.cfg["GPU_ID"].split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)

        self.test_dataloader = DataLoader(self.testset,
                                          batch_size=12,
                                          shuffle=False,
                                          num_workers=8,
                                          drop_last=True)

        self.base_size = 32
        self.P_ratio = int(np.log2(self.image_size[0] // self.base_size))
        self.base_ratio = int(np.log2(self.base_size))
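        # progressive setup: one decoder/discriminator for the base resolution plus one per power-of-two upscale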
        print("Number of Decoders", self.P_ratio + 1)
        print("Number of Discriminator", self.P_ratio + 1)

        #########################################
        ############ Network Init ###############
        #########################################
        self.define_nets()
        if self.num_gpus > 1:
            self.encoder = nn.DataParallel(self.encoder, device_ids=self.gpus)
            self.decoder_L = nn.DataParallel(self.decoder_L,
                                             device_ids=self.gpus)
            self.decoder_F = nn.DataParallel(self.decoder_F,
                                             device_ids=self.gpus)
Example #9
    cal_mean_std_iter = DataLoader(PersianAlphabetDataset(
        csv_files=['dataset/train_x.csv', 'dataset/train_y.csv']),
                                   batch_size=args.batch_size)
    mean, std = CalMeanStd0(
        cal_mean_std_iter)  # you have to pass a dataloader object

    # 					   ------------------
    # --------------------- building dataset
    # 					   ------------------
    #
    print(f"\t✅ building dataset pipeline from CSV files\n")
    # normalize image using calculated mean and std per channel
    # generally mean and std is a list of per channel values
    # in our case one value for std and mean cause we have one channel
    # --------------------------------------------------------------------
    transform = transforms.Compose([ToTensor(), Normalize(mean=mean, std=std)])
    training_transformed = PersianAlphabetDataset(
        csv_files=['dataset/train_x.csv', 'dataset/train_y.csv'],
        transform=transform)
    valid_transformed = PersianAlphabetDataset(
        csv_files=['dataset/test_x.csv', 'dataset/test_y.csv'],
        transform=transform)

    # 						---------------------------
    # --------------------- building dataloader objects
    # 						---------------------------
    #
    print(
        f"\t✅ building dataloader objects from training and valid data pipelines\n"
    )
    # -----------------------------------------------------
Example #10
## Set the training parameters
lr = 1e-3
batch_size = 4
num_epoch = 100

data_dir = './data'
ckpt_dir = './checkpoint'
log_dir = './log'

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

## Call Transform
transform = transforms.Compose(
    [Normalization(mean=0.5, std=0.5),
     RandomFlip(), ToTensor()])
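# the same pipeline (normalization, random flip, to-tensor) is applied to the train and val datasets below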

## Create the datasets
dataset_train = Dataset(data_dir=os.path.join(data_dir, 'train'),
                        transform=transform)
loader_train = DataLoader(dataset_train,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=4)

dataset_val = Dataset(data_dir=os.path.join(data_dir, 'val'),
                      transform=transform)
loader_val = DataLoader(dataset_val,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=4)
Example #11
        test_embedding = self.model(test)

        l2_dist = self.l2_dist(self.anchor_emdedding, test_embedding)
        print(l2_dist.detach().item())

        if l2_dist < 10:
            print("correct")
        else:
            print("wrong")
        print("=" * 55)


if __name__ == "__main__":
    cfg = get_configuration()
    model = create_model(cfg)
    valload = create_dataset(cfg, "train", transform=ToTensor())
    restore_path = "./checkpoints/facenet/checkpoint_1000.pth"
    # dataloader = create_dataset(cfg, 'test', transform=ToTensor())
    inference_engie = InferenceEngine(cfg, model, valload)
    inference_engie.setup(restore_path)
    text = "=" * 80
    print("\033[33m{}\033[0m".format(text))
    # test single image
    # img_path = input("Enter the picture you want to save:")
    # inference_engie.save_code_base(img_path)
    # while True:
    #     test_path = input("Enter the picture you want to test:")
    #     if test_path == "q":
    #         break
    #     inference_engie.calculate_similarity(test_path)
Example #12
def train(epochs, batch_size, learning_rate):

    torch.manual_seed(1234)

    train_loader = torch.utils.data.DataLoader(SegThorDataset(
        "/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data/train",
        phase='train',
        transform=transforms.Compose([Rescale(1.0),
                                      Normalize(),
                                      ToTensor()])),
                                               batch_size=batch_size,
                                               shuffle=True)
    '''
    # Loading validation data
    val_set = SegThorDataset("/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_val", phase='val',
                                   transform=transforms.Compose([
                                       Rescale(0.5),
                                       Normalize(),
                                       ToTensor2()
                                   ]))

    val_loader = torch.utils.data.DataLoader(dataset=val_set,
                                             batch_size=1,
                                             shuffle=False)
    '''

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    model.apply(weight_init)
    #optimizer = optim.Adam(model.parameters(), lr=learning_rate)    #learning rate to 0.001 for initial stage
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=0.00001)
    #optimizer = adabound.AdaBound(params = model.parameters(), lr = 0.001, final_lr = 0.1)

    for epoch in range(epochs):
        f = open('train_output.log', 'a')
        f.write('Epoch {}/{}\n'.format(epoch + 1, epochs))
        f.write('-' * 10)

        running_loss = 0.0
        running_loss_label = np.zeros(5)
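        # per-class running Dice loss: background, esophagus, heart, trachea, aorta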
        for batch_idx, sample in enumerate(train_loader):
            train_data, labels = sample['image'].to(
                device,
                dtype=torch.float), sample['label'].to(device,
                                                       dtype=torch.uint8)

            optimizer.zero_grad()
            output = model(train_data)

            loss_label, loss = dice_loss2(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            for i in range(5):
                running_loss_label[i] += loss_label[i]

        epoch_loss = running_loss / len(train_loader)
        writer.add_scalar('Train/Loss', epoch_loss, epoch)
        f.write("\n Total Dice Loss: {:.4f}\n".format(epoch_loss))
        epoch_loss_class = np.true_divide(running_loss_label,
                                          len(train_loader))
        f.write(
            "Dice per class: Background = {:.4f} Eusophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
            .format(epoch_loss_class[0], epoch_loss_class[1],
                    epoch_loss_class[2], epoch_loss_class[3],
                    epoch_loss_class[4]))
        #f.write("Dice per class: Background = {:.4f} Eusophagus = {:.4f}\n".format(epoch_loss_class[0], epoch_loss_class[1]))
        f.close()

        if epoch % 4 == 0:
            os.makedirs("models", exist_ok=True)
            torch.save(model, "models/model.pt")

    # export scalar data to JSON for external processing
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
    os.makedirs("models", exist_ok=True)
    torch.save(model, "models/model.pt")
Example #13
from torchvision import transforms, models
from torch.utils.data import DataLoader
import torch
import torch.nn as nn

from focal_loss import FocalLoss
from model import get_model

transform = transforms.Compose([
    LeftToRightFlip(0.5),
    RandomRotation(angle=3, p=0.5),
    Resize(224),
    ColorJitter(p=0.5, color=0.1, contrast=0.1, brightness=0.1, sharpness=0.1),
    RandomCrop(scale=210, p=0.5),
    Resize(224),
    ToTensor()
])

dataset = ChestXrayDataset(csv_file=os.path.join('dataset', 'train+test',
                                                 'train+test.csv'),
                           root_dir=os.path.join('dataset', 'train+test'),
                           transform=transform)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
dataset_size = len(dataset)


def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
Example #14
def test():
    test_path = '/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_sub/test'
    for patient in tqdm(os.listdir(test_path)): 
        count = 0
        area = 0
        
        file = patient
        x = file.split(".")
        filename = x[0] + '.' + x[1]

        print("patient = ", patient)
        test_set = SegThorDataset(test_path,
                                  patient=patient, 
                                  phase='test',
                                  transform=transforms.Compose([
                                         Rescale(1.0, labeled=False),
                                         Normalize(labeled=False),
                                         ToTensor(labeled=False)
                                  ]))

        test_loader = torch.utils.data.DataLoader(dataset=test_set, 
                                                  batch_size=1, 
                                                  shuffle=False)


        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        model = torch.load("models/model.pt")
        model.eval()

        '''
        with torch.no_grad():
            for batch_idx, sample in enumerate(test_loader):     
                images = sample['image'].to(device, dtype=torch.float)        
                outputs = model(images)

                print("tensor: {} and {} ".format(images.size(), outputs.size()))
                images = tensor_to_numpy(images)            
                max_idx = torch.argmax(outputs, 1, keepdim=True)
                max_idx = tensor_to_numpy(max_idx)
                print("numpy: {} and {} ".format(images.shape, max_idx.shape))

                fig=plt.figure()
                fig.add_subplot(1,2,1)
                plt.imshow(max_idx)
                fig.add_subplot(1,2,2)
                plt.imshow(images)
                plt.show()
#                fig.close()
                count = count + 1
                if count==150:
                    break
        '''
#        '''
        seg_vol_2d = zeros([len(test_set),  512, 512])
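        # buffer that collects one 512x512 segmentation mask per test slice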
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        model = torch.load("models/model.pt")
        model.eval()
        model.to(device)
        
        with torch.no_grad():
            for batch_idx, sample in enumerate(test_loader):     
                images = sample['image'].to(device, dtype=torch.float)        
                outputs = model(images)

                images = tensor_to_numpy(images)            
                max_idx = torch.argmax(outputs, 1, keepdim=True)
                max_idx = tensor_to_numpy(max_idx)
                          
              #  for k in range(outputs.size(0)): 
              #  print(max_idx.shape)
                slice_v = max_idx[:,:]   
                slice_v = slice_v.astype(float32)
                slice_v = ndimage.interpolation.zoom(slice_v, zoom=1, order=0, mode='nearest', prefilter=True)
                seg_vol_2d[count,:,:] = slice_v
                count = count + 1
               
            segmentation = sitk.GetImageFromArray(seg_vol_2d, isVector=False)
            print(segmentation.GetSize())
            sitk.WriteImage(sitk.Cast( segmentation, sitk.sitkUInt8 ), filename, True) 
Example #15
        f.write('\n')

    cfg = dict()
    cfg['batch_size'] = 64

    cfg['scale'] = 0.5
    if cfg['scale'] == 0.5:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]
    else:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]
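        # note: both branches currently assign the same per-channel mean/std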

    train_set = OpenEDS(root_path=root_path + 'train',
                        transform=transforms.Compose(
                            [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)]))  #
    val_set = OpenEDS(root_path=root_path + 'validation',
                      transform=transforms.Compose(
                          [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)]))  #

    test_set = OpenEDS(root_path=root_path + 'test',
                       transform=transforms.Compose(
                           [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)]))  #

    loaders = {'train': torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True,
                                                    num_workers=args.num_workers),
               'val': torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False,
                                                  num_workers=args.num_workers, pin_memory=False),
               'test': torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False,
                                                   num_workers=args.num_workers, pin_memory=False)
               }
Example #16
    def __init__(self):
        self.cfg_json = "config/MIMIC_wgan.json"
        self.cfg = self.pare_cfg(self.cfg_json)
        self.exp_name = self.cfg["EXPER_NAME"]
        self.max_epoch = self.cfg["MAX_EPOCH"]

        self.encoder_checkpoint = self.cfg["CHECKPOINT_ENCODER"]
        self.decoder_checkpoint = self.cfg["CHECKPOINT_DECODER"]

        self.D_checkpoint = self.cfg["CHECKPOINT_D"]
        self.check_create_checkpoint()

        self.encoder_resume = self.cfg["RESUME_ENCODER"]
        self.decoder_resume_F = self.cfg["RESUME_DECODER_F"]
        self.decoder_resume_L = self.cfg["RESUME_DECODER_L"]
        self.D_resume = self.cfg["RESUME_D"]

        self.train_csv = self.cfg["TRAIN_CSV"]
        self.val_csv = self.cfg["VAL_CSV"]
        self.test_csv = self.cfg["TEST_CSV"]

        self.img_csv = self.cfg["IMG_CSV"]
        self.data_root = self.cfg["DATA_ROOT"]
        self.batch_size = self.cfg["BATCH_SIZE"]
        self.image_size = tuple(self.cfg["IMAGE_SIZE"])
        self.name = self.cfg["EXPER_NAME"]
        self.pix_loss_ratio = self.cfg["PIXEL_LOSS_RATIO"]
        self.adv_loss_ratio = self.cfg["ADV_LOSS_RATIO"]
        self.checkpoint_epoch = self.cfg["CHECKPOINT_EPOCH"]

        self.beta1 = self.cfg["beta1"]
        self.word_dict = self.cfg["DICTIONARY"]
        self.writer = SummaryWriter(os.path.join("runs", self.exp_name))
        self.ENCODERS = {
            "HAttnEncoder": HAttnEncoder,
        }
        self.DECODERS = {
            "baseDECODER": baseDecoder,
            "baseDECODERv2": baseDecoderv2,
            "baseDECODERv3": baseDecoderv3
        }

        self.DISCRIMINATOR = {
            "baseDISCRIMINATOR": baseDiscriminator,
            "noconDISCRIMINATOR": noCon_Discriminator,
            "Patch": PatchDiscriminator,
            "SNDiscriminator": SNDiscriminator,
            "ResDISCRIMINATOR": ResDiscriminator,
            "PDISCRIMINATOR": PDiscriminator
        }
        self.dataset = {"OPENI": OpeniDataset2, "MIMIC-CXR": MIMICDataset2}

        ##################################################
        ################# Dataset Setup ##################
        ##################################################
        self.trainset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.train_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.valset = self.dataset[self.cfg["DATASET"]](
            csv_txt=self.val_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            word_dict=self.word_dict,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))

        self.sia_dataset = self.dataset[self.cfg["DATASET"]][1](
            csv_txt=self.train_csv,
            csv_img=self.img_csv,
            root=self.data_root,
            transform=transforms.Compose(
                [Rescale(self.image_size),
                 Equalize(), ToTensor()]))
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        s_gpus = self.cfg["GPU_ID"].split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)

        #########################################
        ############ Loss Function ##############
        #########################################
        content_losses = {"L2": nn.MSELoss(), "L1": nn.L1Loss()}
        self.G_criterion = content_losses[self.cfg["CONTENT_LOSS"]].to(
            self.device)

        #########################################
        ############ Network Init ###############
        #########################################

        self.base_size = self.image_size[0]
        self.base_ratio = int(np.log2(self.base_size))

        self.define_nets()
        if self.num_gpus > 1:
            self.encoder = nn.DataParallel(self.encoder, device_ids=self.gpus)
            self.decoder_L = nn.DataParallel(self.decoder_L,
                                             device_ids=self.gpus)
            self.decoder_F = nn.DataParallel(self.decoder_F,
                                             device_ids=self.gpus)
Example #17
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from utils import SignatureDataset, Rescale, ToTensor
from model import SigNet
import argparse

DATADIR = './data/sign_data/'

rescale = Rescale((100, 100))

# TODO Speed up data loader & add cuda support
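# both splits share the same 100x100 rescale + ToTensor pipeline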
train_set = SignatureDataset(root='./data/sign_data/',
                             split='train',
                             transforms=transforms.Compose(
                                 [rescale, ToTensor()]))
test_set = SignatureDataset(root='./data/sign_data/',
                            split='test',
                            transforms=transforms.Compose(
                                [rescale, ToTensor()]))

if __name__ == '__main__':
    # TODO: Add argparser
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    batch_size = 150
    num_workers = 8  # Set number of workers for prefetching of data

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers)
        print("Iteration: {}, len(dl): {}, len(du): {},"
              " len(dh) {}, acc: {} ".format(iteration,
                                             len(dl.sampler.indices),
                                             len(du.sampler.indices),
                                             len(hcs_idx), acc))


if __name__ == "__main__":

    dataset_train = Caltech256Dataset(
        root_dir="../caltech256/256_ObjectCategories_train",
        transform=transforms.Compose(
            [SquarifyImage(),
             RandomCrop(224),
             Normalize(),
             ToTensor()]))

    dataset_test = Caltech256Dataset(
        root_dir="../caltech256/256_ObjectCategories_test",
        transform=transforms.Compose(
            [SquarifyImage(),
             RandomCrop(224),
             Normalize(),
             ToTensor()]))

    # Creating data indices for training and validation splits:
    random_seed = 123
    validation_split = 0.1  # 10%
    shuffling_dataset = True
    batch_size = 16
    dataset_size = len(dataset_train)
Example #19
def dataloader(dataset, batch_size, cuda):

    if dataset == 'CIFAR10':
        data = datasets.CIFAR10('./CIFAR10', train=True, download=True,
                       transform=transforms.Compose([
                           AddUniformNoise(0.05),
                           Transpose(),
                           ToTensor()
                       ]))

        data_hflip = datasets.CIFAR10('./CIFAR10', train=True, download=True,
                           transform=transforms.Compose([
                           HorizontalFlip(), 
                           AddUniformNoise(0.05),
                           Transpose(),
                           ToTensor()
                       ]))
        data = torch.utils.data.ConcatDataset([data, data_hflip])
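        # the concatenated set holds 100,000 images (50,000 originals + 50,000 flipped), split 90k/10k below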

        train_data, valid_data = torch.utils.data.random_split(data, [90000, 10000])

        test_data = datasets.CIFAR10('./CIFAR10', train=False, download=True,
                        transform=transforms.Compose([
                            AddUniformNoise(0.05),
                            Transpose(),
                            ToTensor()
                       ]))

    elif dataset == 'MNIST':
        data = datasets.MNIST('./MNIST', train=True, download=True,
                   transform=transforms.Compose([
                       AddUniformNoise(),
                       ToTensor()
                   ]))

        train_data, valid_data = torch.utils.data.random_split(data, [50000, 10000])
 
        test_data = datasets.MNIST('./MNIST', train=False, download=True,
                    transform=transforms.Compose([
                        AddUniformNoise(),
                        ToTensor()
                    ]))
    else:  
        print('unknown dataset:', dataset)
        sys.exit(1)

    #load data 
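    # use pinned host memory for faster GPU transfers when a CUDA device is selected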
    kwargs = {'num_workers': 0, 'pin_memory': True} if cuda>-1 else {}

    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=batch_size, shuffle=True, **kwargs)

    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=batch_size, shuffle=True, **kwargs)
 
    test_loader = torch.utils.data.DataLoader(test_data,
        batch_size=batch_size, shuffle=True, **kwargs)
    
    return train_loader, valid_loader, test_loader
Example #20
opt = CycleMcdTrainOptions().parse()

# set model

model = createModel(opt)  # create a new model
model.setup(opt)  # set model

# set dataloader

if opt.augment:
    print("with data augmentation")
    transformList = [
        RandomRotation(10),
        RandomResizedCrop(),
        Resize(opt.loadSize),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
        RandomHorizontalFlip(),
    ]
else:
    print("without data augmentation")
    transformList = [
        Resize(opt.loadSize),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225])
    ]

transform = Compose(transformList)

supervisedADataset = createDataset([opt.supervisedADataset],
                                   transform=transform,
Example #21
    '''
    for i in range(len(segthor_dataset)):
        sample = segthor_dataset[i]
        
    #    print(i, sample['image'].size())
        plt.imshow(sample['image'])
        plt.show()
        if i == 50:
            break

    '''
    #    '''
    ## Loading data for training phase
    segthor_dataset = SegThorDataset(
        datapath="/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_sub/train",
        phase='train',
        transform=transforms.Compose([
            Rescale(1.0, labeled=True),
            Normalize(labeled=True),
            JointTransform2D(crop=(288, 288), p_flip=0.5),
            ToTensor(labeled=True)
        ]))

    for i in range(len(segthor_dataset)):
        sample = segthor_dataset[i]

        print(i, sample['image'].size(), sample['label'].size())
        if i == 5:
            break
#    '''