Example #1
def main(_):
    parser = argparse.ArgumentParser()
    parser.add_argument('--action', dest='action', type=str, default='train',
                        help='actions: train, test, predict or store')
    args = parser.parse_args()
    if args.action not in ['train', 'test', 'predict', 'store']:
        print('invalid action: ', args.action)
        print("Please input a action: train, test, predict or store")
    else:
        if args.action == 'test':
            model = Unet(tf.InteractiveSession(), configure())
        else:
            model = Unet(tf.Session(), configure())
        getattr(model, args.action)()
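The main(_) signature follows the TensorFlow 1.x tf.app.run() convention, where the single unused argument receives the leftover argv. A minimal sketch of the usual wiring, assuming the original script follows that pattern (the entry-point block below is not shown in the snippet):

import argparse              # used inside main() above
import tensorflow as tf

if __name__ == '__main__':
    # the TF1 entry point: parses known flags, then calls main() with the remaining argv
    tf.app.run(main=main)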
Example #2
    def __init__(self,
                 nets_RGBM,
                 nets_RGB=None,
                 nets_VAE=None,
                 total_samples=50000,
                 seed=0):
        self.nets_RGBM = nets_RGBM
        self.nets_RGB = nets_RGB
        self.nets_VAE = nets_VAE
        self.zs = nets_RGBM.sample_zs(total_samples, seed)
        self.total_samples = total_samples
        self.outsize = nets_RGBM.setting['outdim']

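        # pull the PiCANet implementation onto the path and load its pretrained saliency U-Net onto the GPU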
        sys.path.append('resources/PiCANet-Implementation')
        from network import Unet
        from dataset import CustomDataset
        ckpt = 'resources/PiCANet-Implementation/36epo_383000step.ckpt'
        state_dict = torch.load(ckpt)
        model = Unet().cuda()
        model.load_state_dict(state_dict)
        self.model = model
Example #3
    def __init__(self, hparams):
        super(Unet3D, self).__init__()
        self.hparams = hparams
        self.learning_rate = hparams.learning_rate
        self.data_set_dir = hparams.data_set_dir
        self.loader_kwargs = {
            'batch_size': hparams.batch_size,
            'num_workers': hparams.num_workers,
            'pin_memory': True
        }
        self.valid_split = hparams.valid_split

        num_pool = hparams.num_pool
        num_features = hparams.num_features
        patch_size = (hparams.patch_x, hparams.patch_y, hparams.patch_z)

        def encode_kwargs_fn(level):
            num_stacks = max(level, 1)
            return {'num_stacks': num_stacks}

        paired_features = generate_paired_features(num_pool, num_features)

        self.net = Unet(in_channels=1,
                        out_channels=1,
                        paired_features=paired_features,
                        pool_block=ResBlock,
                        pool_kwargs={'stride': 2},
                        up_kwargs={'attention': True},
                        encode_block=ResBlockStack,
                        encode_kwargs_fn=encode_kwargs_fn,
                        decode_block=ResBlock)

        self.loss_fn = FocalDiceCoefLoss()
        self.metrics = {'kd_dsc': DiceCoef()}

        self.tr_transform = Compose([
            RandomRescaleCrop(0.1,
                              patch_size,
                              crop_mode='random',
                              enforce_label_indices=[1]),
            RandomMirror((0.2, 0, 0)),
            RandomContrast(0.1),
            RandomBrightness(0.1),
            RandomGamma(0.1),
            CombineLabels([1, 2], 3),
            ToTensor()
        ])
        self.vd_transform = Compose(
            [RandomCrop(patch_size),
             CombineLabels([1, 2], 3),
             ToTensor()])
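The hparams-driven constructor and the paired train/validation transforms suggest a PyTorch Lightning module. Purely as a hypothetical sketch, assuming Unet3D subclasses pytorch_lightning.LightningModule and hparams is the parsed argparse namespace read above, training would be driven along these lines:

import pytorch_lightning as pl

model = Unet3D(hparams)               # hparams: the argparse.Namespace read in __init__
trainer = pl.Trainer(max_epochs=100)  # epoch budget chosen arbitrarily for this sketch
trainer.fit(model)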
        help="save result images as .jpg file. If None -> Not save",
        default=None)

    args = parser.parse_args()

    if args.logdir is None and args.save_dir is None:
        print(
            "You should specify either --logdir or --save_dir to save results!"
        )
        assert 0

    print(args)
    print(os.getcwd())
    device = torch.device(args.cuda)
    state_dict = torch.load(args.model_dir, map_location=args.cuda)
    model = Unet().to(device)
    model.load_state_dict(state_dict)
    custom_dataset = CustomDataset(root_dir=args.dataset)
    dataloader = DataLoader(custom_dataset, args.batch_size, shuffle=False)
    os.makedirs(os.path.join(args.save_dir, 'img'), exist_ok=True)
    os.makedirs(os.path.join(args.save_dir, 'mask'), exist_ok=True)
    if args.logdir is not None:
        writer = SummaryWriter(args.logdir)
    model.eval()
    for i, batch in enumerate(tqdm(dataloader)):
        img = batch.to(device)
        with torch.no_grad():
            pred, loss = model(img)
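        # the PiCANet U-Net returns one prediction per decoder stage; index 5 is the final full-resolution map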
        pred = pred[5].data
        pred.requires_grad_(False)
        if args.logdir is not None:
Example #5
                        default=0.1, type=float)
    parser.add_argument('--decay_step', help='Multiply the learning rate by lr_decay every decay_step steps, default = 7000',
                        default=7000, type=int)
    parser.add_argument('--display_freq', help='How often (in steps) to display result images on TensorBoard',
                        default=1000, type=int)


    args = parser.parse_args()
    # TODO : Add multiGPU Model
    device = torch.device(args.cuda)
    batch_size = args.batch_size
    epoch = args.epoch
    duts_dataset = PairDataset(args.dataset)
    load = args.load
    start_iter = 0
    model = Unet(cfg).cuda()
    #vgg = torchvision.models.vgg16(pretrained=True)
    #model.encoder.seq.load_state_dict(vgg.features.state_dict())
    now = datetime.datetime.now()
    start_epo = 0
    #del vgg

    if load is not None:
        state_dict = torch.load(load, map_location=args.cuda)

        #start_iter = int(load.split('epo_')[1].strip('step.ckpt')) + 1
        #start_epo = int(load.split('/')[3].split('epo')[0])
        #now = datetime.datetime.strptime(load.split('/')[2], '%m%d%H%M')

        #print("Loading Model from {}".format(load))
        #print("Start_iter : {}".format(start_iter))
Example #6
        'lr_decay': 0.1,  # multiply the learning rate by this factor every decay_step (default 0.1)
        'decay_step': 7000,  # apply lr_decay every this many steps (default 7000)
        'batch_size': 20,  # batch size (script default: 1)
        'epoch': 20,  # number of epochs (default 20)
        'dataset': '',  # directory of your dataset
        'load': None,  # path to a pre-trained checkpoint (None = train from scratch)
        'display_freq': 1000,  # how often (in steps) to log result images to TensorBoard
    }
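    # fix the RNG seeds so runs are reproducible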
    torch.cuda.manual_seed_all(1234)
    torch.manual_seed(1234)
    start_iter = 0
    now = datetime.datetime.now()
    start_epo = 0

    duts_dataset = CustomDataset(config['dataset'])
    model = Unet(cfg, config['mode']).cuda()
    # vgg = torchvision.models.vgg16(pretrained=True)
    # model.encoder.seq.load_state_dict(vgg.features.state_dict())
    # del vgg

    if config['load'] is not None:
        state_dict = torch.load(config['load'], map_location='cuda')

        start_iter = 1  # int(config['load'].split('epo_')[1].strip('step.ckpt')) + 1
        start_epo = 0  # int(config['load'].split('/')[3].split('epo')[0])
        now = time.strftime('%Y%m%d-%H%M%S', time.localtime())

        print("Loading Model from {}".format(config['load']))
        print("Start_iter : {}".format(start_iter))
        model.load_state_dict(state_dict)
        for cell in model.decoder:
Example #7
        # Make directory where we save model and data
        all_dir = os.path.join(CUR_DIR, 'models', f'{DATASET}', 'all')
        maybe_mkdir_p(all_dir)

        val_dataset = ImageDataset_test(root_dir, ISBI2012=ISBI2012)

        # Shuffle and load the training set
        train_loader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True)
        val_loader = DataLoader(val_dataset,
                                batch_size=batch_size,
                                shuffle=True)

        torch.cuda.empty_cache()
        unet = Unet().to(device)
        if START_FROM == -1:  # load latest model
            epoch_id = max([
                int(name.replace('unet_weight_save_', '').replace('.pth', ''))
                for name in os.listdir(os.path.join(all_dir, 'models'))
            ])
            PATH = os.path.join(all_dir, 'models',
                                'unet_weight_save_{}.pth'.format(epoch_id))
            unet.load_state_dict(torch.load(PATH))

        print('Number of images used for training:', len(train_dataset))
        print('                                                       ')
        print('Starting training')
        print('                                                       ')

        training(unet,
Example #8
def main():
    #Initialize U-net model
    print("Initializing Networks...")
    haemorrhagesModel = Unet(LEARNING_RATE, IMAGE_SHAPE_3D).get_model()
    hardExudatesModel = Unet(LEARNING_RATE, IMAGE_SHAPE_3D).get_model()
    microaneurysmsModel = Unet(LEARNING_RATE, IMAGE_SHAPE_3D).get_model()
    softExudatesModel = Unet(LEARNING_RATE, IMAGE_SHAPE_3D).get_model()

    #Original images
    print("Reading original images...")
    originalTrain = read_images("Train/original_retinal_images/")
    originalTest = read_images("Test/original_retinal_images/")
    originalImages = np.concatenate((originalTrain, originalTest), axis=0)

    #Train masks
    print("Reading train masks...")
    originalHaemorrhagesTrain, haemorrhagesTrain = read_masks("Train/masks_Haemorrhages/", originalImages)
    originalHardExudatesTrain, hardExudatesTrain = read_masks("Train/masks_Hard_Exudates/", originalImages)
    originalMicroaneurysmsTrain, microaneurysmsTrain = read_masks("Train/masks_Microaneurysms/", originalImages)
    originalSoftExudatesTrain, softExudatesTrain = read_masks("Train/masks_Soft_Exudates/", originalImages)

    #Test masks
    print("Reading test masks...")
    originalHaemorrhagesTest, haemorrhagesTest = read_masks("Test/masks_Haemorrhages/", originalImages)
    originalHardExudatesTest, hardExudatesTest = read_masks("Test/masks_Hard_Exudates/", originalImages)
    originalMicroaneurysmsTest, microaneurysmsTest = read_masks("Test/masks_Microaneurysms/", originalImages)
    originalSoftExudatesTest, softExudatesTest = read_masks("Test/masks_Soft_Exudates/", originalImages)

    #Image Generators
    haemorrhagesGen = generate(originalHaemorrhagesTrain, haemorrhagesTrain)
    hardExudatesGen = generate(originalHardExudatesTrain, hardExudatesTrain)
    microaneurysmsGen = generate(originalMicroaneurysmsTrain, microaneurysmsTrain)
    softExudatesGen = generate(originalSoftExudatesTrain, softExudatesTrain)

    #Train on generated data
    print("Start Training!")
    haemorrhagesHistory = haemorrhagesModel.fit_generator(haemorrhagesGen, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=(originalHaemorrhagesTest, haemorrhagesTest))
    hardExudatesHistory = hardExudatesModel.fit_generator(hardExudatesGen, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=(originalHardExudatesTest, hardExudatesTest))
    microaneurysmsHistory = microaneurysmsModel.fit_generator(microaneurysmsGen, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=(originalMicroaneurysmsTest, microaneurysmsTest))
    softExudatesHistory = softExudatesModel.fit_generator(softExudatesGen, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=(originalSoftExudatesTest, softExudatesTest))

    #Save models
    print("Saving Models...")
    haemorrhagesModel.save("Models/haemorrhages.h5")
    hardExudatesModel.save("Models/hardExudates.h5")
    microaneurysmsModel.save("Models/microaneurysms.h5")
    softExudatesModel.save("Models/softExudates.h5")

    #Create loss and metric plots
    print("Creating Plots...")
    create_plots(haemorrhagesHistory, "Haemorrhages")
    create_plots(hardExudatesHistory, "Hard Exudates")
    create_plots(microaneurysmsHistory, "Microaneurysms")
    create_plots(softExudatesHistory, "Soft Exudates")


    #Produce results
    print("Producing results...")
    haemorrhagesResults = process_masks(haemorrhagesModel.predict(originalHaemorrhagesTest))
    write_images(haemorrhagesResults, "Result/masks_Haemorrhages/")
    hardExudatesResults = process_masks(hardExudatesModel.predict(originalHardExudatesTest))
    write_images(haemorrhagesResults, "Result/masks_Hard_Exudates/")
    microaneurysmsResults = process_masks(microaneurysmsModel.predict(originalMicroaneurysmsTest))
    write_images(haemorrhagesResults, "Result/masks_Microaneurysms/")
    softExudatesResults = process_masks(softExudatesModel.predict(originalSoftExudatesTest))
    write_images(haemorrhagesResults, "Result/masks_Soft_Exudates/")
Example #9
     'model_dir': "./models/state_dict/07031644/",
     'dataset': '',  # Directory of your Dataset
     'batch_size': 20,  # batch size (script default: 1)
     'logdir': "./log/",  # TensorBoard log directory
     'which_iter': 1,  # specific iteration to measure (default -1)
     'cont': 0,  # measure scores starting from this iteration
     'step': 10000,  # measure scores every this many iterations
 }
 models = sorted(os.listdir(config['model_dir']),
                 key=lambda x: int(x.split('epo_')[1].split('step')[0]))
 pairdataset = CustomDataset(root_dir=config['dataset'])
 dataloader = DataLoader(pairdataset, 8, shuffle=True)
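 # β² = 0.3 is the conventional weighting for the F-measure in salient-object-detection benchmarks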
 beta_square = 0.3
 if config['logdir'] is not None:
     writer = SummaryWriter(config['logdir'])
 model = Unet(cfg, config['mode']).cuda()
 for model_name in models:
     model_iter = int(model_name.split('epo_')[1].split('step')[0])
     if model_iter % config['step'] != 0:
         continue
     if model_iter < config['cont']:
         continue
     if config['which_iter'] > 0 and config['which_iter'] != model_iter:
         continue
     state_dict = torch.load(os.path.join(config['model_dir'], model_name))
     model.load_state_dict(state_dict)
     model.eval()
     mae = 0
     preds = []
     masks = []
     precs = []

Example #10
def encode_kwargs_fn(level):
    num_stacks = max(level, 1)
    return {'num_stacks': num_stacks}


paired_features = generate_paired_features(num_pool, num_features)

model = Unet(in_channels=1,
             out_channels=3,
             paired_features=paired_features,
             pool_block=ResBlock,
             pool_kwargs={'stride': 2},
             up_kwargs={'attention': True},
             encode_block=ResBlockStack,
             encode_kwargs_fn=encode_kwargs_fn,
             decode_block=ResBlock).cuda()

patch_size = (160, 160, 80)
optimizer = optim.Adam(model.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                 factor=0.2,
                                                 patience=30)
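# ReduceLROnPlateau cuts the learning rate to 20% after 30 validation checks without improvement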

tr_transform = Compose([
    GammaTransform((0.9, 1.1)),
Example #11
        result.append(SOUTH)

    if -11 <= dy < 0 or dy > 11:
        result.append(NORTH)

    return result


print("Loading weights (weights.pth). {:.0f} ms - Stage G".format(
    (perf_counter() - start_time) * 1e3))

#-------------- CREATE MODEL SECTION ------------------#

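# This looks like segmentation_models_pytorch's Unet: an EfficientNet-B0 encoder truncated to
# depth 2, 64 input feature planes, and 5 output classes (presumably one per ship action).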
model = Unet(encoder_name="efficientnet-b0",
             classes=5,
             encoder_depth=2,
             decoder_channels=(64, 32),
             in_channels=64,
             encoder_weights=None)

print("Loading weights (weights.pth). {:.0f} ms - Stage H".format(
    (perf_counter() - start_time) * 1e3))

model.load_state_dict(
    torch.load("halite-imitation-learning-bot/weights.pth",
               torch.device(device))['model_state_dict'])
model.to(device)
model.eval()
torch.set_grad_enabled(False)  # inference only: disable autograd globally
torch.set_num_threads(os.cpu_count())

Example #12
# random initialization method
def weights_init_xavier(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)  # scale close to 1 with small noise
        init.constant_(m.bias.data, 0.0)



# batch_size = 24
net = Unet().cuda(0)
net.apply(weights_init_xavier)
net.train(True)


optimizerD = optim.Adam(net.parameters(), lr=0.005, betas=(0.5, 0.999))

#Binary Cross Entropy
criterion = nn.BCELoss(reduction='mean').cuda(0)

#The path of the data
data_path = './data/images/'
dst = ImageNet_Dataloader(data_path, is_transform=True)
print('length of the dataset', len(dst))
trainloader = data.DataLoader(dst, batch_size=24, shuffle=True)
step_index = 0
Example #13
                            beta=(1 - factor),
                            gamma=0)

    return blend


torch.set_printoptions(profile='full')
if __name__ == '__main__':

    model_dir = "../pretrained_models/picanet"
    models = sorted(os.listdir(model_dir),
                    key=lambda x: int(x.split('epo_')[1].split('step')[0]))

    device = torch.device("cuda")

    bdda_model = Unet().to(device)
    sage_model = Unet().to(device)
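    # two copies of the same architecture; the checkpoints loaded below differ (BDD-A vs. SAGE, judging by the variable names)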

    print("Model loaded! Loading Checkpoint...")

    bdda_model_name = models[0]
    sage_model_name = models[1]

    bdda_state_dict = torch.load(os.path.join(model_dir, bdda_model_name))
    sage_state_dict = torch.load(os.path.join(model_dir, sage_model_name))

    bdda_model.load_state_dict(bdda_state_dict)
    sage_model.load_state_dict(sage_state_dict)

    print("Checkpoint loaded! Now predicting...")
Example #14
    if args.logdir is None and args.save_dir is None:
        print(
            "You should specify either --logdir or --save_dir to save results!"
        )
        assert 0

    print(args)
    print(os.getcwd())
    device = torch.device(args.cuda)
    #torch.save(
    #    model.state_dict(),
    #    os.path.join(weight_save_dir, '{}epo_{}step.ckpt'.format(epo, iterate))
    #)
    state_dict = torch.load(args.model_dir, map_location=args.cuda)
    model = Unet().to(device)
    #model = Unet(cfg).to(device)
    #for cell in model.decoder:
    #    if cell.mode == 'G':
    #        cell.picanet.renet.vertical.flatten_parameters()
    #        cell.picanet.renet.horizontal.flatten_parameters()
    model.load_state_dict(state_dict)  #, strict=False)
    custom_dataset = CustomDataset(root_dir=args.dataset)
    dataloader = DataLoader(custom_dataset, args.batch_size, shuffle=False)
    os.makedirs(os.path.join(args.save_dir, 'img'), exist_ok=True)
    os.makedirs(os.path.join(args.save_dir, 'mask'), exist_ok=True)
    if args.logdir is not None:
        writer = SummaryWriter(args.logdir)
    model.eval()
    for i, batch in enumerate(tqdm(dataloader)):
        img = batch.to(device)