def main(args):

    savedir = '/home/shyam.nandan/NewExp/final_code/save/' + args.savedir  # change path here

    if not os.path.exists(savedir):
        os.makedirs(savedir)

    with open(savedir + '/opts.txt', "w") as myfile:
        myfile.write(str(args))

    rmodel = UNet()
    rmodel = torch.nn.DataParallel(rmodel).cuda()
    pretrainedEnc = torch.nn.DataParallel(ERFNet_imagenet(1000))
    pretrainedEnc.load_state_dict(
        torch.load(args.pretrainedEncoder)['state_dict'])
    pretrainedEnc = next(pretrainedEnc.children()).features.encoder
    model = Net(NUM_CLASSES)
    model = fill_weights(model, pretrainedEnc)
    model = torch.nn.DataParallel(model).cuda()
    #model = train(args, rmodel, model, False)

    PATH = '/home/shyam.nandan/NewExp/final_code/results/CB_iFL/rmodel_best.pth'
    rmodel.load_state_dict(torch.load(PATH))

    PATH = '/home/shyam.nandan/NewExp/final_code/results/CB_iFL/model_best.pth'

    model.load_state_dict(torch.load(PATH))

    model = train(args, rmodel, model, False)
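main() relies on a fill_weights() helper that is not shown in this excerpt. A minimal sketch of what it plausibly does, seeding the new model with the pretrained ERFNet encoder (the `encoder` attribute on Net is an assumption):

def fill_weights(model, pretrained_encoder):
    # Hypothetical helper: seed the segmentation net's encoder with the
    # ImageNet-pretrained ERFNet encoder before fine-tuning.
    model.encoder.load_state_dict(pretrained_encoder.state_dict())
    return model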
    def get_unet(self, use_distributed_data_parallel=True):
        """Create a new network and return it. If the machine has multiple GPUs,
        wrap the network in DistributedDataParallel on this process's GPU.
        """
        net = UNet(n_channels=self.channel_count, n_classes=1, bilinear=True,
                   running_on_gpu=(self.gpu_number is not None))
        net.to(self.device)
        if not use_distributed_data_parallel:
            return net
        net = nn.parallel.DistributedDataParallel(net, device_ids=[self.gpu_number])
        return net
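get_unet() assumes torch.distributed has already been initialized in this process; a minimal sketch of that setup (backend and launcher are assumptions):

import torch.distributed as dist

# Hypothetical per-process setup, e.g. under `torchrun --nproc_per_node=N`:
# each process joins the default group before get_unet() is called.
dist.init_process_group(backend="nccl")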
def test():
    device = torch.device(conf.cuda if torch.cuda.is_available() else "cpu")
    test_dataset = Testinging_Dataset(conf.data_path_test,
                                      conf.test_noise_param,
                                      conf.crop_img_size)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
    print('Loading model from: {}'.format(conf.model_path_test))
    model = UNet(in_channels=conf.img_channel, out_channels=conf.img_channel)
    print('loading model')
    model.load_state_dict(torch.load(conf.model_path_test))
    model.eval()
    model.to(device)
    result_dir = conf.denoised_dir
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    for batch_idx, (source, img_cropped) in enumerate(test_loader):
        source_img = tvF.to_pil_image(source.squeeze(0))
        img_truth = img_cropped.squeeze(0).numpy().astype(np.uint8)
        source = source.to(device)
        denoised_img = model(source).detach().cpu()

        img_name = test_loader.dataset.image_list[batch_idx]

        denoised_result = tvF.to_pil_image(
            torch.clamp(denoised_img.squeeze(0), 0, 1))
        fname = os.path.splitext(img_name)[0]

        source_img.save(os.path.join(result_dir, f'{fname}-noisy.png'))
        denoised_result.save(os.path.join(result_dir, f'{fname}-denoised.png'))
        io.imsave(os.path.join(result_dir, f'{fname}-ground_truth.png'),
                  img_truth)
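test() reads everything from a module-level conf object that this excerpt does not include. A minimal stand-in (every name and value below is an assumption) would be:

from types import SimpleNamespace

# Hypothetical configuration mirroring the attributes test() reads.
conf = SimpleNamespace(
    cuda='cuda:0',
    data_path_test='./data/test',
    test_noise_param=25,
    crop_img_size=256,
    model_path_test='./checkpoints/model.pth',
    img_channel=3,
    denoised_dir='./results',
)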
Example #4
def select_model(model_name, init_msg):
    logger = get_logger()
    logger.info(init_msg)
    if model_name == "SETR-PUP":
        _, model = get_SETR_PUP()
    elif model_name == "SETR-MLA":
        _, model = get_SETR_MLA()
    elif model_name == "TransUNet-Base":
        model = get_TransUNet_base()
    elif model_name == "TransUNet-Large":
        model = get_TransUNet_large()
    elif model_name == "UNet":
        model = UNet(CLASS_NUM)
    else:
        raise ValueError("Unknown model name: {}".format(model_name))
    model = model.to(device)

    return logger, model
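A typical call site for the function above (the message string is illustrative):

logger, model = select_model("UNet", "(1) Initializing UNet ...")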
Example #5
    def __init__(self, channels=3, branches=2):
        super(PDNet, self).__init__()
        kernel_size = 3
        padding = 1
        features = 64
        self.branches = branches
        self.block1 = nn.Sequential(
            nn.Conv2d(in_channels=channels,
                      out_channels=features,
                      kernel_size=kernel_size,
                      stride=1,
                      padding=padding,
                      bias=False), nn.LeakyReLU(0.2, inplace=True))
        self.block2 = UNet()  #convBlock()
        self.block3 = nn.Conv2d(in_channels=features,
                                out_channels=channels,
                                kernel_size=kernel_size,
                                stride=1,
                                padding=padding,
                                bias=False)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
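The excerpt stops at __init__; a plausible forward pass consistent with the three blocks (the residual formulation is an assumption, in the style of DnCNN-type denoisers):

    def forward(self, x):
        # Assumed composition of the blocks defined above: shallow feature
        # extraction, UNet body, projection back to image space.
        out = self.block1(x)
        out = self.block2(out)
        out = self.block3(out)
        # Treating the output as predicted noise (residual learning) is an
        # assumption, not confirmed by the excerpt.
        return x - out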
Example #6
    def get_unet(config):
        """
        Returns a UNet nn.Module that satisfies the fcn properties stated in the get_fcn() docstring
        """
        dc_source_dir = utils.getDenseCorrespondenceSourceDir()
        sys.path.append(os.path.join(dc_source_dir, 'external/unet-pytorch'))
        from unet_model import UNet
        model = UNet(num_classes=config["descriptor_dimension"]).cuda()
        return model
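Calling it only needs the one key the function reads (the value is illustrative):

model = get_unet({"descriptor_dimension": 3})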
Example #7
    def __init__(self, configs, env):
        self.configs = configs
        self.env = env
        self.action_size = (64, 1024)

        # n_channels=3 for RGB images
        # n_classes is the number of probabilities you want to get per pixel
        #   - For 1 class and background, use n_classes=1
        #   - For 2 classes, use n_classes=1
        #   - For N > 2 classes, use n_classes=N

        # TODO now I assume input<->output size are equal, which might not be true, so we need some modifications to UNet if necessary

        self.actor = UNet(
            n_channels=3, n_classes=1, bilinear=True
        )  # [B,C, H_in=372, W_in=1242] -> [B, C, H_out=64, W_out=1024]
        self.optimizer = Adam(self.actor.parameters(), lr=configs['lr'])
        self.actor.to(device)
Example #8
    def __init__(self, params):
        self.params = params

        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.model_path = params['model_path']

        self.model = UNet(in_channels=3, out_channels=1)

        self.threshold = 0.5

        self.resume()
        # self.model.eval()

        self.transform = get_transforms_3()

        self.is_resize = True
        self.image_short_side = 1024
        self.init_torch_tensor()
        self.model.eval()
Example #9
    def __init__(self):
        super(Attention, self).__init__()
        self.im_dim = 112
        self.im_channel = 3
        self.feature_channels = 64
        self.attention_channels = 128
        self.unet = UNet(n_channels=self.im_channel)
        self.attention = GatedAttentionLayer(self.feature_channels,
                                             self.attention_channels)
        self.classifier = nn.Sequential(
            nn.Linear(self.feature_channels, 1),  # 64*1
            nn.Sigmoid())
Example #10
def train():
    device = torch.device(conf.cuda if torch.cuda.is_available() else "cpu")
    dataset = Training_Dataset(conf.data_path_train, conf.gaussian_noise_param,
                               conf.crop_img_size)
    dataset_length = len(dataset)
    train_loader = DataLoader(dataset,
                              batch_size=4,
                              shuffle=True,
                              num_workers=4)
    model = UNet(in_channels=conf.img_channel, out_channels=conf.img_channel)
    criterion = nn.MSELoss()
    model = model.to(device)
    optim = Adam(model.parameters(),
                 lr=conf.learning_rate,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 weight_decay=0,
                 amsgrad=True)
    scheduler = lr_scheduler.StepLR(optim, step_size=100, gamma=0.5)
    model.train()
    print(model)
    print("Starting Training Loop...")
    since = time.time()
    for epoch in range(conf.max_epoch):
        print('Epoch {}/{}'.format(epoch, conf.max_epoch - 1))
        print('-' * 10)
        running_loss = 0.0
        for batch_idx, (source, target) in enumerate(train_loader):

            source = source.to(device)
            target = target.to(device)
            optim.zero_grad()

            denoised_source = model(source)
            loss = criterion(denoised_source, target)
            loss.backward()
            optim.step()

            running_loss += loss.item() * source.size(0)
            print('Current loss {} and current batch idx {}'.format(
                loss.item(), batch_idx))
        scheduler.step()  # step the LR schedule after the optimizer updates
        epoch_loss = running_loss / dataset_length
        print('Epoch {} loss: {:.4f}'.format(epoch, epoch_loss))
        if (epoch + 1) % conf.save_per_epoch == 0:
            save_model(model, epoch + 1)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
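train() calls a save_model() helper that is outside this excerpt. A minimal sketch, assuming checkpoints live in a conf-named directory (conf.save_dir is an assumption):

def save_model(model, epoch):
    # Hypothetical helper: persist the model weights after `epoch` epochs.
    save_dir = getattr(conf, 'save_dir', './checkpoints')
    os.makedirs(save_dir, exist_ok=True)
    torch.save(model.state_dict(),
               os.path.join(save_dir, 'model_epoch_{}.pth'.format(epoch)))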
Example #11
def infer(model_path, image_size, input_dir, output_dir, batch_size=1):

    test_generator = DataGenerator(root_dir=input_dir,
                                   image_folder='input/',
                                   mask_folder='output/',
                                   batch_size=batch_size,
                                   nb_y_features=1,
                                   augmentation=None,
                                   shuffle=False)

    model = UNet.get_unet(image_size)
    model.load_weights(model_path)

    num_images = len(os.listdir(os.path.join(input_dir, 'input')))

    for i in range(num_images):
        x_test, y_test, image_name = test_generator.__getitem__(i)
        predicted = model.predict(np.expand_dims(x_test[0], axis=0)).reshape(
            image_size, image_size)
        imsave(os.path.join(output_dir, image_name[0]), predicted)
Example #12
def train(tr_dir, test_dir, model_path, image_size, epochs, batch_size):

    train_generator = DataGenerator(root_dir=tr_dir,
                                    image_folder='input/',
                                    mask_folder='output/',
                                    batch_size=batch_size,
                                    nb_y_features=1,
                                    augmentation=aug_with_crop,
                                    is_training=True)

    test_generator = DataGenerator(root_dir=test_dir,
                                   image_folder='input/',
                                   mask_folder='output/',
                                   batch_size=batch_size,
                                   nb_y_features=1,
                                   augmentation=None,
                                   is_training=True)

    mode_autosave = ModelCheckpoint(model_path,
                                    monitor='val_iou_score',
                                    mode='max',
                                    save_best_only=True,
                                    verbose=1,
                                    period=10)

    early_stopping = EarlyStopping(patience=10, verbose=1, mode='auto')

    callbacks = [early_stopping, mode_autosave]

    model = UNet.get_unet(image_size)
    model.fit_generator(train_generator,
                        shuffle=True,
                        epochs=epochs,
                        use_multiprocessing=False,
                        validation_data=test_generator,
                        verbose=1,
                        callbacks=callbacks)
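ModelCheckpoint monitors val_iou_score, so UNet.get_unet() presumably returns a model compiled with an IoU metric; if it does not, a compile step along these lines would be needed (a sketch, with segmentation_models' iou_score as an assumed choice):

from segmentation_models.metrics import iou_score

# Hypothetical compile step; loss choice assumes binary masks (nb_y_features=1).
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=[iou_score])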
Example #13
def submit_mnms(model_path, input_data_directory, output_data_directory,
                device):

    data_paths = load_path(input_data_directory)

    net = UNet(n_channels=1, n_classes=4, bilinear=True)
    net.load_state_dict(torch.load(model_path, map_location=device))
    net.to(device)

    for path in data_paths:
        ED_np, ES_np = load_phase(path)  # HxWxF
        ED_masks = []
        ES_masks = []
        for i in range(ED_np.shape[2]):
            img_np = ED_np[:, :, i]
            img_tensor = pre_transform(img_np)
            img_tensor = img_tensor.to(device)

            mask = predict_img(net, img_tensor)

            mask = post_transform(img_np, mask[0:3, :, :])
            ED_masks.append(mask)

        for i in range(ES_np.shape[2]):
            img_np = ES_np[:, :, i]
            img_tensor = pre_transform(img_np)
            img_tensor = img_tensor.to(device)

            mask = predict_img(net, img_tensor)

            mask = post_transform(img_np, mask[0:3, :, :])
            ES_masks.append(mask)

        ED_masks = np.concatenate(ED_masks, axis=2)
        ES_masks = np.concatenate(ES_masks, axis=2)
        save_phase(ED_masks, ES_masks, output_data_directory, path)
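The ED and ES loops above are line-for-line identical; a small helper (hypothetical, not in the source) would remove the duplication:

def predict_phase(net, phase_np, device):
    # Segment one cardiac phase (HxWxF volume) slice by slice.
    masks = []
    for i in range(phase_np.shape[2]):
        img_np = phase_np[:, :, i]
        img_tensor = pre_transform(img_np).to(device)
        mask = predict_img(net, img_tensor)
        masks.append(post_transform(img_np, mask[0:3, :, :]))
    return np.concatenate(masks, axis=2)
    # usage: ED_masks = predict_phase(net, ED_np, device)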
Example #14
	return tot_real_error+tot_fake_error


year_test = 2018
data_path = './Data/Split' + str(year_test) + '/train/'
save_dir = './Output/' + str(year_test) + '/'
model_path = './models/' + str(year_test) + '/'
bs = 6

# Create the directories if they do not exist
if not os.path.exists(save_dir):
	os.makedirs(save_dir)
if not os.path.exists(model_path):
	os.makedirs(model_path)

generator = UNet(n_channels=3, out_channels=1)
discriminator_g = GlobalDiscriminator()
discriminator_l = LocalDiscriminator()

resume = False
if len(sys.argv) > 1 and sys.argv[1] == 'resume':
	resume = True

# Load model if available
if resume:
	print('Resuming training....')
	generator.load_state_dict(torch.load(os.path.join(model_path,'model_gen_latest')))
	discriminator_g.load_state_dict(torch.load(os.path.join(model_path,'model_gdis_latest')))
	discriminator_l.load_state_dict(torch.load(os.path.join(model_path,'model_ldis_latest')))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
Example #15
    X_train, X_test, y_train, y_test = mf.get_images()
    X_train = np.rollaxis(X_train, 3, 1)
    y_train = np.rollaxis(y_train, 3, 1)
    X_test = np.rollaxis(X_test, 3, 1)
    y_test = np.rollaxis(y_test, 3, 1)

    train_data = ImageDataset(X_train,y_train)
    test_data = ImageDataset(X_test,y_test)
    train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, pin_memory=False) # better than for loop  
    test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, pin_memory=False) # better than for loop
    

    model = UNet(n_channels=n_channels, n_classes=1)
    print("{} paramerters in total".format(sum(x.numel() for x in model.parameters())))
    if have_cuda:
        model.cuda(cuda)
    optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate,  betas=(0.9, 0.999))

#    loss_all = np.zeros((2000, 4))
    for epoch in range(2000):
        lr = get_learning_rate(epoch)
        for p in optimizer.param_groups:
            p['lr'] = lr
            print("learning rate = {}".format(p['lr']))
        for batch_idx, items in enumerate(train_dataloader):
            image = items[0]
            gt = items[1]
            model.train()
Example #16
    learning_rate = 0.001
    # momentum = 0.99
    # weight_decay = 0.0001
    batch_size = 1

    SRRFDATASET = ReconsDataset(
        test_in_path=
        "/home/star/0_code_lhj/DL-SIM-github/TESTING_DATA/microtuble/HE_X2/",
        transform=ToTensor(),
        img_type='tif',
        in_size=256)
    test_dataloader = torch.utils.data.DataLoader(
        SRRFDATASET, batch_size=batch_size, shuffle=True,
        pin_memory=True)  # better than for loop

    model = UNet(n_channels=3, n_classes=1)

    print("{} paramerters in total".format(
        sum(x.numel() for x in model.parameters())))
    model.cuda(cuda)
    model.load_state_dict(
        torch.load(
            "/home/star/0_code_lhj/DL-SIM-github/MODELS/UNet_SIM3_microtubule.pkl"
        ))
    model.eval()

    for batch_idx, items in enumerate(test_dataloader):

        image = items['image_in']
        image_name = items['image_name']
        print(image_name[0])
Example #17
X_test = np.rollaxis(X_test, 3, 1)
y_test = np.rollaxis(y_test, 3, 1)

#train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, pin_memory=False) # better than for loop
#val_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, pin_memory=False) # better than for loop

#torch.autograd.set_detect_anomaly(True)
if True:  # __name__ == '__main__':
    #opt = parser.parse_args()

    #train_set = TrainDatasetFromFolder('data/DIV2K_train_HR', crop_size=CROP_SIZE, upscale_factor=UPSCALE_FACTOR)
    #val_set = ValDatasetFromFolder('data/DIV2K_valid_HR', upscale_factor=UPSCALE_FACTOR)
    #train_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=64, shuffle=True)
    #val_loader = DataLoader(dataset=val_set, num_workers=4, batch_size=1, shuffle=False)

    netG = UNet(n_channels=15, n_classes=1)
    #print(summary(netG,(15,128,128)))
    print('# generator parameters:',
          sum(param.numel() for param in netG.parameters()))
    netD = Discriminator()
    print('# discriminator parameters:',
          sum(param.numel() for param in netD.parameters()))
    #print(summary(netD,(1,256,256)))

    generator_criterion = GeneratorLoss()

    if torch.cuda.is_available():
        netG.cuda()
        netD.cuda()
        generator_criterion.cuda()
Example #18
if __name__ == '__main__':
    # underwater image
    image_u = tf.placeholder(dtype=tf.float32, shape=[1, 256, 256, 3], name='image_u')
    # correct image
    image_r = tf.placeholder(dtype=tf.float32, shape=[1, 256, 256, 3], name='image_r')
    # load test image
    test_image = normalize_image(misc.imresize(misc.imread(image_path), size=(256, 256), interp='cubic'))
    print(test_image)
    real_image = normalize_image(misc.imresize(misc.imread(gt_image_path), size=(256, 256), interp='cubic'))
    print(real_image)
    test_image_np = np.empty(shape=[1, 256, 256, 3], dtype=np.float32)
    test_image_np[0, :, :, :] = test_image
    test_image_tf = tf.convert_to_tensor(test_image_np)
    # load model
    U_NET = UNet(input_=image_u, real_=image_r, is_training=False)
    gen_image = U_NET.u_net(inputs=test_image_tf, training=False)
    # load weight
    saver = tf.train.Saver(max_to_keep=1)
    ckpt = tf.train.get_checkpoint_state(ckpt_path)
    # generating
    begin_time = time.time()
    with tf.Session() as sess:
        saver.restore(sess=sess, save_path=ckpt.model_checkpoint_path)
        # gen_image = U_NET.u_net(inputs=test_image, training=False)
        gen = np.asarray(sess.run(gen_image), dtype=np.float32)
        print(gen.shape)
        print(gen[0])
    end_time = time.time()
    print("Time cost: %f" % (end_time - begin_time))
Example #19
    def __init__(self, configs):
        self.batch_size = configs.get("batch_size", 16)
        self.epochs = configs.get("epochs", 100)
        self.lr = configs.get("lr", 0.0001)

        device_args = configs.get("device", "cuda")
        self.device = torch.device(
            "cpu" if not torch.cuda.is_available() else device_args)

        self.workers = configs.get("workers", 4)

        self.vis_images = configs.get("vis_images", 200)
        self.vis_freq = configs.get("vis_freq", 10)

        self.weights = configs.get("weights", "./weights")
        if not os.path.exists(self.weights):
            os.mkdir(self.weights)

        self.logs = configs.get("logs", "./logs")
        if not os.path.exists(self.logs):
            os.mkdir(self.logs)

        self.images_path = configs.get("images_path", "./data")

        self.is_resize = configs.get("is_resize", False)
        self.image_short_side = configs.get("image_short_side", 256)

        self.is_padding = configs.get("is_padding", False)

        is_multi_gpu = configs.get("DateParallel", False)

        pre_train = configs.get("pre_train", False)
        model_path = configs.get("model_path", './weights/unet_idcard_adam.pth')

        # self.image_size = configs.get("image_size", "256")
        # self.aug_scale = configs.get("aug_scale", "0.05")
        # self.aug_angle = configs.get("aug_angle", "15")

        self.step = 0

        self.dsc_loss = DiceLoss()
        self.model = UNet(in_channels=Dataset.in_channels,
                          out_channels=Dataset.out_channels)
        if pre_train:
            self.model.load_state_dict(torch.load(model_path,
                                                  map_location=self.device),
                                       strict=False)

        if is_multi_gpu:
            self.model = nn.DataParallel(self.model)

        self.model.to(self.device)

        self.best_validation_dsc = 0.0

        self.loader_train, self.loader_valid = self.data_loaders()

        self.params = [p for p in self.model.parameters() if p.requires_grad]

        self.optimizer = optim.Adam(self.params,
                                    lr=self.lr,
                                    weight_decay=0.0005)
        # self.optimizer = torch.optim.SGD(self.params, lr=self.lr, momentum=0.9, weight_decay=0.0005)
        self.scheduler = lr_scheduler.LR_Scheduler_Head(
            'poly', self.lr, self.epochs, len(self.loader_train))
Example #20
class Train(object):
    def __init__(self, configs):
        self.batch_size = configs.get("batch_size", 16)
        self.epochs = configs.get("epochs", 100)
        self.lr = configs.get("lr", 0.0001)

        device_args = configs.get("device", "cuda")
        self.device = torch.device(
            "cpu" if not torch.cuda.is_available() else device_args)

        self.workers = configs.get("workers", 4)

        self.vis_images = configs.get("vis_images", 200)
        self.vis_freq = configs.get("vis_freq", 10)

        self.weights = configs.get("weights", "./weights")
        if not os.path.exists(self.weights):
            os.mkdir(self.weights)

        self.logs = configs.get("logs", "./logs")
        if not os.path.exists(self.logs):
            os.mkdir(self.logs)

        self.images_path = configs.get("images_path", "./data")

        self.is_resize = configs.get("is_resize", False)
        self.image_short_side = configs.get("image_short_side", 256)

        self.is_padding = configs.get("is_padding", False)

        is_multi_gpu = configs.get("DateParallel", False)

        pre_train = configs.get("pre_train", False)
        model_path = configs.get("model_path", './weights/unet_idcard_adam.pth')

        # self.image_size = configs.get("image_size", "256")
        # self.aug_scale = configs.get("aug_scale", "0.05")
        # self.aug_angle = configs.get("aug_angle", "15")

        self.step = 0

        self.dsc_loss = DiceLoss()
        self.model = UNet(in_channels=Dataset.in_channels,
                          out_channels=Dataset.out_channels)
        if pre_train:
            self.model.load_state_dict(torch.load(model_path,
                                                  map_location=self.device),
                                       strict=False)

        if is_multi_gpu:
            self.model = nn.DataParallel(self.model)

        self.model.to(self.device)

        self.best_validation_dsc = 0.0

        self.loader_train, self.loader_valid = self.data_loaders()

        self.params = [p for p in self.model.parameters() if p.requires_grad]

        self.optimizer = optim.Adam(self.params,
                                    lr=self.lr,
                                    weight_decay=0.0005)
        # self.optimizer = torch.optim.SGD(self.params, lr=self.lr, momentum=0.9, weight_decay=0.0005)
        self.scheduler = lr_scheduler.LR_Scheduler_Head(
            'poly', self.lr, self.epochs, len(self.loader_train))

    def datasets(self):
        train_datasets = Dataset(
            images_dir=self.images_path,
            # image_size=self.image_size,
            subset="train",  # train
            transform=get_transforms(train=True),
            is_resize=self.is_resize,
            image_short_side=self.image_short_side,
            is_padding=self.is_padding)
        # valid_datasets = train_datasets

        valid_datasets = Dataset(
            images_dir=self.images_path,
            # image_size=self.image_size,
            subset="validation",  # validation
            transform=get_transforms(train=False),
            is_resize=self.is_resize,
            image_short_side=self.image_short_side,
            is_padding=False)
        return train_datasets, valid_datasets

    def data_loaders(self):
        dataset_train, dataset_valid = self.datasets()

        loader_train = DataLoader(
            dataset_train,
            batch_size=self.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=self.workers,
        )
        loader_valid = DataLoader(
            dataset_valid,
            batch_size=1,
            drop_last=False,
            num_workers=self.workers,
        )

        return loader_train, loader_valid

    @staticmethod
    def dsc_per_volume(validation_pred, validation_true):
        assert len(validation_pred) == len(validation_true)
        dsc_list = []
        for p in range(len(validation_pred)):
            y_pred = np.array([validation_pred[p]])
            y_true = np.array([validation_true[p]])
            dsc_list.append(dsc(y_pred, y_true))
        return dsc_list

    @staticmethod
    def get_logger(filename, verbosity=1, name=None):
        level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
        formatter = logging.Formatter(
            "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
        )
        logger = logging.getLogger(name)
        logger.setLevel(level_dict[verbosity])

        fh = logging.FileHandler(filename, "w")
        fh.setFormatter(formatter)
        logger.addHandler(fh)

        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        logger.addHandler(sh)

        return logger

    def train_one_epoch(self, epoch):

        self.model.train()
        loss_train = []
        for i, data in enumerate(self.loader_train):
            self.scheduler(self.optimizer, i, epoch, self.best_validation_dsc)
            x, y_true = data
            x, y_true = x.to(self.device), y_true.to(self.device)

            y_pred = self.model(x)
            # print('1111', y_pred.size())
            # print('2222', y_true.size())
            loss = self.dsc_loss(y_pred, y_true)

            loss_train.append(loss.item())

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # lr_scheduler.step()
            if self.step % 200 == 0:
                print('Epoch:[{}/{}]\t iter:[{}]\t loss={:.5f}\t '.format(
                    epoch, self.epochs, i, loss.item()))

            self.step += 1

    def eval_model(self, patience):
        self.model.eval()
        loss_valid = []

        validation_pred = []
        validation_true = []
        # early_stopping = EarlyStopping(patience=patience, verbose=True)

        for i, data in enumerate(self.loader_valid):
            x, y_true = data
            x, y_true = x.to(self.device), y_true.to(self.device)

            # print(x.size())
            # print(333,x[0][2])
            with torch.no_grad():
                y_pred = self.model(x)
                loss = self.dsc_loss(y_pred, y_true)

            # print(y_pred.shape)
            mask = y_pred > 0.5
            mask = mask * 255
            mask = mask.cpu().numpy()[0][0]
            # print(mask)
            # print(mask.shape())
            cv2.imwrite('result.png', mask)

            loss_valid.append(loss.item())

            y_pred_np = y_pred.detach().cpu().numpy()

            validation_pred.extend(
                [y_pred_np[s] for s in range(y_pred_np.shape[0])])
            y_true_np = y_true.detach().cpu().numpy()
            validation_true.extend(
                [y_true_np[s] for s in range(y_true_np.shape[0])])

        # early_stopping(loss_valid, self.model)
        # if early_stopping.early_stop:
        #     print('Early stopping')
        #     import sys
        #     sys.exit(1)
        mean_dsc = np.mean(
            self.dsc_per_volume(
                validation_pred,
                validation_true,
            ))
        # print('mean_dsc:', mean_dsc)
        if mean_dsc > self.best_validation_dsc:
            self.best_validation_dsc = mean_dsc
            torch.save(self.model.state_dict(),
                       os.path.join(self.weights, "unet_xia_adam.pth"))
            print("Best validation mean DSC: {:4f}".format(
                self.best_validation_dsc))

    def main(self):
        # print('train is begin.....')
        # print('load data end.....')

        # loaders = {"train": loader_train, "valid": loader_valid}

        for epoch in tqdm(range(self.epochs), total=self.epochs):
            self.train_one_epoch(epoch)
            self.eval_model(patience=10)

        torch.save(self.model.state_dict(),
                   os.path.join(self.weights, "unet_final.pth"))
Example #21
    HE_in_norm = 5315.0
    out_norm = 15383.0

    LE_img = imgread(os.path.join(dir_path_LE, "LE_01.tif"))

    LE_512 = cropImage(LE_img, IMG_SHAPE[0], IMG_SHAPE[1])
    sample_le = {}
    for le_512 in LE_512:
        tiles = crop_prepare(le_512, CROP_STEP, IMG_SIZE)
        for n, img in enumerate(tiles):
            if n not in sample_le:
                sample_le[n] = []
            img = transform.resize(img, (IMG_SIZE*2, IMG_SIZE*2), preserve_range=True, order=3)
            sample_le[n].append(img)

    SNR_model = UNet(n_channels=15, n_classes=15)
    print("{} parameters in total".format(sum(x.numel() for x in SNR_model.parameters())))
    SNR_model.cuda(cuda)
    SNR_model.load_state_dict(torch.load(SNR_model_path))
    # SNR_model.load_state_dict(torch.load(os.path.join(dir_path,"model","LE_HE_mito","LE_HE_0825.pkl")))
    SNR_model.eval()

    SIM_UNET = UNet(n_channels=15, n_classes=1)
    print("{} parameters in total".format(sum(x.numel() for x in SIM_UNET.parameters())))
    SIM_UNET.cuda(cuda)
    SIM_UNET.load_state_dict(torch.load(SIM_UNET_model_path))
    # SIM_UNET.load_state_dict(torch.load(os.path.join(dir_path,"model","HE_HER_mito","HE_X2_HER_0825.pkl")))
    SIM_UNET.eval()

    SRRFDATASET = ReconsDataset(
        img_dict=sample_le,
Example #22
def train(cont=False):

    # for tensorboard tracking
    logger = get_logger()
    logger.info("(1) Initiating Training ... ")
    logger.info("Training on device: {}".format(device))
    writer = SummaryWriter()

    # init model
    aux_layers = None
    if net == "SETR-PUP":
        aux_layers, model = get_SETR_PUP()
    elif net == "SETR-MLA":
        aux_layers, model = get_SETR_MLA()
    elif net == "TransUNet-Base":
        model = get_TransUNet_base()
    elif net == "TransUNet-Large":
        model = get_TransUNet_large()
    elif net == "UNet":
        model = UNet(CLASS_NUM)

    # prepare dataset
    cluster_model = get_clustering_model(logger)
    train_dataset = CityscapeDataset(img_dir=data_dir,
                                     img_dim=IMG_DIM,
                                     mode="train",
                                     cluster_model=cluster_model)
    valid_dataset = CityscapeDataset(img_dir=data_dir,
                                     img_dim=IMG_DIM,
                                     mode="val",
                                     cluster_model=cluster_model)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=batch_size,
                              shuffle=False)

    logger.info("(2) Dataset Initiated. ")

    # optimizer
    epochs = epoch_num if epoch_num > 0 else iteration_num // len(
        train_loader) + 1
    optim = SGD(model.parameters(),
                lr=lrate,
                momentum=momentum,
                weight_decay=wdecay)
    # optim = Adam(model.parameters(), lr=lrate)
    scheduler = lr_scheduler.MultiStepLR(
        optim, milestones=[int(epochs * fine_tune_ratio)], gamma=0.1)

    cur_epoch = 0
    best_loss = float('inf')
    epochs_since_improvement = 0

    # for continue training
    if cont:
        model, optim, cur_epoch, best_loss = load_ckpt_continue_training(
            best_ckpt_src, model, optim, logger)
        logger.info("Current best loss: {0}".format(best_loss))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            for i in range(cur_epoch):
                scheduler.step()
    else:
        model = nn.DataParallel(model)
        model = model.to(device)

    logger.info("(3) Model Initiated ... ")
    logger.info("Training model: {}".format(net) + ". Training Started.")

    # loss
    ce_loss = CrossEntropyLoss()
    if use_dice_loss:
        dice_loss = DiceLoss(CLASS_NUM)

    # loop over epochs
    iter_count = 0
    epoch_bar = tqdm.tqdm(total=epochs,
                          desc="Epoch",
                          position=cur_epoch,
                          leave=True)
    logger.info("Total epochs: {0}. Starting from epoch {1}.".format(
        epochs, cur_epoch + 1))

    for e in range(epochs - cur_epoch):
        epoch = e + cur_epoch

        # Training.
        model.train()
        trainLossMeter = LossMeter()
        train_batch_bar = tqdm.tqdm(total=len(train_loader),
                                    desc="TrainBatch",
                                    position=0,
                                    leave=True)

        for batch_num, (orig_img, mask_img) in enumerate(train_loader):
            orig_img, mask_img = orig_img.float().to(
                device), mask_img.float().to(device)

            if net == "TransUNet-Base" or net == "TransUNet-Large":
                pred = model(orig_img)
            elif net == "SETR-PUP" or net == "SETR-MLA":
                if aux_layers is not None:
                    pred, _ = model(orig_img)
                else:
                    pred = model(orig_img)
            elif net == "UNet":
                pred = model(orig_img)

            loss_ce = ce_loss(pred, mask_img[:].long())
            if use_dice_loss:
                loss_dice = dice_loss(pred, mask_img, softmax=True)
                loss = 0.5 * (loss_ce + loss_dice)
            else:
                loss = loss_ce

            # Backward Propagation, Update weight and metrics
            optim.zero_grad()
            loss.backward()
            optim.step()

            # update learning rate
            for param_group in optim.param_groups:
                orig_lr = param_group['lr']
                param_group['lr'] = orig_lr * (1.0 -
                                               iter_count / iteration_num)**0.9
            iter_count += 1

            # Update loss
            trainLossMeter.update(loss.item())

            # print status
            if (batch_num + 1) % print_freq == 0:
                status = 'Epoch: [{0}][{1}/{2}]\t' \
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch+1, batch_num+1, len(train_loader), loss=trainLossMeter)
                logger.info(status)

            # log loss to tensorboard
            if (batch_num + 1) % tensorboard_freq == 0:
                writer.add_scalar(
                    'Train_Loss_{0}'.format(tensorboard_freq),
                    trainLossMeter.avg,
                    epoch * (len(train_loader) / tensorboard_freq) +
                    (batch_num + 1) / tensorboard_freq)
            train_batch_bar.update(1)

        writer.add_scalar('Train_Loss_epoch', trainLossMeter.avg, epoch)

        # Validation.
        model.eval()
        validLossMeter = LossMeter()
        valid_batch_bar = tqdm.tqdm(total=len(valid_loader),
                                    desc="ValidBatch",
                                    position=0,
                                    leave=True)
        with torch.no_grad():
            for batch_num, (orig_img, mask_img) in enumerate(valid_loader):
                orig_img, mask_img = orig_img.float().to(
                    device), mask_img.float().to(device)

                if net == "TransUNet-Base" or net == "TransUNet-Large":
                    pred = model(orig_img)
                elif net == "SETR-PUP" or net == "SETR-MLA":
                    if aux_layers is not None:
                        pred, _ = model(orig_img)
                    else:
                        pred = model(orig_img)
                elif net == "UNet":
                    pred = model(orig_img)

                loss_ce = ce_loss(pred, mask_img[:].long())
                if use_dice_loss:
                    loss_dice = dice_loss(pred, mask_img, softmax=True)
                    loss = 0.5 * (loss_ce + loss_dice)
                else:
                    loss = loss_ce

                # Update loss
                validLossMeter.update(loss.item())

                # print status
                if (batch_num + 1) % print_freq == 0:
                    status = 'Validation: [{0}][{1}/{2}]\t' \
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch+1, batch_num+1, len(valid_loader), loss=validLossMeter)
                    logger.info(status)

                # log loss to tensorboard
                if (batch_num + 1) % tensorboard_freq == 0:
                    writer.add_scalar(
                        'Valid_Loss_{0}'.format(tensorboard_freq),
                        validLossMeter.avg,
                        epoch * (len(valid_loader) / tensorboard_freq) +
                        (batch_num + 1) / tensorboard_freq)
                valid_batch_bar.update(1)

        valid_loss = validLossMeter.avg
        writer.add_scalar('Valid_Loss_epoch', valid_loss, epoch)
        logger.info("Validation Loss of epoch [{0}/{1}]: {2}\n".format(
            epoch + 1, epochs, valid_loss))

        # update optim scheduler
        scheduler.step()

        # save checkpoint
        is_best = valid_loss < best_loss
        best_loss_tmp = min(valid_loss, best_loss)
        if not is_best:
            epochs_since_improvement += 1
            logger.info("Epochs since last improvement: %d\n" %
                        (epochs_since_improvement, ))
            if epochs_since_improvement == early_stop_tolerance:
                break  # early stopping.
        else:
            epochs_since_improvement = 0
            state = {
                'epoch': epoch,
                'loss': best_loss_tmp,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optim.state_dict(),
            }
            torch.save(state, ckpt_src)
            logger.info("Checkpoint updated.")
            best_loss = best_loss_tmp
        epoch_bar.update(1)
    writer.close()
Example #23
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
from torchvision.utils import make_grid, save_image
from PIL import Image
from unet_model import UNet
import os
import numpy as np
import random

input_dir = "../Test_Data/"
output_dir = "../Generated_Test/"
model_path = "models/model_gen_latest"

generator = UNet(n_channels=3, n_classes=2)
generator.load_state_dict(torch.load(model_path))
generator.eval()

for filename in random.sample(os.listdir(input_dir),
                              len(os.listdir(input_dir))):

    img = Image.open(os.path.join(input_dir, filename))
    # img = normalize(img)
    img = torch.stack([
        transforms.Compose(
            [transforms.Resize((75, 210)),
             transforms.ToTensor()])(img)
    ])

    output_img = generator(img)
Example #24
    def __init__(self):
        super().__init__()
        self.convnet = UNet(in_channels=4, out_channels=12)
        self.upscaling = PixelShuffle(upscale_factor=2)
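The forward pass is not shown; a plausible composition of the two modules (an assumption — 12 channels in, PixelShuffle(2) yields 3 channels at twice the resolution):

    def forward(self, x):
        # UNet maps 4 -> 12 channels; PixelShuffle(2) rearranges those
        # 12 channels into 3 channels at 2x spatial resolution.
        return self.upscaling(self.convnet(x))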
Example #25
if __name__ == "__main__":
    trainset = datasetDSTL(dir_path,
                           inputPath,
                           channel='rgb',
                           res=(_IMAGE_SIZE_, _IMAGE_SIZE_))
    trainloader = DataLoader(trainset,
                             batch_size=4,
                             shuffle=True,
                             num_workers=4)

    classes = ('Buildings', 'MiscMan-made', 'Road', 'Track', 'Trees', 'Crops',
               'Waterway', 'Standing_Water', 'Vehicle_Large', 'Vehicle_Small')

    # Model definition
    model = UNet(n_classes=len(classes), in_channels=_NUM_CHANNELS_)
    if torch.cuda.device_count() >= 1:
        print("Training model on ", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)

    # Loss function and Optimizer definitions
    criterion = nn.BCELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    # Network training
    epoch_data = {}
    for epoch in range(_NUM_EPOCHS_):
        epoch_loss = 0.0
        epoch_data[epoch] = {}
        for i, data in enumerate(trainloader, 0):
            # Get the inputs for the network
Example #26
                                maximum_intensity_4normalization_path="/home/star/0_code_lhj/DL-SIM-github/Training_codes/UNet/Max_intensity.npy",
                                transform = ToTensor(),
                                training_dataset = True,
                                in_size = 320,
                                train_in_size = input_size)
    train_dataloader = torch.utils.data.DataLoader(SRRFDATASET, batch_size=batch_size, shuffle=True, pin_memory=True) # better than for loop
    
    SRRFDATASET2 = ReconsDataset(all_data_path="/media/star/LuhongJin/UNC_data/SRRF/New_training_20190829/0NPY_Dataset/Dataset/Microtubule/",
                                maximum_intensity_4normalization_path="/home/star/0_code_lhj/DL-SIM-github/Training_codes/UNet/Max_intensity.npy",
                                transform = ToTensor(),
                                training_dataset = False,
                                in_size = 320,
                                train_in_size = input_size)
    validation_dataloader = torch.utils.data.DataLoader(SRRFDATASET2, batch_size=batch_size, shuffle=True, pin_memory=True) # better than for loop

    model = UNet(n_channels=input_size, n_classes=output_size)

    print("{} paramerters in total".format(sum(x.numel() for x in model.parameters())))
    model.cuda(cuda)
    optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate,  betas=(0.9, 0.999))

    loss_all = np.zeros((2000, 4))
    for epoch in range(2000):
        
        mae_m, mae_s = val_during_training(train_dataloader)
        loss_all[epoch,0] = mae_m
        loss_all[epoch,1] = mae_s
        mae_m, mae_s = val_during_training(validation_dataloader) 
        loss_all[epoch,2] = mae_m
        loss_all[epoch,3] = mae_s
        
Example #27
    # underwater image
    image_u = tf.placeholder(dtype=tf.float32,
                             shape=[None, 256, 256, 3],
                             name='image_u')

    # correct image
    image_r = tf.placeholder(dtype=tf.float32,
                             shape=[None, 256, 256, 3],
                             name='image_r')

    training_flag = tf.placeholder(tf.bool)
    learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')
    lr_sum = tf.summary.scalar('lr', learning_rate)

    # generated color image by u-net
    U_NET = UNet(input_=image_u, real_=image_r, is_training=training_flag)
    gen_image = U_NET.u_net(inputs=image_u, training=training_flag)
    G_sum = tf.summary.image("gen_image", gen_image, max_outputs=10)

    # loss of u-net
    errG = U_NET.l1_loss(gt=image_r, gen=gen_image)
    # errG = U_NET.mse_loss(gt=image_r, gen=gen_image)
    # errG = U_NET.ssim_loss(gt=image_r, gen=gen_image)
    # errG = U_NET.msssim_loss(gt=image_r, gen=gen_image)
    # errG = U_NET.gdl_loss(gt=image_r, gen=gen_image)
    # errG = U_NET.l2_l1_loss(gt=image_r, gen=gen_image, alpha=0.8)
    # errG = U_NET.ssim_l1_loss(gt=image_r, gen=gen_image, alpha=0.8)
    # errG = U_NET.msssim_l1_loss(gt=image_r, gen=gen_image, alpha=0.8)
    # errG = U_NET.gdl_l1_loss(gt=image_r, gen=gen_image, alpha=0.8)

    errG_sum = tf.summary.scalar("loss", errG)
Example #28
    transform = transforms.Compose([
        # transforms.Resize(image_size),
        # transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),
    ])

    img = transform(img)
    img = img.unsqueeze(0)


    def get_layer_param(model):
        return sum([torch.numel(param) for param in model.parameters()])


    net = UNet(1, 3).to(device)
    print(net)
    print('parameters:', get_layer_param(net))

    print("Loading checkpoint...")
    checkpoint = torch.load(ckpt_path)
    net.load_state_dict(checkpoint['net_state_dict'])
    net.eval()

    print("Starting Test...")
    # -----------------------------------------------------------
    # Initial batch
    data_A = img.to(device)
    # -----------------------------------------------------------
    # Generate fake img:
    fake_B = net(data_A)
Example #29
    writer = SummaryWriter()

    image = Image.open('./ht2-c2.jpg')
    out = TF.to_tensor(image)
    out = out.reshape(1, 3, 640, 640)
    inp = torch.rand(1, 3, 640, 640)

    fig = plt.figure()
    plt.imshow(out[0].permute(1, 2, 0).numpy())
    # plt.show
    writer.add_figure("Ground Truth", fig)

    fig = plt.figure()
    plt.imshow(inp[0].permute(1, 2, 0).numpy())
    writer.add_figure("Input", fig)

    num_iter = 500
    writer.add_scalar("Number_of_Iterations", num_iter)

    model = UNet(3, 3)
    if torch.cuda.is_available():
        model.cuda()

    criterion = nn.MSELoss()

    learning_rate = 0.1
    writer.add_scalar("Learning_Rate", learning_rate)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    train(num_iter, inp, out, model, optimizer, criterion)
    writer.close()
Example #30
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

####
trainx_cropped = np.load('/home/data/IIRS/PS1-Project/src/trainX.npy')
####
trainy_hot = np.load('/home/data/IIRS/PS1-Project/src/trainYHot.npy')
####
validationx_cropped = np.load('/home/data/IIRS/PS1-Project/src/trainXVal.npy')
####
validationy_hot = np.load('/home/data/IIRS/PS1-Project/src/trainYValHot.npy')
####

print('--------------Loaded Data----------------')

# Instantiate the UNet model
model = UNet()

# Train the model
history = model.fit(trainx_cropped,
                    trainy_hot,
                    validation_data=(validationx_cropped, validationy_hot),
                    epochs=50,
                    batch_size=16,
                    verbose=1)

# Save the model
model.save("/home/data/IIRS/PS1-Project/src/trained_model.h5")

print('--------------Training Completed----------------')

# list all data in history
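The snippet is cut off after this comment; it conventionally precedes inspecting the Keras History object, e.g. (an assumption):

print(history.history.keys())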