def train():
    device = torch.device(conf.cuda if torch.cuda.is_available() else "cpu")
    dataset = Training_Dataset(conf.data_path_train, conf.gaussian_noise_param,
                               conf.crop_img_size)
    dataset_length = len(dataset)
    train_loader = DataLoader(dataset,
                              batch_size=4,
                              shuffle=True,
                              num_workers=4)
    model = UNet(in_channels=conf.img_channel, out_channels=conf.img_channel)
    criterion = nn.MSELoss()
    model = model.to(device)
    optim = Adam(model.parameters(),
                 lr=conf.learning_rate,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 weight_decay=0,
                 amsgrad=True)
    scheduler = lr_scheduler.StepLR(optim, step_size=100, gamma=0.5)
    model.train()
    print(model)
    print("Starting Training Loop...")
    since = time.time()
    for epoch in range(conf.max_epoch):
        print('Epoch {}/{}'.format(epoch, conf.max_epoch - 1))
        print('-' * 10)
        running_loss = 0.0
        for batch_idx, (source, target) in enumerate(train_loader):

            source = source.to(device)
            target = target.to(device)
            optim.zero_grad()

            denoised_source = model(source)
            loss = criterion(denoised_source, target)
            loss.backward()
            optim.step()

            running_loss += loss.item() * source.size(0)
            print('Current loss {} and current batch idx {}'.format(
                loss.item(), batch_idx))
        # step the LR scheduler once per epoch, after the optimizer updates
        scheduler.step()
        epoch_loss = running_loss / dataset_length
        print('Epoch {} Loss: {:.4f}'.format(epoch, epoch_loss))
        if (epoch + 1) % conf.save_per_epoch == 0:
            save_model(model, epoch + 1)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
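
# NOTE: save_model() is called in the loop above but is not defined in this
# snippet. A minimal sketch, assuming checkpoints are written under a
# conf.save_path directory (the attribute name is an assumption, not from the
# source):
import os

def save_model(model, epoch):
    os.makedirs(conf.save_path, exist_ok=True)
    ckpt_path = os.path.join(conf.save_path, 'unet_epoch_{}.pth'.format(epoch))
    torch.save(model.state_dict(), ckpt_path)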
Example #2
    out_norm = 15383.0

    LE_img = imgread(os.path.join(dir_path_LE, "LE_01.tif"))

    LE_512 = cropImage(LE_img, IMG_SHAPE[0], IMG_SHAPE[1])
    sample_le = {}
    for le_512 in LE_512:
        tiles = crop_prepare(le_512, CROP_STEP, IMG_SIZE)
        for n, img in enumerate(tiles):
            if n not in sample_le:
                sample_le[n] = []
            img = transform.resize(img, (IMG_SIZE * 2, IMG_SIZE * 2),
                                   preserve_range=True, order=3)
            sample_le[n].append(img)

    SNR_model = UNet(n_channels=15, n_classes=15)
    print("{} parameters in total".format(sum(x.numel() for x in SNR_model.parameters())))
    SNR_model.cuda(cuda)
    SNR_model.load_state_dict(torch.load(SNR_model_path))
    # SNR_model.load_state_dict(torch.load(os.path.join(dir_path,"model","LE_HE_mito","LE_HE_0825.pkl")))
    SNR_model.eval()

    SIM_UNET = UNet(n_channels=15, n_classes=1)
    print("{} parameters in total".format(sum(x.numel() for x in SIM_UNET.parameters())))
    SIM_UNET.cuda(cuda)
    SIM_UNET.load_state_dict(torch.load(SIM_UNET_model_path))
    # SIM_UNET.load_state_dict(torch.load(os.path.join(dir_path,"model","HE_HER_mito","HE_X2_HER_0825.pkl")))
    SIM_UNET.eval()

    SRRFDATASET = ReconsDataset(
        img_dict=sample_le,
        transform=ToTensor(),
Example #3
def train(cont=False):

    # for tensorboard tracking
    logger = get_logger()
    logger.info("(1) Initiating Training ... ")
    logger.info("Training on device: {}".format(device))
    writer = SummaryWriter()

    # init model
    aux_layers = None
    if net == "SETR-PUP":
        aux_layers, model = get_SETR_PUP()
    elif net == "SETR-MLA":
        aux_layers, model = get_SETR_MLA()
    elif net == "TransUNet-Base":
        model = get_TransUNet_base()
    elif net == "TransUNet-Large":
        model = get_TransUNet_large()
    elif net == "UNet":
        model = UNet(CLASS_NUM)

    # prepare dataset
    cluster_model = get_clustering_model(logger)
    train_dataset = CityscapeDataset(img_dir=data_dir,
                                     img_dim=IMG_DIM,
                                     mode="train",
                                     cluster_model=cluster_model)
    valid_dataset = CityscapeDataset(img_dir=data_dir,
                                     img_dim=IMG_DIM,
                                     mode="val",
                                     cluster_model=cluster_model)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=batch_size,
                              shuffle=False)

    logger.info("(2) Dataset Initiated. ")

    # optimizer
    epochs = epoch_num if epoch_num > 0 else iteration_num // len(
        train_loader) + 1
    optim = SGD(model.parameters(),
                lr=lrate,
                momentum=momentum,
                weight_decay=wdecay)
    # optim = Adam(model.parameters(), lr=lrate)
    scheduler = lr_scheduler.MultiStepLR(
        optim, milestones=[int(epochs * fine_tune_ratio)], gamma=0.1)

    cur_epoch = 0
    best_loss = float('inf')
    epochs_since_improvement = 0

    # for continue training
    if cont:
        model, optim, cur_epoch, best_loss = load_ckpt_continue_training(
            best_ckpt_src, model, optim, logger)
        logger.info("Current best loss: {0}".format(best_loss))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            for i in range(cur_epoch):
                scheduler.step()
    else:
        model = nn.DataParallel(model)
        model = model.to(device)

    logger.info("(3) Model Initiated ... ")
    logger.info("Training model: {}".format(net) + ". Training Started.")

    # loss
    ce_loss = CrossEntropyLoss()
    if use_dice_loss:
        dice_loss = DiceLoss(CLASS_NUM)

    # loop over epochs
    iter_count = 0
    epoch_bar = tqdm.tqdm(total=epochs,
                          desc="Epoch",
                          position=cur_epoch,
                          leave=True)
    logger.info("Total epochs: {0}. Starting from epoch {1}.".format(
        epochs, cur_epoch + 1))

    for e in range(epochs - cur_epoch):
        epoch = e + cur_epoch

        # Training.
        model.train()
        trainLossMeter = LossMeter()
        train_batch_bar = tqdm.tqdm(total=len(train_loader),
                                    desc="TrainBatch",
                                    position=0,
                                    leave=True)

        for batch_num, (orig_img, mask_img) in enumerate(train_loader):
            orig_img, mask_img = orig_img.float().to(
                device), mask_img.float().to(device)

            if net == "TransUNet-Base" or net == "TransUNet-Large":
                pred = model(orig_img)
            elif net == "SETR-PUP" or net == "SETR-MLA":
                if aux_layers is not None:
                    pred, _ = model(orig_img)
                else:
                    pred = model(orig_img)
            elif net == "UNet":
                pred = model(orig_img)

            loss_ce = ce_loss(pred, mask_img[:].long())
            if use_dice_loss:
                loss_dice = dice_loss(pred, mask_img, softmax=True)
                loss = 0.5 * (loss_ce + loss_dice)
            else:
                loss = loss_ce

            # Backward Propagation, Update weight and metrics
            optim.zero_grad()
            loss.backward()
            optim.step()

            # update learning rate
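            # (the loop below applies a poly-style decay: the current lr is scaled by (1 - iter_count/iteration_num)**0.9 every iteration)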
            for param_group in optim.param_groups:
                orig_lr = param_group['lr']
                param_group['lr'] = orig_lr * (1.0 -
                                               iter_count / iteration_num)**0.9
            iter_count += 1

            # Update loss
            trainLossMeter.update(loss.item())

            # print status
            if (batch_num + 1) % print_freq == 0:
                status = 'Epoch: [{0}][{1}/{2}]\t' \
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch+1, batch_num+1, len(train_loader), loss=trainLossMeter)
                logger.info(status)

            # log loss to tensorboard
            if (batch_num + 1) % tensorboard_freq == 0:
                writer.add_scalar(
                    'Train_Loss_{0}'.format(tensorboard_freq),
                    trainLossMeter.avg,
                    epoch * (len(train_loader) / tensorboard_freq) +
                    (batch_num + 1) / tensorboard_freq)
            train_batch_bar.update(1)

        writer.add_scalar('Train_Loss_epoch', trainLossMeter.avg, epoch)

        # Validation.
        model.eval()
        validLossMeter = LossMeter()
        valid_batch_bar = tqdm.tqdm(total=len(valid_loader),
                                    desc="ValidBatch",
                                    position=0,
                                    leave=True)
        with torch.no_grad():
            for batch_num, (orig_img, mask_img) in enumerate(valid_loader):
                orig_img, mask_img = orig_img.float().to(
                    device), mask_img.float().to(device)

                if net == "TransUNet-Base" or net == "TransUNet-Large":
                    pred = model(orig_img)
                elif net == "SETR-PUP" or net == "SETR-MLA":
                    if aux_layers is not None:
                        pred, _ = model(orig_img)
                    else:
                        pred = model(orig_img)
                elif net == "UNet":
                    pred = model(orig_img)

                loss_ce = ce_loss(pred, mask_img[:].long())
                if use_dice_loss:
                    loss_dice = dice_loss(pred, mask_img, softmax=True)
                    loss = 0.5 * (loss_ce + loss_dice)
                else:
                    loss = loss_ce

                # Update loss
                validLossMeter.update(loss.item())

                # print status
                if (batch_num + 1) % print_freq == 0:
                    status = 'Validation: [{0}][{1}/{2}]\t' \
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch+1, batch_num+1, len(valid_loader), loss=validLossMeter)
                    logger.info(status)

                # log loss to tensorboard
                if (batch_num + 1) % tensorboard_freq == 0:
                    writer.add_scalar(
                        'Valid_Loss_{0}'.format(tensorboard_freq),
                        validLossMeter.avg,
                        epoch * (len(valid_loader) / tensorboard_freq) +
                        (batch_num + 1) / tensorboard_freq)
                valid_batch_bar.update(1)

        valid_loss = validLossMeter.avg
        writer.add_scalar('Valid_Loss_epoch', valid_loss, epoch)
        logger.info("Validation Loss of epoch [{0}/{1}]: {2}\n".format(
            epoch + 1, epochs, valid_loss))

        # update optim scheduler
        scheduler.step()

        # save checkpoint
        is_best = valid_loss < best_loss
        best_loss_tmp = min(valid_loss, best_loss)
        if not is_best:
            epochs_since_improvement += 1
            logger.info("Epochs since last improvement: %d\n" %
                        (epochs_since_improvement, ))
            if epochs_since_improvement == early_stop_tolerance:
                break  # early stopping.
        else:
            epochs_since_improvement = 0
            state = {
                'epoch': epoch,
                'loss': best_loss_tmp,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optim.state_dict(),
            }
            torch.save(state, ckpt_src)
            logger.info("Checkpoint updated.")
            best_loss = best_loss_tmp
        epoch_bar.update(1)
    writer.close()
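
# NOTE: LossMeter is used above (trainLossMeter / validLossMeter) but is not
# defined in this snippet. A minimal sketch of a running-average meter with the
# .update(), .val and .avg members the loop relies on (an assumption, not the
# source implementation):
class LossMeter:
    def __init__(self):
        self.val = 0.0    # most recent value
        self.sum = 0.0    # running sum
        self.count = 0    # number of updates
        self.avg = 0.0    # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count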
Example #4
                             batch_size=4,
                             shuffle=True,
                             num_workers=4)

    classes = ('Buildings', 'MiscMan-made', 'Road', 'Track', 'Trees', 'Crops',
               'Waterway', 'Standing_Water', 'Vehicle_Large', 'Vehicle_Small')

    # Model definition
    model = UNet(n_classes=len(classes), in_channels=_NUM_CHANNELS_)
    if torch.cuda.device_count() >= 1:
        print("Training model on ", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)

    # Loss function and Optimizer definitions
    criterion = nn.BCELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    # Network training
    epoch_data = {}
    for epoch in range(_NUM_EPOCHS_):
        epoch_loss = 0.0
        epoch_data[epoch] = {}
        for i, data in enumerate(trainloader, 0):
            # Get the inputs for the network
            inputs = data['image'].to(_COMPUTE_DEVICE_)
            labels = data['masks'].to(_COMPUTE_DEVICE_)

            optimizer.zero_grad()  # zero the parameter gradients

            # Forward pass + Backward pass + Optimisation
            outputs = model(inputs)
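
            # NOTE: the snippet is cut off here; a typical continuation of this
            # step (a sketch, assuming the model already emits probabilities
            # suitable for nn.BCELoss) would be:
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()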
Example #5
                                training_dataset = True,
                                in_size = 320,
                                train_in_size = input_size)
    train_dataloader = torch.utils.data.DataLoader(SRRFDATASET, batch_size=batch_size, shuffle=True, pin_memory=True) # better than for loop
    
    SRRFDATASET2 = ReconsDataset(all_data_path="/media/star/LuhongJin/UNC_data/SRRF/New_training_20190829/0NPY_Dataset/Dataset/Microtubule/",
                                maximum_intensity_4normalization_path="/home/star/0_code_lhj/DL-SIM-github/Training_codes/UNetMax_intensity.npy",
                                transform = ToTensor(),
                                training_dataset = False,
                                in_size = 320,
                                train_in_size = input_size)
    validation_dataloader = torch.utils.data.DataLoader(SRRFDATASET2, batch_size=batch_size, shuffle=True, pin_memory=True) # better than for loop

    model = UNet(n_channels=input_size, n_classes=output_size)

    print("{} paramerters in total".format(sum(x.numel() for x in model.parameters())))
    model.cuda(cuda)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999))

    loss_all = np.zeros((2000, 4))
    for epoch in range(2000):
        
        mae_m, mae_s = val_during_training(train_dataloader)
        loss_all[epoch, 0] = mae_m
        loss_all[epoch, 1] = mae_s
        mae_m, mae_s = val_during_training(validation_dataloader)
        loss_all[epoch, 2] = mae_m
        loss_all[epoch, 3] = mae_s

        file = Workbook(encoding='utf-8')
        table = file.add_sheet('loss_all')
Example #6
#num_train = math.ceil(train_ratio*len(image_dataset))
#num_val = len(image_dataset) - num_train
#train_dataset, val_dataset = torch.utils.data.random_split(image_dataset, [num_train, num_val])

# Set up tensor board
writer = SummaryWriter(tfboard_dir)

# Define a loss function and optimizer
weights = torch.ones(num_class)
#Give a larger weight to SRLM for SRLM
#weights[-1] = 5
weights = weights.to(device)
#ignore_index?
#criterion = nn.CrossEntropyLoss(weights)
criterion = l.GeneralizedDiceLoss(num_classes=num_class, weight=weights)
optimizer = optim.Adam(net.parameters(), lr=c.learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer,
                                      step_size=c.step_size,
                                      gamma=0.1)

## Save and generate patches for datasets
tp_dir = dir_names.patch_dir + "/training_data"
vp_dir = dir_names.patch_dir + "/validation_data"

#c.force_create(tp_dir)
#c.force_create(vp_dir)
#if os.path.exists(dir_names.train_patch_csv):
#    os.remove(dir_names.train_patch_csv)
#if os.path.exists(dir_names.val_patch_csv):
#    os.remove(dir_names.val_patch_csv)
#
Example #7
    train_loader = torch.utils.data.DataLoader(X_train, batch_size=batch_size, shuffle=True, pin_memory=False) # better than for loop  
    val_loader = torch.utils.data.DataLoader(y_train, batch_size=batch_size, shuffle=False, pin_memory=False) # better than for loop
    X_train,y_train,X_test,y_test = None,None,None,None

else: 
    train_set = TrainDatasetFromFolder('data/DIV2K_train_HR', crop_size=CROP_SIZE, upscale_factor=UPSCALE_FACTOR)
    val_set = ValDatasetFromFolder('data/DIV2K_valid_HR', crop_size=CROP_SIZE, upscale_factor=UPSCALE_FACTOR)
    train_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=64, shuffle=True)
    val_loader = DataLoader(dataset=val_set, num_workers=4, batch_size=1, shuffle=False)
    #x,y = next(iter(val_loader)),next(iter(train_loader))
    #print(x[0].shape,x[1].shape,x[2].shape,y[0].shape)
#netG = Generator(UPSCALE_FACTOR,in_channels,out_channels) 
netG = UNet(n_channels=in_channels, n_classes=out_channels)
#print(summary(netG,(in_channels,128,128)))
print('# generator parameters:', sum(param.numel() for param in netG.parameters()))
netD = Discriminator(out_channels)
print('# discriminator parameters:', sum(param.numel() for param in netD.parameters()))
#print(summary(netD,(out_channels,256,256)))
generator_criterion = GeneratorLoss()
#print(summary(generator_criterion,(3,256,256)))
if torch.cuda.is_available():
    netG.cuda()
    netD.cuda()
    generator_criterion.cuda()

optimizerG = optim.Adam(netG.parameters())
optimizerD = optim.Adam(netD.parameters())

results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': [], 'psnr': [], 'ssim': []}
Example #8
    batch_size = 1

    SRRFDATASET = ReconsDataset(
        test_in_path=
        "/home/star/0_code_lhj/DL-SIM-github/TESTING_DATA/microtuble/HE_X2/",
        transform=ToTensor(),
        img_type='tif',
        in_size=256)
    test_dataloader = torch.utils.data.DataLoader(
        SRRFDATASET, batch_size=batch_size, shuffle=True,
        pin_memory=True)  # better than for loop

    model = UNet(n_channels=3, n_classes=1)

    print("{} paramerters in total".format(
        sum(x.numel() for x in model.parameters())))
    model.cuda(cuda)
    model.load_state_dict(
        torch.load(
            "/home/star/0_code_lhj/DL-SIM-github/MODELS/UNet_SIM3_microtubule.pkl"
        ))
    model.eval()

    for batch_idx, items in enumerate(test_dataloader):

        image = items['image_in']
        image_name = items['image_name']
        print(image_name[0])

        image = np.swapaxes(image, 1, 3)
Example #9
n_classes = 2

# instance_model = ReSeg(n_classes=n_classes, use_instance_seg=True, pretrained=False, usegpu=True).to(args.device)
segmenter_model = UNet(n_channels=1, n_classes=2).to(args.device)
instance_model = UNet(n_channels=1, n_classes=6).to(args.device)

loss_binary = torch.nn.BCEWithLogitsLoss()
discriminative_loss = DiscriminativeLoss(delta_var=delta_var,
                                         delta_dist=delta_dist,
                                         norm=2,
                                         usegpu=True)
cross_entropy_fn = torch.nn.CrossEntropyLoss()

optimizer = torch.optim.Adam(
    list(instance_model.parameters()) + list(segmenter_model.parameters()),
    0.001)


def stack_iterator(n_portions, block_size, stacks=()):
    for y in range(n_portions):
        for x in range(n_portions):
            y1 = y * block_size
            x1 = x * block_size
            y2 = (y + 1) * block_size
            x2 = (x + 1) * block_size

            crops = [s[:, :, y1:y2, x1:x2] for s in stacks]

            yield crops, (x1, y1, x2, y2)
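
# NOTE: a short usage sketch for stack_iterator (illustrative only, not from
# the source): tile two aligned [N, C, 512, 512] stacks into 256x256 crops and
# run the segmentation UNet on each tile.
image_stack = torch.randn(1, 1, 512, 512)
label_stack = torch.randn(1, 1, 512, 512)
for (img_crop, lbl_crop), (x1, y1, x2, y2) in stack_iterator(
        n_portions=2, block_size=256, stacks=[image_stack, label_stack]):
    with torch.no_grad():
        seg_logits = segmenter_model(img_crop.to(args.device))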
Example #10
class PG(object):
    def __init__(self, configs, env):
        self.configs = configs
        self.env = env
        self.action_size = (64, 1024)

        # n_channels=3 for RGB images
        # n_classes is the number of probabilities you want to get per pixel
        #   - For 1 class and background, use n_classes=1
        #   - For 2 classes, use n_classes=1
        #   - For N > 2 classes, use n_classes=N

        # TODO: for now we assume input and output sizes are equal, which might not be true, so the UNet may need modifications if necessary

        self.actor = UNet(
            n_channels=3, n_classes=1, bilinear=True
        )  # [B,C, H_in=372, W_in=1242] -> [B, C, H_out=64, W_out=1024]
        self.optimizer = Adam(self.actor.parameters(), lr=configs['lr'])
        self.actor.to(device)

    def get_action(self, state, deterministic=False):
        """Given the state, produces an action, the probability of the action, the log probability of the action, and
        the argmax action"""
        action_probabilities = self.actor(
            state)  # output size should be [B*H*W]
        action_probabilities = torch.sigmoid(
            action_probabilities)  # make sure the probs are in range [0,1]

        # B, _, _, _ = action_probabilities.shape
        action_probabilities = action_probabilities[
            :, :, :self.action_size[0], :self.action_size[1]]
        action_probabilities = torch.squeeze(action_probabilities, 1)
        # assert action_probabilities.size()[1, 2] == self.action_size, "Actor output the wrong size"
        # action_probabilities_flat = action_probabilities.contiguous().view(B, -1)
        # TODO leave this to future process; seems it will get the index
        max_probability_action = torch.argmax(action_probabilities, dim=-1)

        if deterministic:
            # use a deterministic policy at test time: threshold the probabilities at 0.5
            action = (action_probabilities > 0.5).float().cpu()
        else:
            # use a stochastic policy at training time
            action_distribution = Bernoulli(
                action_probabilities
            )  # this creates a distribution to sample from
            action = action_distribution.sample().cpu(
            )  # sample the discrete action and copy it to cpu

        # Have to deal with situation of 0.0 probabilities because we can't do log 0
        z = action_probabilities == 0.0
        z = z.float() * 1e-8
        log_action_probabilities = torch.log(action_probabilities + z)

        return action, action_probabilities, log_action_probabilities, max_probability_action

    def compute_loss(self, obs, act, rew):
        """make loss function whose gradient, for the right data, is policy gradient"""
        # TODO we may do not need to calculate it for the second time.
        act_baseline, _, logp, _ = self.get_action(obs, deterministic=True)

        # advantage
        _, rew_baseline, _, _ = self.env.step(act_baseline, obs=obs)
        advantage = rew.to(device).float() - rew_baseline.to(device).float()

        loss = logp * Variable(advantage).expand_as(act)
        loss = loss.mean()
        return loss

    def update(self, batch_obs, batch_acts, batch_rews):
        """take a single policy gradient update step for a batch"""
        self.optimizer.zero_grad()
        batch_loss = self.compute_loss(
            obs=torch.as_tensor(batch_obs, dtype=torch.float32),
            act=torch.as_tensor(batch_acts, dtype=torch.int32),
            rew=torch.as_tensor(batch_rews, dtype=torch.int32),
        )
        batch_loss.backward()
        self.optimizer.step()
        return batch_loss
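
# NOTE: a minimal usage sketch for PG.get_action (illustrative; the input size
# is a placeholder and env is not needed for this call, so None is passed --
# both are assumptions, not from the source):
agent = PG(configs={'lr': 1e-4}, env=None)
state = torch.rand(1, 3, 384, 1024).to(device)   # [B, C, H_in, W_in]
action, probs, log_probs, argmax_action = agent.get_action(state, deterministic=True)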
Example #11
        if convert_to_2d:
            inp_set = get_2d_converted_data(inp_set)
        inp_set = torch.from_numpy(inp_set).float()
        file_name = os.path.basename(inp_file)
        out_file = os.path.join(out_dir, file_name)
        data.append((inp_set, out_file))
    return data

def save_pred(model, data):
    model.eval()
    for image, file_path in data:
        img = image.cuda(cuda)
        pred = model(img)        
        pred = pred.detach().cpu().numpy()[0]
        pred = (pred * 255).astype(np.uint8)
        # save_path = file_path.replace('.mat', '.png')
        # cv2.imwrite(save_path, pred)
        pred = pred.transpose((1, 2, 0))
        savemat(file_path, {'crop_g': pred})

if __name__ == '__main__':
    cuda = torch.device('cuda')
    model = UNet(n_channels=45, n_classes=3)
    print("{} Parameters in total".format(sum(x.numel() for x in model.parameters())))
    model.cuda(cuda)
    model.load_state_dict(torch.load(model_loc + "Model_Final_999_3_5.pkl"))
    model.eval()
    data = get_images()
    save_pred(model, data)
Example #12
class UNetObjPrior(nn.Module):
    """ 
    Wrapper around UNet that takes object priors (gaussians) and images 
    as input.
    """
    def __init__(self, params, depth=5):
        super(UNetObjPrior, self).__init__()
        self.in_channels = 4
        self.model = UNet(1, self.in_channels, depth, cuda=params['cuda'])
        self.params = params
        self.device = torch.device('cuda' if params['cuda'] else 'cpu')

    def forward(self, im, obj_prior):
        x = torch.cat((im, obj_prior), dim=1)
        return self.model(x)

    def train(self, dataloader_train, dataloader_val):

        since = time.time()
        best_loss = float("inf")

        dataloader_train.mode = 'train'
        dataloader_val.mode = 'val'
        dataloaders = {'train': dataloader_train, 'val': dataloader_val}

        optimizer = optim.SGD(self.model.parameters(),
                              momentum=self.params['momentum'],
                              lr=self.params['lr'],
                              weight_decay=self.params['weight_decay'])

        train_logger = LossLogger('train', self.params['batch_size'],
                                  len(dataloader_train),
                                  self.params['out_dir'])

        val_logger = LossLogger('val', self.params['batch_size'],
                                len(dataloader_val), self.params['out_dir'])

        loggers = {'train': train_logger, 'val': val_logger}

        # self.criterion = WeightedMSE(dataloader_train.get_classes_weights(),
        #                              cuda=self.params['cuda'])
        self.criterion = nn.MSELoss()

        for epoch in range(self.params['num_epochs']):
            print('Epoch {}/{}'.format(epoch, self.params['num_epochs'] - 1))
            print('-' * 10)

            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    #scheduler.step()
                    self.model.train()
                else:
                    self.model.eval()  # Set model to evaluate mode

                running_loss = 0.0
                running_corrects = 0

                # Iterate over data.
                samp = 1
                for i, data in enumerate(dataloaders[phase]):
                    # zero the parameter gradients
                    optimizer.zero_grad()

                    # forward
                    # track history if only in train
                    with torch.set_grad_enabled(phase == 'train'):
                        out = self.forward(data.image, data.obj_prior)
                        loss = self.criterion(out, data.truth)

                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()

                    loggers[phase].update(epoch, samp, loss.item())

                    samp += 1

                loggers[phase].print_epoch(epoch)

                # Generate train prediction for check
                if phase == 'train':
                    path = os.path.join(self.params['out_dir'], 'previews',
                                        'epoch_{:04d}.jpg'.format(epoch))
                    data = dataloaders['val'].sample_uniform()
                    pred = self.forward(data.image, data.obj_prior)
                    im_ = data.image[0]
                    truth_ = data.truth[0]
                    pred_ = pred[0, ...]
                    utls.save_tensors(im_, pred_, truth_, path)

                if phase == 'val' and (loggers['val'].get_loss(epoch) <
                                       best_loss):
                    best_loss = loggers['val'].get_loss(epoch)

                loggers[phase].save('log_{}.csv'.format(phase))

                # save checkpoint
                if phase == 'val':
                    is_best = loggers['val'].get_loss(epoch) <= best_loss
                    path = os.path.join(self.params['out_dir'],
                                        'checkpoint.pth.tar')
                    utls.save_checkpoint(
                        {
                            'epoch': epoch + 1,
                            'state_dict': self.model.state_dict(),
                            'best_loss': best_loss,
                            'optimizer': optimizer.state_dict()
                        },
                        is_best,
                        path=path)

    def load_checkpoint(self, path, device='gpu'):

        if (device != 'gpu'):
            checkpoint = torch.load(path,
                                    map_location=lambda storage, loc: storage)
        else:
            checkpoint = torch.load(path)

        self.model.load_state_dict(checkpoint['state_dict'])
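
# NOTE: a minimal usage sketch (illustrative; the parameter values and input
# shapes are placeholders, not from the source). The forward pass concatenates
# image and prior into the 4-channel input expected by the wrapped UNet; a
# 3-channel image plus a 1-channel prior is assumed here.
params = {'cuda': False, 'lr': 1e-3, 'momentum': 0.9, 'weight_decay': 0.0,
          'batch_size': 4, 'num_epochs': 50, 'out_dir': './runs'}
net = UNetObjPrior(params)
out = net(torch.rand(1, 3, 128, 128), torch.rand(1, 1, 128, 128))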
Example #13
    LE_512 = cropImage(LE_img, IMG_SHAPE[0], IMG_SHAPE[1])
    sample_le = {}
    for le_512 in LE_512:
        tiles = crop_prepare(le_512, CROP_STEP, IMG_SIZE)
        for n, img in enumerate(tiles):
            if n not in sample_le:
                sample_le[n] = []
            img = transform.resize(img, (IMG_SIZE * 2, IMG_SIZE * 2),
                                   preserve_range=True,
                                   order=3)
            sample_le[n].append(img)

    SC_UNET = UNet(n_channels=15, n_classes=1)
    print("{} paramerters in total".format(
        sum(x.numel() for x in SC_UNET.parameters())))
    SC_UNET.cuda(cuda)
    SC_UNET.load_state_dict(torch.load(model_path))
    # SC_UNET.load_state_dict(torch.load(os.path.join(dir_path,"model","HE_HER_mito","HE_X2_HER_0825.pkl")))
    SC_UNET.eval()

    SRRFDATASET = ReconsDataset(img_dict=sample_he,
                                transform=ToTensor(),
                                in_norm=LE_in_norm,
                                img_type=".tif",
                                in_size=256)
    test_dataloader = torch.utils.data.DataLoader(
        SRRFDATASET, batch_size=1, shuffle=False,
        pin_memory=True)  # better than for loop
    result = np.zeros((256, 256, len(SRRFDATASET)))
    for batch_idx, items in enumerate(test_dataloader):
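        # NOTE: the snippet is cut off here; a typical body for this loop (a
        # sketch, assuming an 'image_in' key as in the other examples above and
        # a single-channel 256x256 output) would be:
        image = items['image_in'].cuda(cuda)
        with torch.no_grad():
            pred = SC_UNET(image)
        result[:, :, batch_idx] = pred.detach().cpu().numpy()[0, 0]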
Example #14
class Train(object):
    def __init__(self, configs):
        self.batch_size = configs.get("batch_size", 16)
        self.epochs = configs.get("epochs", 100)
        self.lr = configs.get("lr", 0.0001)

        device_args = configs.get("device", "cuda")
        self.device = torch.device(
            "cpu" if not torch.cuda.is_available() else device_args)

        self.workers = configs.get("workers", 4)

        self.vis_images = configs.get("vis_images", 200)
        self.vis_freq = configs.get("vis_freq", 10)

        self.weights = configs.get("weights", "./weights")
        if not os.path.exists(self.weights):
            os.mkdir(self.weights)

        self.logs = configs.get("logs", "./logs")
        if not os.path.exists(self.logs):
            os.mkdir(self.logs)

        self.images_path = configs.get("images_path", "./data")

        self.is_resize = configs.get("is_resize", False)
        self.image_short_side = configs.get("image_short_side", 256)

        self.is_padding = configs.get("is_padding", False)

        is_multi_gpu = configs.get("DateParallel", False)

        pre_train = configs.get("pre_train", False)
        model_path = configs.get("model_path", './weights/unet_idcard_adam.pth')

        # self.image_size = configs.get("image_size", "256")
        # self.aug_scale = configs.get("aug_scale", "0.05")
        # self.aug_angle = configs.get("aug_angle", "15")

        self.step = 0

        self.dsc_loss = DiceLoss()
        self.model = UNet(in_channels=Dataset.in_channels,
                          out_channels=Dataset.out_channels)
        if pre_train:
            self.model.load_state_dict(torch.load(model_path,
                                                  map_location=self.device),
                                       strict=False)

        if is_multi_gpu:
            self.model = nn.DataParallel(self.model)

        self.model.to(self.device)

        self.best_validation_dsc = 0.0

        self.loader_train, self.loader_valid = self.data_loaders()

        self.params = [p for p in self.model.parameters() if p.requires_grad]

        self.optimizer = optim.Adam(self.params,
                                    lr=self.lr,
                                    weight_decay=0.0005)
        # self.optimizer = torch.optim.SGD(self.params, lr=self.lr, momentum=0.9, weight_decay=0.0005)
        self.scheduler = lr_scheduler.LR_Scheduler_Head(
            'poly', self.lr, self.epochs, len(self.loader_train))

    def datasets(self):
        train_datasets = Dataset(
            images_dir=self.images_path,
            # image_size=self.image_size,
            subset="train",  # train
            transform=get_transforms(train=True),
            is_resize=self.is_resize,
            image_short_side=self.image_short_side,
            is_padding=self.is_padding)
        # valid_datasets = train_datasets

        valid_datasets = Dataset(
            images_dir=self.images_path,
            # image_size=self.image_size,
            subset="validation",  # validation
            transform=get_transforms(train=False),
            is_resize=self.is_resize,
            image_short_side=self.image_short_side,
            is_padding=False)
        return train_datasets, valid_datasets

    def data_loaders(self):
        dataset_train, dataset_valid = self.datasets()

        loader_train = DataLoader(
            dataset_train,
            batch_size=self.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=self.workers,
        )
        loader_valid = DataLoader(
            dataset_valid,
            batch_size=1,
            drop_last=False,
            num_workers=self.workers,
        )

        return loader_train, loader_valid

    @staticmethod
    def dsc_per_volume(validation_pred, validation_true):
        assert len(validation_pred) == len(validation_true)
        dsc_list = []
        for p in range(len(validation_pred)):
            y_pred = np.array([validation_pred[p]])
            y_true = np.array([validation_true[p]])
            dsc_list.append(dsc(y_pred, y_true))
        return dsc_list

    @staticmethod
    def get_logger(filename, verbosity=1, name=None):
        level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
        formatter = logging.Formatter(
            "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
        )
        logger = logging.getLogger(name)
        logger.setLevel(level_dict[verbosity])

        fh = logging.FileHandler(filename, "w")
        fh.setFormatter(formatter)
        logger.addHandler(fh)

        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        logger.addHandler(sh)

        return logger

    def train_one_epoch(self, epoch):

        self.model.train()
        loss_train = []
        for i, data in enumerate(self.loader_train):
            self.scheduler(self.optimizer, i, epoch, self.best_validation_dsc)
            x, y_true = data
            x, y_true = x.to(self.device), y_true.to(self.device)

            y_pred = self.model(x)
            # print('1111', y_pred.size())
            # print('2222', y_true.size())
            loss = self.dsc_loss(y_pred, y_true)

            loss_train.append(loss.item())

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # lr_scheduler.step()
            if self.step % 200 == 0:
                print('Epoch:[{}/{}]\t iter:[{}]\t loss={:.5f}\t '.format(
                    epoch, self.epochs, i, loss))

            self.step += 1

    def eval_model(self, patience):
        self.model.eval()
        loss_valid = []

        validation_pred = []
        validation_true = []
        # early_stopping = EarlyStopping(patience=patience, verbose=True)

        for i, data in enumerate(self.loader_valid):
            x, y_true = data
            x, y_true = x.to(self.device), y_true.to(self.device)

            # print(x.size())
            # print(333,x[0][2])
            with torch.no_grad():
                y_pred = self.model(x)
                loss = self.dsc_loss(y_pred, y_true)

            # print(y_pred.shape)
            mask = y_pred > 0.5
            mask = mask * 255
            mask = mask.cpu().numpy()[0][0]
            # print(mask)
            # print(mask.shape())
            cv2.imwrite('result.png', mask)

            loss_valid.append(loss.item())

            y_pred_np = y_pred.detach().cpu().numpy()

            validation_pred.extend(
                [y_pred_np[s] for s in range(y_pred_np.shape[0])])
            y_true_np = y_true.detach().cpu().numpy()
            validation_true.extend(
                [y_true_np[s] for s in range(y_true_np.shape[0])])

        # early_stopping(loss_valid, self.model)
        # if early_stopping.early_stop:
        #     print('Early stopping')
        #     import sys
        #     sys.exit(1)
        mean_dsc = np.mean(
            self.dsc_per_volume(
                validation_pred,
                validation_true,
            ))
        # print('mean_dsc:', mean_dsc)
        if mean_dsc > self.best_validation_dsc:
            self.best_validation_dsc = mean_dsc
            torch.save(self.model.state_dict(),
                       os.path.join(self.weights, "unet_xia_adam.pth"))
            print("Best validation mean DSC: {:4f}".format(
                self.best_validation_dsc))

    def main(self):
        # print('train is begin.....')
        # print('load data end.....')

        # loaders = {"train": loader_train, "valid": loader_valid}

        for epoch in tqdm(range(self.epochs), total=self.epochs):
            self.train_one_epoch(epoch)
            self.eval_model(patience=10)

        torch.save(self.model.state_dict(),
                   os.path.join(self.weights, "unet_final.pth"))
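
# NOTE: dsc() is used by dsc_per_volume() above but is not defined in this
# snippet. A minimal sketch of a Dice coefficient on binarized arrays (an
# assumption, not the source implementation):
import numpy as np

def dsc(y_pred, y_true, eps=1e-7):
    y_pred = (np.asarray(y_pred) > 0.5).astype(np.float32)
    y_true = (np.asarray(y_true) > 0.5).astype(np.float32)
    intersection = np.sum(y_pred * y_true)
    return (2.0 * intersection + eps) / (np.sum(y_pred) + np.sum(y_true) + eps)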
Example #15
    SRRFDATASET2 = ReconsDataset(
        train_in_path=
        "/media/star/LuhongJin/UNC_data/SIM/ALL_data/microtubule/Training_Testing/testing_HE_X2/",
        train_gt_path=
        "/media/star/LuhongJin/UNC_data/SIM/ALL_data/microtubule/Training_Testing/testing_HER/",
        transform=ToTensor(),
        img_type='tif',
        in_size=256)
    validation_dataloader = torch.utils.data.DataLoader(
        SRRFDATASET2, batch_size=batch_size, shuffle=True,
        pin_memory=True)  # better than for loop

    model = UNet(n_channels=15, n_classes=1)

    print("{} paramerters in total".format(
        sum(x.numel() for x in model.parameters())))
    model.cuda(cuda)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 betas=(0.9, 0.999))

    loss_all = np.zeros((2000, 4))
    for epoch in range(2000):

        mae_m, mae_s = val_during_training(train_dataloader)
        loss_all[epoch, 0] = mae_m
        loss_all[epoch, 1] = mae_s
        mae_m, mae_s = val_during_training(validation_dataloader)
        loss_all[epoch, 2] = mae_m
        loss_all[epoch, 3] = mae_s
Example #16
#train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, pin_memory=False) # better than for loop
#val_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, pin_memory=False) # better than for loop

#torch.autograd.set_detect_anomaly(True)
if True:  # __name__ == '__main__':
    #opt = parser.parse_args()

    #train_set = TrainDatasetFromFolder('data/DIV2K_train_HR', crop_size=CROP_SIZE, upscale_factor=UPSCALE_FACTOR)
    #val_set = ValDatasetFromFolder('data/DIV2K_valid_HR', upscale_factor=UPSCALE_FACTOR)
    #train_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=64, shuffle=True)
    #val_loader = DataLoader(dataset=val_set, num_workers=4, batch_size=1, shuffle=False)

    netG = UNet(n_channels=15, n_classes=1)
    #print(summary(netG,(15,128,128)))
    print('# generator parameters:',
          sum(param.numel() for param in netG.parameters()))
    netD = Discriminator()
    print('# discriminator parameters:',
          sum(param.numel() for param in netD.parameters()))
    #print(summary(netD,(1,256,256)))

    generator_criterion = GeneratorLoss()

    if torch.cuda.is_available():
        netG.cuda()
        netD.cuda()
        generator_criterion.cuda()

    optimizerG = optim.Adam(netG.parameters())
    optimizerD = optim.Adam(netD.parameters())
Example #17
            nn.init.constant_(m.bias.data, 0)

    net = UNet(3, 1).to(device)

    # Apply the weights_init function to randomly initialize all weights
    net.apply(weights_init)

    print(net)
    print('parameters:', get_layer_param(net))

    # Initialize the MSELoss criterion
    criterion = nn.MSELoss()

    # Setup Adam optimizers for both G and D
    # optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.Adam(net.parameters(), lr=lr, betas=(0.5, 0.999))

    # optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)

    # define a method to save loss image
    def save_loss_image(list_loss, path):
        plt.figure(figsize=(10, 5))
        plt.title("Generator and Discriminator Loss During Training")
        plt.plot(list_loss, label="loss")
        plt.xlabel("iterations")
        plt.ylabel("Loss")
        plt.legend()
        plt.savefig(os.path.join(path, 'loss.jpg'))
        plt.close()

    list_loss = []
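
    # NOTE: a short, self-contained usage sketch for save_loss_image
    # (illustrative only; the loss values and output path are placeholders,
    # not from the source):
    save_loss_image([1.0, 0.8, 0.5, 0.4], path='.')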
Example #18
# Load model if available
if resume:
    print('Resuming training....')
    generator.load_state_dict(torch.load(os.path.join(model_path, 'model_gen_latest')))
    discriminator_g.load_state_dict(torch.load(os.path.join(model_path, 'model_gdis_latest')))
    discriminator_l.load_state_dict(torch.load(os.path.join(model_path, 'model_ldis_latest')))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
generator = generator.to(device)
discriminator_g = discriminator_g.to(device)
discriminator_l = discriminator_l.to(device)

optimizer_g = optim.Adam(discriminator_g.parameters(), lr=0.00005)
optimizer_l = optim.Adam(discriminator_l.parameters(), lr=0.00005)
gen_optimizer = optim.Adam(generator.parameters(), lr=0.0002)

lossdis = nn.BCELoss()
lossgen = FocalLoss()
lamda = 75

data_loader = load_images(data_path)
num_epochs = 2000


for epoch in range(num_epochs):
    print()
    for n_batch, (real_data, gt_data) in enumerate(data_loader):

        # 1. Train Discriminator
        N = real_data.size(0)
Example #19
    return model


if __name__ == '__main__':
    lr = 0.001
    model = UNet(n_channels=1)

    #num_ftrs = model.fc.in_features
    # Here the size of each output sample is set to 2
    # Alternatively it can be generalized to nn.Linear(num_ftrs, len(class_names))
    #model.fc = nn.Linear(num_ftrs, 2)

    model = model.to(device)

    criterion = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    optimizer_ft = optim.Adam(model.parameters(), lr=lr)  #, momentum=0.9)

    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=7,
                                           gamma=0.1)

    # Finetune training the convnet and evaluation
    model = train_model(model,
                        criterion,
                        optimizer_ft,
                        exp_lr_scheduler,
                        num_epochs=25)
Example #20
    writer = SummaryWriter()

    image = Image.open('./ht2-c2.jpg')
    out = TF.to_tensor(image)
    out = out.reshape(1, 3, 640, 640)
    inp = torch.rand(1, 3, 640, 640)

    fig = plt.figure()
    plt.imshow(out[0].permute(1, 2, 0).numpy())
    # plt.show
    writer.add_figure("Ground Truth", fig)

    fig = plt.figure()
    plt.imshow(inp[0].permute(1, 2, 0).numpy())
    writer.add_figure("Input", fig)

    num_iter = 500
    writer.add_scalar("Number_of_Iterations", num_iter)

    model = UNet(3, 3)
    if torch.cuda.is_available():
        model.cuda()

    criterion = nn.MSELoss()

    learning_rate = 0.1
    writer.add_scalar("Learning_Rate", learning_rate)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    train(num_iter, inp, out, model, optimizer, criterion)
    writer.close()
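
# NOTE: train() is called above but not defined in this snippet. A minimal
# sketch that fits the call signature and simply overfits the model on the
# single (inp, out) pair for num_iter iterations (an assumption, not the
# source implementation):
def train(num_iter, inp, out, model, optimizer, criterion):
    device = next(model.parameters()).device
    inp, out = inp.to(device), out.to(device)
    model.train()
    for it in range(num_iter):
        optimizer.zero_grad()
        pred = model(inp)
        loss = criterion(pred, out)
        loss.backward()
        optimizer.step()
        if (it + 1) % 50 == 0:
            print('iter {}: loss {:.6f}'.format(it + 1, loss.item()))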
Example #21
                            num_workers=1,
                            pin_memory=True)

    # logging training overview
    print('-----\n Start training:')
    print(
        f'epochs: {args.epochs} \t batch size: {args.batch_size} \t learning rate: {args.learning_rate} \t'
    )
    print(
        f'training size: {n_train} \t validation size: {n_val} \t checkpoints_dir: {args.checkpoints_dir} \t images downscale: {args.down_scale}'
    )
    print('-----')

    ## --- Set up training
    global_step = 0
    optimizer = optim.RMSprop(net.parameters(),
                              lr=args.learning_rate,
                              weight_decay=1e-8)
    if net.n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()

    ## --- Start training
    epoch_loss_list = []
    val_score_list = []
    num_batches_per_epoch = len(dataset) // args.batch_size
    for epoch in range(args.epochs):
        net.train()

        epoch_loss = 0
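
        # NOTE: the snippet is cut off here; a typical inner loop for this
        # setup (a sketch; the loader name, batch keys and shapes are
        # assumptions, not from the source) would iterate the training
        # DataLoader, apply the criterion chosen above, and back-propagate:
        # for batch in train_loader:
        #     imgs, true_masks = batch['image'].to(device), batch['mask'].to(device)
        #     masks_pred = net(imgs)
        #     loss = criterion(masks_pred, true_masks)
        #     optimizer.zero_grad()
        #     loss.backward()
        #     optimizer.step()
        #     epoch_loss += loss.item()
        #     global_step += 1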