Code example #1 (0 votes)
    def train(self):
        """Run the full training loop over ``self.n_epochs`` epochs.

        Each epoch rebuilds the dataloader, trains on every batch, then
        validates; the checkpoint is saved whenever the validation Dice
        improves. The criterion switches from weighted Dice to Lovasz
        loss once ``epoch`` reaches ``self.loss_change``.
        """
        # Build each criterion once up front instead of re-instantiating
        # (and re-moving to device) every epoch.
        dice_criterion = WeightedDiceLoss(self.n_classes, self.class_weights).to(self.device)
        lovasz_criterion = LovaszLoss().to(self.device)

        start_time = time.time()
        for epoch in range(self.n_epochs):

            self.build_dataloader(epoch)

            # Loss change: weighted Dice for the warm-up epochs, Lovasz after.
            criterion = dice_criterion if epoch < self.loss_change else lovasz_criterion

            for step, (images_, labels_, _, _) in enumerate(self.train_loader):
                # Data shape: (N, C, D, H, W)
                images, labels = images_.to(self.device), labels_.to(self.device)

                preds = self.net(images)
                # images.shape (N, 1, D, H, W)
                # labels.shape (N, 1, D, H, W)
                # preds.shape  (N, n_classes, D, H, W)

                # Turn raw logits into per-class probabilities before the loss.
                # (Original author's note: "forgot this!")
                preds = nn.Softmax(dim=1)(preds)
                preds_flat = preds.permute(0, 2, 3, 4, 1).contiguous().view(-1, self.n_classes)
                labels_flat = labels.squeeze(1).view(-1).long()
                # preds_flat.shape  (N*D*H*W, n_classes)
                # labels_flat.shape (N*D*H*W,)  -- 1-D after view(-1)

                # Free intermediates early to reduce GPU memory pressure; the
                # autograd graph stays alive through preds_flat, so backward()
                # below still works.
                del images_, labels_, images, labels, preds
                torch.cuda.empty_cache()

                self.net.zero_grad()
                loss = criterion(preds_flat, labels_flat)
                loss.backward()
                self.optimizer.step()

                step_end_time = time.time()
                # .item() extracts the detached Python float for printing.
                print('[%d/%d][%d/%d] - time_passed: %.2f, Loss: %.2f'
                      % (epoch, self.n_epochs, step, self.num_steps,
                         step_end_time - start_time, loss.item()))

                del preds_flat, labels_flat
                torch.cuda.empty_cache()

            # Validation: checkpoint on any improvement in Dice score.
            val_dice = self.validate()
            if val_dice > self.best_val_dice:
                self.best_val_dice = val_dice
                torch.save(self.net.state_dict(), os.path.join(self.checkpoint_dir, '%s.ckpt' % self.data_type))
                print('Val Dice Improved! Saved checkpoint: %s.ckpt' % self.data_type)

            self.scheduler.step()
            print('Learning rate: %f' % self.optimizer.param_groups[0]['lr'])
Code example #2 (0 votes)
    def _init_model(self):
        """Build the ShellNet search model together with its loss,
        the two optimizers (architecture vs. kernel weights) and their
        plateau-based LR schedulers."""
        cfg_data = self.config['data']
        cfg_search = self.config['search']

        self.model = ShellNet(
            in_channels=len(cfg_data['all_mods']),
            init_n_kernels=cfg_search['init_n_kernels'],
            out_channels=len(cfg_data['labels']),
            depth=cfg_search['depth'],
            n_nodes=cfg_search['n_nodes'],
            normal_w_share=cfg_search['normal_w_share'],
            channel_change=cfg_search['channel_change'],
        ).to(self.device)
        print('Param size = {:.3f} MB'.format(calc_param_size(self.model)))

        self.loss = WeightedDiceLoss().to(self.device)

        # One optimizer for the architecture parameters (alphas), another
        # for the operation/kernel weights; each gets its own scheduler.
        self.optim_shell = Adam(self.model.alphas())  # lr=3e-4
        self.optim_kernel = Adam(self.model.kernel.parameters())
        self.shell_scheduler = ReduceLROnPlateau(self.optim_shell, verbose=True, factor=0.5)
        self.kernel_scheduler = ReduceLROnPlateau(self.optim_kernel, verbose=True, factor=0.5)
Code example #3 (0 votes)
File: train.py — Project: woodywff/nas_3d_unet
    def _init_model(self):
        """Load the searched genotype from disk and build SearchedNet
        along with its loss, optimizer and LR scheduler."""
        # NOTE(review): eval() over unpickled content executes arbitrary
        # code -- only load genotype files from trusted sources.
        with open(self.config['search']['geno_file'], 'rb') as f:
            gene = eval(pickle.load(f)[0])

        cfg_data = self.config['data']
        cfg_search = self.config['search']
        self.model = SearchedNet(in_channels=len(cfg_data['all_mods']),
                                 init_n_kernels=cfg_search['init_n_kernels'],
                                 out_channels=len(cfg_data['labels']),
                                 depth=cfg_search['depth'],
                                 n_nodes=cfg_search['n_nodes'],
                                 channel_change=cfg_search['channel_change'],
                                 gene=gene).to(self.device)
        print('Param size = {:.3f} MB'.format(calc_param_size(self.model)))

        self.loss = WeightedDiceLoss().to(self.device)

        self.optim = Adam(self.model.parameters())
        self.scheduler = ReduceLROnPlateau(self.optim, verbose=True, factor=0.5)
Code example #4 (0 votes)
def train():
    """Train the 3-class bladder-segmentation UNet.

    Loads the KingData image/label pairs, optimizes the network with a
    50/50 mix of weighted Dice and weighted cross-entropy loss, and saves
    a checkpoint every 10 epochs under ``./checkpoint/``.
    """
    # settings
    learning_rate = 1e-4
    total_epoches = 1000
    batchsize = 3
    device = "cuda"
    modelsavepath = "./checkpoint/"
    # Ensure the checkpoint directory exists before the first torch.save,
    # which would otherwise raise FileNotFoundError.
    os.makedirs(modelsavepath, exist_ok=True)

    # data transform
    input_transform = Compose([
        ToFloatTensor(),
    ])
    target_transform = Compose([
        ToFloatTensor(),
    ])

    # dataloader
    trainloader = data.DataLoader(
        BladderOneHot(imageroot="../Data/KingData/Image/",
                      labelroot="../Data/KingData/Label/",
                      img_transform=input_transform,
                      label_transform=target_transform),
        batch_size=batchsize,
        shuffle=True)

    # model
    G = UNet(in_channel=1, n_classes=3).to(device)

    # optimizer
    # NOTE(review): eps=10e-8 is 1e-7, not Adam's 1e-8 default -- looks
    # like a typo for 1e-8 but is kept as-is to preserve training behavior.
    optimizer_G = Adam(G.parameters(),
                       lr=learning_rate,
                       betas=(0.5, 0.9),
                       eps=10e-8)

    # train
    for epoch in range(total_epoches):
        G.train()
        batch_num = 0
        ave_train_loss = 0.0
        for real_imgs, real_labels in trainloader:
            real_imgs = real_imgs.to(device)
            real_labels = real_labels.to(device)

            # optimizer_G.zero_grad() already clears all of G's gradients;
            # the extra G.zero_grad() was redundant.
            optimizer_G.zero_grad()

            pred_labels = G(real_imgs)
            seg_loss = (0.5 * WeightedDiceLoss(pred_labels, real_labels)
                        + 0.5 * WeightedCrossEntropy(pred_labels, real_labels))
            seg_loss.backward()
            optimizer_G.step()

            # calculate metric
            batch_num += 1
            ave_train_loss += seg_loss.item()
        # Guard against an empty dataloader (ZeroDivisionError otherwise).
        ave_train_loss /= max(batch_num, 1)

        # eval / checkpoint
        G.eval()
        if epoch % 10 == 0:
            modelname = "G_epoch" + str(epoch) + ".pth"
            torch.save(G.state_dict(), os.path.join(modelsavepath, modelname))
        present_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                     time.localtime(time.time()))
        print("epoch[%d/%d]  TrainDiceLoss :%.8f ; Time: %s" %
              (epoch, total_epoches, ave_train_loss, present_time))