Code example #1
0
def test_img(net_g, datatest, logwriter, args, step):
    """Evaluate net_g over a test DataLoader and log the mean test loss.

    Args:
        net_g: network to evaluate; switched to eval mode here.
        datatest: DataLoader yielding (images, labels, w) batches.
        logwriter: writer exposing add_scalar (and used by log_images).
        args: namespace; args.gpu != -1 moves tensors to CUDA.
        step: global step used as the x-axis for the logged scalar.
    """
    net_g.eval()
    test_loss = 0.0
    loss_func = additional_losses.CombinedLoss()
    # Evaluation only: disable autograd to save memory and time.
    with torch.no_grad():
        for batch_idx, (images, labels, w) in enumerate(datatest):
            images = images.type(torch.FloatTensor)
            labels = labels.type(torch.LongTensor)
            w = w.type(torch.FloatTensor)
            if args.gpu != -1:
                images, labels, w = images.cuda(), labels.cuda(), w.cuda()
            log_probs = net_g(images)
            # Log a sample of predictions every 200 batches.
            if batch_idx % 200 == 0:
                log_images(images, labels, log_probs, logwriter, step, batch_idx)
            # Sum up batch loss.
            test_loss += loss_func(log_probs, labels, w).item()
    # NOTE(review): dividing a sum of per-batch losses by the dataset size
    # assumes CombinedLoss returns a batch-summed value — confirm reduction.
    test_loss /= len(datatest.dataset)
    logwriter.add_scalar('Test Loss', test_loss, step)
Code example #2
0
File: solver_sgd.py  Project: ai-med/AbdomenNet
    def __init__(self,
                 model,
                 exp_name,
                 device,
                 num_class,
                 optim=torch.optim.SGD,
                 optim_args=None,
                 loss_func=None,
                 model_name='quicknat',
                 labels=None,
                 num_epochs=10,
                 log_nth=5,
                 lr_scheduler_step_size=5,
                 lr_scheduler_gamma=0.5,
                 use_last_checkpoint=True,
                 exp_dir='experiments',
                 log_dir='logs',
                 arch_file_path=None):
        """Set up the solver: loss, optimizer, cosine LR schedule, experiment
        directories, logging, and optional resume from the last checkpoint.

        Args:
            model: network to train.
            exp_name: experiment name; subdirectory under exp_dir and log_dir.
            device: device (index) the loss function is moved to on CUDA.
            num_class: number of classes, forwarded to LogWriter.
            optim: optimizer class (default torch.optim.SGD).
            optim_args: kwargs for the optimizer constructor; None -> {}.
            loss_func: loss module; None -> a fresh CombinedLoss().
            model_name: identifier stored on the solver.
            labels: class labels forwarded to LogWriter.
            num_epochs: total number of training epochs.
            log_nth: log every nth iteration.
            lr_scheduler_step_size, lr_scheduler_gamma: kept for interface
                compatibility; unused here since CosineAnnealingLR is used.
            use_last_checkpoint: resume from the latest checkpoint if True.
            exp_dir: root directory for experiment artifacts.
            log_dir: root directory for logs.
            arch_file_path: optional architecture file path(s) to archive.
        """
        # Avoid mutable/shared default arguments: a `{}` default is shared
        # across calls, and a default CombinedLoss() instance (evaluated once
        # at def time) would be shared by every solver instance.
        if optim_args is None:
            optim_args = {}
        if loss_func is None:
            loss_func = additional_losses.CombinedLoss()

        self.device = device
        self.model = model
        self.model_name = model_name
        self.labels = labels
        self.num_epochs = num_epochs
        # Move the loss module to the GPU when available.
        if torch.cuda.is_available():
            self.loss_func = loss_func.cuda(device)
        else:
            self.loss_func = loss_func
        self.optim = optim(model.parameters(), **optim_args)
        # Cosine annealing schedule (replaces an earlier StepLR setup).
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optim, T_max=100)

        exp_dir_path = os.path.join(exp_dir, exp_name)
        common_utils.create_if_not(exp_dir_path)
        common_utils.create_if_not(os.path.join(exp_dir_path, CHECKPOINT_DIR))
        self.exp_dir_path = exp_dir_path

        self.save_architectural_files(arch_file_path)

        self.log_nth = log_nth
        self.logWriter = LogWriter(num_class, log_dir, exp_name, use_last_checkpoint, labels)

        self.use_last_checkpoint = use_last_checkpoint

        self.start_epoch = 1
        self.start_iteration = 1

        self.best_ds_mean = 0
        self.best_ds_mean_epoch = 0

        if use_last_checkpoint:
            self.load_checkpoint()

        print(self.best_ds_mean, self.best_ds_mean_epoch, self.start_epoch)
Code example #3
0
File: solver.py  Project: brennanaba/quickNAT_pytorch
    def __init__(self,
                 model,
                 exp_name,
                 device,
                 num_class,
                 optim=torch.optim.Adam,
                 optim_args=None,
                 loss_func=None,
                 model_name='quicknat',
                 labels=None,
                 num_epochs=10,
                 log_nth=5,
                 lr_scheduler_step_size=5,
                 lr_scheduler_gamma=0.5,
                 use_last_checkpoint=True,
                 exp_dir='experiments',
                 log_dir='logs'):
        """Set up the solver: loss, optimizer, StepLR schedule, experiment
        directories, logging, and optional resume from the last checkpoint.

        Args:
            model: network to train.
            exp_name: experiment name; subdirectory under exp_dir and log_dir.
            device: device (index) the loss function is moved to on CUDA.
            num_class: number of classes, forwarded to LogWriter.
            optim: optimizer class (default torch.optim.Adam).
            optim_args: kwargs for the optimizer constructor; None -> {}.
            loss_func: loss module; None -> a fresh CombinedLoss().
            model_name: identifier stored on the solver.
            labels: class labels forwarded to LogWriter.
            num_epochs: total number of training epochs.
            log_nth: log every nth iteration.
            lr_scheduler_step_size: StepLR period in epochs.
            lr_scheduler_gamma: StepLR multiplicative decay factor.
            use_last_checkpoint: resume from the latest checkpoint if True.
            exp_dir: root directory for experiment artifacts.
            log_dir: root directory for logs.
        """
        # Avoid mutable/shared default arguments: a `{}` default is shared
        # across calls, and a default CombinedLoss() instance (evaluated once
        # at def time) would be shared by every solver instance.
        if optim_args is None:
            optim_args = {}
        if loss_func is None:
            loss_func = additional_losses.CombinedLoss()

        self.device = device
        self.model = model

        self.model_name = model_name
        self.labels = labels
        self.num_epochs = num_epochs
        # Move the loss module to the GPU when available.
        if torch.cuda.is_available():
            self.loss_func = loss_func.cuda(device)
        else:
            self.loss_func = loss_func
        self.optim = optim(model.parameters(), **optim_args)
        self.scheduler = lr_scheduler.StepLR(self.optim,
                                             step_size=lr_scheduler_step_size,
                                             gamma=lr_scheduler_gamma)

        exp_dir_path = os.path.join(exp_dir, exp_name)
        common_utils.create_if_not(exp_dir_path)
        common_utils.create_if_not(os.path.join(exp_dir_path, CHECKPOINT_DIR))
        self.exp_dir_path = exp_dir_path

        self.log_nth = log_nth
        self.logWriter = LogWriter(num_class, log_dir, exp_name,
                                   use_last_checkpoint, labels)

        self.use_last_checkpoint = use_last_checkpoint

        self.start_epoch = 1
        self.start_iteration = 1

        self.best_ds_mean = 0
        self.best_ds_mean_epoch = 0

        if use_last_checkpoint:
            self.load_checkpoint()
Code example #4
0
 def test_img(self, net_g, dataLoader, logwriter, args, user, step):
     """Evaluate net_g for one user and log that user's mean test loss.

     Args:
         net_g: network to evaluate; switched to eval mode here.
         dataLoader: DataLoader yielding (images, labels, w) batches.
         logwriter: writer exposing add_scalar.
         args: namespace; args.gpu != -1 moves tensors to CUDA.
         user: user/client id embedded in the scalar tag.
         step: global step used as the x-axis for the logged scalar.
     """
     net_g.eval()
     test_loss = 0.0
     loss_func = additional_losses.CombinedLoss()
     # Evaluation only: disable autograd to save memory and time.
     with torch.no_grad():
         for batch_idx, (images, labels, w) in enumerate(dataLoader):
             images = images.type(torch.FloatTensor)
             labels = labels.type(torch.LongTensor)
             w = w.type(torch.FloatTensor)
             if args.gpu != -1:
                 images, labels, w = images.cuda(), labels.cuda(), w.cuda()
             log_probs = net_g(images)
             # Sum up batch loss.
             test_loss += loss_func(log_probs, labels, w).item()
     # NOTE(review): dividing a sum of per-batch losses by the dataset size
     # assumes CombinedLoss returns a batch-summed value — confirm reduction.
     test_loss /= len(dataLoader.dataset)
     logwriter.add_scalar('Test_Loss_User {:3d}'.format(user), test_loss,
                          step)
Code example #5
0
 def __init__(self,
              args,
              dataset=None,
              idxs=None,
              logwriter=None,
              user_id=None,
              testLoader=None,
              epoch=None):
     """Hold per-client state for federated local training.

     Stores the run configuration, logging handles, and loss, and builds a
     shuffled training DataLoader over the client's dataset.

     Note: `idxs` is accepted for interface compatibility but not used —
     the full `dataset` is wrapped in the training loader directly.
     """
     self.args = args
     self.logwriter = logwriter
     self.user_id = user_id
     self.test_loader = testLoader
     self.epoch = epoch
     self.loss_func = additional_losses.CombinedLoss()
     self.selected_clients = []
     # Shuffled mini-batch loader; batch size comes from the run config.
     self.ldr_train = DataLoader(dataset,
                                 batch_size=self.args.local_bs,
                                 shuffle=True)