def Train(self, epochs):
    finfo = FireInfo()
    acc = 0
    for epoch in range(epochs):
        for i, (images, labels) in enumerate(self.loader):
            images = images.to(self.device)
            labels = labels.to(self.device)

            self.optimizer.zero_grad()
            out = self.net(images)
            loss = self.criterion(out, labels)
            loss.backward()
            self.optimizer.step()

            cur_lr = AuxF.get_lr(self.optimizer)
            self.lr_scheduler.step()

            if i % 2 == 0:
                print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f} Lr: {:e}"
                      .format(epoch + 1, epochs, i + 1, len(self.loader),
                              loss.item(), cur_lr))
                finfo.update(loss.item(), acc, cur_lr)

            if i + 1 == len(self.loader):
                self.save_model(loss, epoch)
                # accuracy is measured on the last training batch of the epoch,
                # not on a held-out test set
                out_v = out.detach()
                _, predicted = torch.max(out_v, 1)
                total = labels.size(0)
                correct = (predicted == labels).sum().item()
                acc = 100 * correct / total
                print('Accuracy of the model on the last training batch: {} %'.format(acc))

    finfo.save()
    finfo.display()
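# AuxF.get_lr is a project helper whose source is not shown above. A minimal,
# hedged sketch of what such a helper typically does (assumption: it reads the
# learning rate of the optimizer's first parameter group):
def get_lr(optimizer):
    # current learning rate of the first parameter group
    for param_group in optimizer.param_groups:
        return param_group['lr']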
def Train(self, epochs):
    data_length = len(self.loader)
    for epoch in range(epochs):
        for i, (image, label) in enumerate(self.loader):
            image = image.to(self.device)
            label = label.to(self.device)

            self.optimizer.zero_grad()
            out = self.net(image)
            loss = self.criterion(out, label)
            loss.backward()
            self.optimizer.step()
            self.lr_scheduler.step()

            if i % 2 == 0:
                n_iter = epoch * data_length + i + 1
                self.summary.add_scalar('unet/loss', loss.item(), n_iter)
                self.summary.add_scalar('unet/lr', AuxF.get_lr(self.optimizer), n_iter)
                print('loss', loss.item())

            if i % 4 == 0:
                n_iter = epoch * data_length + i + 1
                im = torchvision.utils.make_grid(image.detach().cpu(), normalize=True)
                self.summary.add_image('unet/image', im, n_iter)
                hot = torchvision.utils.make_grid(out.detach().cpu(), normalize=True)
                self.summary.add_image('unet/final', hot, n_iter)

        if epoch % 10 == 0:
            self.save_model(epoch)
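# For reference, the image-logging pattern above can be reproduced in isolation.
# This is a self-contained sketch using the public torchvision/tensorboardX APIs;
# the log directory and the random tensors are placeholders, not project values.
import torch
import torchvision
from tensorboardX import SummaryWriter

writer = SummaryWriter(logdir='runs/unet_demo')           # placeholder log dir
batch = torch.rand(4, 3, 224, 224)                        # stand-in image batch
grid = torchvision.utils.make_grid(batch, normalize=True)
writer.add_image('unet/image', grid, global_step=1)       # same call shape as above
writer.close()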
def __init__(self, image_size=(3, 224, 224), classes=1, lr=0.01):
    self.gourd = os.path.join(AuxF.project_path(), 'gourd')
    self.boy = os.path.join(AuxF.project_path(), 'boy')
    self.num_class = classes
    self.image_size = image_size

    hedbsds = hedBSDS()
    self.loader = hedbsds.get_loader(1, self.image_size[1])
    print('init dataset done...')

    self.device = AuxF.device()
    self.net = UNet(self.image_size[0], classes)
    self.net.to(self.device)

    self.lr = lr
    self.criterion = nn.BCEWithLogitsLoss()
    self.optimizer = torch.optim.SGD(self.net.parameters(), lr=lr, momentum=0.9)
    self.lr_scheduler = self.LRScheduler()
    self.summary = SummaryWriter(logdir=AuxF.log_name(self.boy, 'unet'))
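# nn.BCEWithLogitsLoss combines a sigmoid with binary cross-entropy, so the UNet
# can emit raw logits without a final sigmoid layer. A tiny standalone example
# with made-up tensors, only to illustrate the expected shapes for classes=1:
import torch
import torch.nn as nn

criterion = nn.BCEWithLogitsLoss()
logits = torch.randn(2, 1, 224, 224)                      # raw output (N, 1, H, W)
target = torch.randint(0, 2, (2, 1, 224, 224)).float()    # binary edge mask
loss = criterion(logits, target)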
def __init__(self, num_class=100, lr=0.1):
    self.gourd = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gourd')
    self.num_class = num_class

    self.imagenet = miniImagenet()
    self.loader = self.imagenet.get_loader(128, 112, "train")
    print("init data")

    self.device = AuxF.device()
    self.net = MobileNetV2(num_class)
    self.net.to(self.device)

    self.lr = lr
    self.criterion = nn.CrossEntropyLoss()
    self.optimizer = torch.optim.SGD(self.net.parameters(), lr=lr, momentum=0.9)
    self.lr_scheduler = self.LRScheduler()
    print("init net")
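# AuxF.device() is another project helper not shown here; a sketch of the usual
# implementation (an assumption, not the project's verified code) that picks the
# GPU when one is visible:
import torch

def device():
    # prefer CUDA when available, otherwise fall back to CPU
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')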
def Lmbda_cosine_annealing(self):
    from samhi.auxiliary import AuxFunction as AuxF
    return AuxF.CosineAnnealing(self.optim)
def Lmbda_warm_restart(self):
    from samhi.auxiliary import AuxFunction as AuxF
    return AuxF.WarmRestart(self.optim)
def LRScheduler(self):
    return AuxF.WarmRestart(self.optimizer, T_max=20 * len(self.loader), factor=0.75)
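# AuxF.WarmRestart's implementation is not shown. As a point of reference only,
# stock PyTorch ships a comparable warm-restart cosine schedule; this is not the
# project's helper, and its arguments do not map one-to-one onto T_max/factor above.
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

model = torch.nn.Linear(8, 2)                                      # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=20, T_mult=1, eta_min=1e-5)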