Example #1
    def solve(self,iterations):
        for i in tqdm(range(iterations),desc='fitting pde'):
            # draw random collocation points from the interior and boundaries
            X_samples = self.sample_domain(self.bs)
            # adding a zero tensor that requires grad leaves the values unchanged
            # but attaches the samples to the autograd graph, so action() can
            # differentiate the field with respect to the sample coordinates
            X_samples += torch.zeros_like(X_samples,requires_grad=True)
            dX_samples = self.sample_boundary(self.bs)
            dX_samples += torch.zeros_like(dX_samples,requires_grad=True)
            iX_samples = self.sample_inner_boundary(self.bs)
            iX_samples += torch.zeros_like(iX_samples,requires_grad=True)
            self.optimizer.zero_grad()
            loss = self.action(X_samples,dX_samples,iX_samples)
            loss.backward()
            self.optimizer.step()
            with self.logger as do_log:
                if do_log: self.log(loss,i)

# @export
# class Poisson(NeuralPDE):
#     def sample_domain(self,N):
#         return torch.rand(N,2)
#     def sample_boundary(self,N):
#         # pick one of the four walls of the unit square for each sample
#         side = torch.randint(low=0,high=4,size=(N,))
#         t = torch.rand(N)
#         # (truncated in the source; a plausible completion sampling the
#         # four walls of the unit square)
#         walls = torch.stack([torch.stack([t,torch.zeros(N)],dim=-1),
#                              torch.stack([t,torch.ones(N)],dim=-1),
#                              torch.stack([torch.zeros(N),t],dim=-1),
#                              torch.stack([torch.ones(N),t],dim=-1)])
#         return walls[side,torch.arange(N)]
#     def action(self,X,B):
#         phi = self.model(X)
#         # assumes grad = torch.autograd.grad; phi.sum() gives the scalar
#         # output that grad() needs, and the result tuple is unpacked
#         dphi, = grad(phi.sum(),X,create_graph=True)
#         phi_B = self.model(B)
#         # Dirichlet energy plus source coupling, with a soft boundary penalty
#         lagrangian = (dphi**2).sum(-1)/2 + phi*self.rho(X)
#         boundary = (phi_B**2)
#         return lagrangian.mean()+boundary.mean()
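The torch.zeros_like(..., requires_grad=True) addition in solve() is what lets action() take gradients with respect to the sample coordinates: adding a zero tensor that requires grad leaves the values unchanged but attaches the samples to the autograd graph (the in-place += behaves the same as the out-of-place form below). A minimal self-contained sketch of the trick, with a toy quadratic field standing in for the model:

import torch

X = torch.rand(8,2)                              # plain samples, no grad tracking
X = X + torch.zeros_like(X,requires_grad=True)   # values unchanged, now differentiable

phi = (X**2).sum(-1)                             # toy field phi(x) = |x|^2
dphi, = torch.autograd.grad(phi.sum(),X,create_graph=True)
print(torch.allclose(dphi,2*X))                  # True: the gradient of |x|^2 is 2x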

Example #2
 def solve(self,iterations):
     for i in tqdm(range(iterations),desc='fitting pde'):
         X_samples = self.sample_domain(self.bs)
         # same trick as in Example #1: attach the samples to the autograd graph
         X_samples += torch.zeros_like(X_samples,requires_grad=True)
         dX_samples = self.sample_boundary(self.bs)
         dX_samples += torch.zeros_like(dX_samples,requires_grad=True)
         self.optimizer.zero_grad()
         loss = self.action(X_samples,dX_samples)
         loss.backward()
         self.optimizer.step()
         with self.logger as do_log:
             if do_log: self.log(loss,i)
Example #3
def main(args):
    model = SmallE2(channel_in=1, n_classes=10, rot_n=10)

    dataset = datasets.RotMNIST("~/datasets/", train=True)
    trainloader = DataLoader(dataset, batch_size=args.batch_size)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.wd)
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model = model.cuda()
        print("Using Cuda")

    ## save init model ##
    fname = "/e2_init.pt"
    torch.save(model.state_dict(), args.dir + fname)

    criterion = torch.nn.CrossEntropyLoss()

    for epoch in tqdm(range(args.epochs)):  # loop over the dataset multiple times

        epoch_loss = 0
        batches = 0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            if use_cuda:
                inputs, labels = inputs.cuda(), labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            batches += 1
        # report the mean loss over the epoch rather than every batch
        print(f"Epoch {epoch}: mean loss = {epoch_loss / batches:.4f}")

    fname = "/e2_epoch" + str(epoch + 1) + ".pt"
    torch.save(model.state_dict(), args.dir + fname)
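Both files written by main() are plain state_dict checkpoints, so restoring one only needs a model built with the same constructor arguments. A minimal sketch, reusing the SmallE2 signature and args.dir from above:

import torch

model = SmallE2(channel_in=1, n_classes=10, rot_n=10)
state = torch.load(args.dir + "/e2_init.pt", map_location="cpu")
model.load_state_dict(state)
model.eval()  # disable dropout/batch-norm updates before evaluating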
Example #4
 def train(self, num_epochs=100):
     """ The main training loop"""
     start_epoch = self.epoch
     steps_per_epoch = len(self.dataloaders['train'])
     step = 0
     for self.epoch in tqdm(range(start_epoch, start_epoch + num_epochs),
                            desc='train'):
         for i, minibatch in enumerate(self.dataloaders['train']):
             step = i + self.epoch * steps_per_epoch
             # pass a fractional epoch so schedules can interpolate within an epoch
             self.step(self.epoch + i / steps_per_epoch, minibatch)
             with self.logger as do_log:
                 if do_log: self.logStuff(step, minibatch)
     self.epoch += 1  # so a subsequent train() call resumes at the next epoch
     self.logStuff(step)
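The with self.logger as do_log: idiom used in these training loops is worth spelling out: the logger's __enter__ returns a boolean saying whether this iteration should log, so the logging rate is decided by the logger rather than hard-coded in the loop. The logger class itself is not shown in these snippets; a hypothetical minimal version that logs at most once per minPeriod seconds could look like:

import time

class RateLimitedLogger:
    """Context manager whose __enter__ reports whether enough
    wall-clock time has passed since the last log."""
    def __init__(self, minPeriod=1.0):
        self.minPeriod = minPeriod
        self._last = -float('inf')
    def __enter__(self):
        now = time.time()
        do_log = (now - self._last) >= self.minPeriod
        if do_log:
            self._last = now
        return do_log
    def __exit__(self, *exc_info):
        return False  # never suppress exceptions from the with-block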
Example #5
 def train(self, num_epochs=100):
     """ The main training loop"""
     start_epoch = self.epoch
     steps_per_epoch = len(self.dataloaders['train'])
     step = 0
     for self.epoch in tqdm(range(start_epoch + 1,
                                  start_epoch + num_epochs + 1),
                            desc='train'):
         for i, minibatch in enumerate(self.dataloaders['train']):
             step = i + (self.epoch - 1) * steps_per_epoch
             with self.logger as do_log:
                 if do_log: self.logStuff(step, minibatch)
             self.step(minibatch, step)
             for sched in self.lr_schedulers:
                 sched.step(step / steps_per_epoch)
     self.logStuff(step)
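sched.step(step / steps_per_epoch) passes a fractional epoch, so the schedulers must accept one; the snippets never show how self.lr_schedulers is built. One compatible (hypothetical) construction uses torch.optim.lr_scheduler.LambdaLR, whose multiplier lambda is re-evaluated at whatever epoch value step() receives (passing an explicit epoch is deprecated in recent PyTorch but still supported):

import math
import torch

model = torch.nn.Linear(2, 2)  # stand-in model for the sketch
optimizer = torch.optim.Adam(model.parameters(), lr=3e-3)

def cos_lr(num_epochs):
    # LR multiplier as a function of a (possibly fractional) epoch
    return lambda e: 0.5 * (1 + math.cos(math.pi * e / num_epochs))

lr_schedulers = [torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=cos_lr(100))]
lr_schedulers[0].step(42.5)  # e.g. halfway through epoch 42
print(optimizer.param_groups[0]['lr'])  # base lr scaled by the cosine multiplier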
Example #6
def main(args):
    net = smallnet(in_channels=1, num_targets=10)
    augerino = models.UniformAug()
    model = models.AugAveragedModel(net, augerino, ncopies=args.ncopies)

    start_widths = torch.ones(6) * -5.
    start_widths[2] = 1.
    model.aug.set_width(start_widths)

    softplus = torch.nn.Softplus()

    dataset = datasets.RotMNIST("~/datasets/", train=True)
    trainset = Subset(dataset, range(50000))
    valset = Subset(dataset, range(50000, 60000))
    trainloader = DataLoader(trainset, batch_size=args.batch_size)
    valloader = DataLoader(valset, batch_size=args.batch_size)

    optimizer = torch.optim.Adam(
        [{'name': 'model', 'params': model.model.parameters(), 'weight_decay': args.wd},
         {'name': 'aug', 'params': model.aug.parameters(), 'weight_decay': 0.}],
        lr=args.lr)
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model = model.cuda()
        print("Using Cuda")

    ## save init model ##
    fname = "/model" + str(args.aug_reg) + "_init.pt"
    torch.save(model.state_dict(), args.dir + fname)

    criterion = losses.safe_unif_aug_loss
    logger = []
    for epoch in range(args.epochs):  # loop over the dataset multiple times

        epoch_loss = 0
        batches = 0
        trainbar = tqdm(trainloader, desc=f"Training epoch {epoch}")
        for i, data in enumerate(trainbar, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            if use_cuda:
                inputs, labels = inputs.cuda(), labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs)
            loss = criterion(outputs, labels, model, reg=args.aug_reg)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            batches += 1
            # print(epoch, loss.item(), softplus(model.aug.width).detach().data)
            # one log row per batch: raw widths, their grads, then the loss
            log = model.aug.width.tolist()
            log += model.aug.width.grad.data.tolist()
            log += [loss.item()]
            logger.append(log)
            if not i % 10:
                train_acc = (outputs.argmax(-1) == labels).float().mean().cpu().item()
                trainbar.set_postfix(train_acc=train_acc)
        with torch.no_grad():
            model.eval()
            val_accs = []
            for i, data in enumerate(valloader):
                inputs, labels = data
                if use_cuda:
                    inputs, labels = inputs.cuda(), labels.cuda()
                outputs = model(inputs)
                acc = (outputs.argmax(-1) == labels).float().mean().cpu().item()
                val_accs.append(acc)
            print(f"Epoch {epoch} val accuracy: {np.mean(val_accs):.4f}")
            model.train()

    fname = "/model" + str(args.aug_reg) + ".pt"
    torch.save(model.state_dict(), args.dir + fname)
    df = pd.DataFrame(logger)
    df.to_pickle(args.dir + "/auglog_" + str(args.aug_reg) + ".pkl")
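Each row appended to logger holds the raw aug.width entries, then their gradients, then the batch loss. The commented-out debug print in the loop applies softplus to model.aug.width, which suggests the widths are stored pre-softplus (hence the Softplus module created at the top). A sketch of reading the log back and recovering the final learned augmentation ranges, assuming the six-parameter layout of start_widths:

import pandas as pd
import torch

df = pd.read_pickle(args.dir + "/auglog_" + str(args.aug_reg) + ".pkl")
# columns 0-5: raw widths, 6-11: width grads, 12: batch loss
raw_widths = torch.tensor(df.iloc[-1, :6].to_numpy(), dtype=torch.float32)
print(torch.nn.functional.softplus(raw_widths))  # effective learned ranges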