Example #1
def train(net, train_set, NUM_EPOCHS):
    print("Started training")
    criterion = nn.MSELoss()
    train_loss = []
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
    if torch.cuda.is_available():
        net = net.cuda()
        criterion = criterion.cuda()

    for epoch in range(NUM_EPOCHS):
        running_loss = 0.0
        for i, data in enumerate(train_set, 0):
            img = data.get("image")
            if torch.cuda.is_available():
                img = img.cuda()
            # Flatten each image into a vector for the fully-connected autoencoder.
            img = img.view(img.size(0), -1)
            optimizer.zero_grad()
            outputs = net(img)
            loss = criterion(outputs, img)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        epoch_loss = running_loss / len(train_set)
        train_loss.append(epoch_loss)
        print('Epoch {} of {}, Train Loss: {:.3f}'.format(
            epoch + 1, NUM_EPOCHS, epoch_loss))

        # if epoch % 5 == 0:
        #     save_decoded_image(outputs.cpu().data, epoch)
    print("Finished training")
    return train_loss
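The commented-out lines above call a save_decoded_image helper that is not part of the snippet. Below is a minimal sketch of what such a helper could look like; the function name comes from the comment, while the output directory and the single-channel 28x28 image shape are assumptions made purely for illustration.

import os
from torchvision.utils import save_image

def save_decoded_image(img, epoch, img_shape=(1, 28, 28), out_dir="decoded_images"):
    # Restore the flattened batch to (N, C, H, W) before writing an image grid to disk.
    os.makedirs(out_dir, exist_ok=True)
    img = img.view(img.size(0), *img_shape)
    save_image(img, os.path.join(out_dir, "decoded_epoch_{}.png".format(epoch)))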
Example #2
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F

from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from torchvision.utils import save_image

from autoencoder import Autoencoder
from VRUDataset import VRUDataset

NUM_EPOCHS = 3
LEARNING_RATE = 1e-3
BATCH_SIZE = 32
net = Autoencoder()
if torch.cuda.is_available():
    net = net.cuda()
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
train_set = VRUDataset(
    transform=transform,
    json_path=
    "C:\\Users\\shubh\\OneDrive\\Desktop\\ENTR 390\\Code\\project-pineapple\\train.json",
    data_path="C:\\Users\\shubh\\OneDrive\\Desktop\\ENTR 390\\Dataset\\Train")
val_set = VRUDataset(
    transform=transform,
    json_path=
    "C:\\Users\\shubh\\OneDrive\\Desktop\\ENTR 390\\Code\\project-pineapple\\val.json",
    data_path="C:\\Users\\shubh\\OneDrive\\Desktop\\ENTR 390\\Dataset\\Val")
train_set = DataLoader(train_set, shuffle=True, batch_size=BATCH_SIZE)
val_set = DataLoader(val_set, shuffle=True, batch_size=BATCH_SIZE)
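With the data loaders in place, the training entry point from Example #1 could be wired up as follows (a sketch, assuming the two snippets are combined into one script; the nn and optim imports that train() relies on are added here because neither snippet shows them):

from torch import nn, optim

train_loss = train(net, train_set, NUM_EPOCHS)

# Plot the per-epoch loss history returned by train() and save it to disk.
plt.plot(range(1, NUM_EPOCHS + 1), train_loss)
plt.xlabel("Epoch")
plt.ylabel("Train MSE loss")
plt.savefig("train_loss.png")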
Example #3
class Net(BaseNet):
    def __init__(self,
                 name,
                 input_dim,
                 output_dim,
                 n_hid,
                 n_bottleneck=None,
                 lr=1e-4,
                 weight_decay=0,
                 cuda=True):
        super(Net, self).__init__()
        cprint('c', '\nNet:')
        self.name = name
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.n_bottleneck = n_bottleneck
        self.n_hid = n_hid
        self.cuda = cuda
        self.create_net()
        self.create_opt(lr, weight_decay)

    def create_net(self):

        if self.name == 'feed_forward':
            self.model = feed_forward(self.input_dim, self.output_dim,
                                      self.n_hid)
            self.J = nn.MSELoss(reduction='mean')
        elif self.name == 'autoencoder':
            self.model = Autoencoder(self.input_dim, self.output_dim,
                                     self.n_hid, self.n_bottleneck)
            self.J = nn.MSELoss(reduction='mean')
        elif self.name == 'ff_mlpg':
            self.model = ff_mlpg(self.input_dim, self.output_dim, self.n_hid)
            self.J = Nloss_GD(self.input_dim)
        else:
            raise ValueError('Unknown net name: {}'.format(self.name))

        if self.cuda:
            self.model = self.model.cuda()
            self.J = self.J.cuda()
        print('    Total params: %.2fM' %
              (sum(p.numel() for p in self.model.parameters()) / 1000000.0))

    def create_opt(self, lr=1e-4, weight_decay=0):
        self.lr = lr
        self.weight_decay = weight_decay
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.lr,
                                          weight_decay=self.weight_decay)
        self.schedule = None  # [-1] #[50,200,400,600]

    def fit(self, x, y):
        x, y = to_variable(var=(x, y), volatile=False, cuda=self.cuda)

        self.optimizer.zero_grad()
        if self.name == 'ff_mlpg':
            out, sq_Beta = self.model(x)
            loss = self.J(out, y, sq_Beta)
            loss.backward()
            self.optimizer.step()
            return loss.item(), sq_Beta.abs().mean().data
        elif self.name == 'feed_forward':
            out = self.model(x)
            loss = self.J(out, y)
        else:
            out = self.model(x)
            loss = self.J(out, y)
        loss.backward()
        self.optimizer.step()

        return loss.item()

    def eval(self, x, y, train=False):
        with torch.no_grad():
            x, y = to_variable(var=(x, y), volatile=True, cuda=self.cuda)

            if self.name == 'ff_mlpg':
                out, sq_Beta = self.model(x)
                loss = self.J(out, y, sq_Beta)
                return loss.item(), sq_Beta.abs().mean().data
            elif self.name == 'feed_forward':
                out = self.model(x)
                loss = self.J(out, y)
            else:
                out = self.model(x)
                loss = self.J(out, y)
            return loss.item()

    def predict(self, x, train=False):
        with torch.no_grad():
            x, = to_variable(var=(x, ), volatile=False, cuda=self.cuda)

            if self.name == 'ff_mlpg':
                out, sq_Beta = self.model(x)
                return out.data, sq_Beta.abs().data
            else:
                out = self.model(x)
                return out.data
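A minimal usage sketch for the wrapper above, assuming feed_forward, to_variable and the other helpers it depends on are importable, and that x_train / y_train / x_val / y_val are appropriately shaped tensors (all of these names and the layer sizes are placeholders):

net = Net(name='feed_forward', input_dim=64, output_dim=10, n_hid=128,
          lr=1e-4, cuda=torch.cuda.is_available())

for epoch in range(100):
    # fit() performs one optimizer step and returns the scalar training loss.
    tr_loss = net.fit(x_train, y_train)
    va_loss = net.eval(x_val, y_val)
    print('epoch %3d  train %.4f  val %.4f' % (epoch, tr_loss, va_loss))

# predict() returns the raw network outputs with gradients disabled.
y_pred = net.predict(x_val)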