Example #1
def train():
    ann = myModel.Net(inputSize, firstHiddenSize, secondHiddenSize,
                      outputSize).double()

    # we set up the lossFunction as the mean square error
    lossFunction = torch.nn.MSELoss()

    # we use an optimizer that implements stochastic gradient descent
    optimizer_batch = torch.optim.SGD(ann.parameters(), lr=0.05)

    inputTensor, outputTensor = loadData()

    lossList = []
    avgLossList = []

    noBatches = int(len(inputTensor) / batchSize)

    for epoch in range(noEpochs):
        totalLoss = 0

        for batch in range(noBatches):
            # we prepare the current batch -- note the tensor slicing
            batchInput = inputTensor[batch * batchSize:(batch + 1) * batchSize]
            batchOutput = outputTensor[batch * batchSize:(batch + 1) * batchSize]

            # we compute the output for this batch
            prediction = ann(batchInput.double())

            # we compute the loss for this batch
            loss = lossFunction(prediction, batchOutput)

            # we save it for graphics (as a plain float, so the autograd graph
            # is not kept alive)
            lossList.append(loss.item())
            totalLoss += loss.item()

            # we set up the gradients for the weights to zero (important in pytorch)
            optimizer_batch.zero_grad()

            # we compute automatically the variation for each weight (and bias) of the network
            loss.backward()

            # we compute the new values for the weights
            optimizer_batch.step()

        avgLossList.append(totalLoss / noBatches)

        # we print the loss over the whole dataset every 100th epoch
        if epoch % 100 == 99:
            y_pred = ann(inputTensor.double())
            loss = lossFunction(y_pred, outputTensor)
            print('\repoch: {}\tLoss = {:.5f}'.format(epoch, loss.item()))

    plt.plot(avgLossList)
    plt.savefig("lossPlot.png")
    plt.show()
    saveData(ann)
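
Every snippet here imports a myModel module that is not shown, and the Net constructor signatures vary between examples (Example #1 passes two hidden sizes, Example #6 adds a hidden-layer count). A minimal single-hidden-layer sketch consistent with the calls in Examples #3, #4 and #7 might look like this; the layer names, the ReLU activation and the default sizes are assumptions:

import torch
import torch.nn.functional as F


class Net(torch.nn.Module):
    def __init__(self, n_feature=2, n_hidden=100, n_output=1):
        super().__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer
        self.out = torch.nn.Linear(n_hidden, n_output)      # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))  # nonlinear hidden activation
        return self.out(x)          # linear output, suitable for regression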
Example #2
    def __init__(self, filePath="myNet.pt"):
        self._filePath = filePath
        self.ann = myModel.Net()

        # self.ann.load_state_dict(torch.load(filePath))
        # self.ann.eval()

        self.commands = {
            '1': (self.visualizeParameters, 'Visualize parameters'),
            '2': (self.inputValues, 'Input values'),
            '3': (self.plot, 'Plot values'),
            '4': (self.train, 'Train'),
            '5': (self.loadAnn, 'Load ann')
        }
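
The commands dictionary maps a menu key to a (handler, description) pair. A plausible driver loop that consumes it is sketched below; the run method name and the '0' exit key are assumptions, since the snippet does not show them:

    def run(self):
        while True:
            # print the menu built from the command table
            for key, (_, description) in sorted(self.commands.items()):
                print('{} - {}'.format(key, description))
            choice = input('> ').strip()
            if choice == '0':  # assumed exit convention
                break
            if choice in self.commands:
                handler, _ = self.commands[choice]
                handler()
            else:
                print('Unknown command')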
Example #3
import torch

import myModel
import math
filePath = "myNet.pt"
ann = myModel.Net(2, 100, 1)

ann.load_state_dict(torch.load(filePath))
ann.eval()

while True:
    x = float(input("x = "))
    y = float(input("y = "))
    print(ann(torch.tensor((x, y))).tolist())
    print(math.sin(x + y / math.pi))
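
Since this script only runs inference, the forward passes could be wrapped in torch.no_grad() so PyTorch skips building the autograd graph; a sketch of the same query loop with that change:

with torch.no_grad():
    while True:
        x = float(input("x = "))
        y = float(input("y = "))
        print(ann(torch.tensor((x, y))).tolist())  # network prediction
        print(math.sin(x + y / math.pi))           # reference value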
Example #4
import torch

import myModel

# Creates a one-dimensional tensor of size 100 whose values are evenly spaced
# from -1 to 1, inclusive, placing the values in a tensor like
# so x = [[-1], [-0.98], ..., [0.98], [1]]
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)

# print(x)

y = x.pow(2) + 0.2
# the target function the network should learn to approximate

# print(y)


# we set up the lossFunction as the mean square error
lossFunction = torch.nn.MSELoss()

# we create the ANN
ann = myModel.Net(n_feature=1, n_hidden=10, n_output=1)

print(ann)
# we use an optimizer that implements stochastic gradient descent
optimizer_batch = torch.optim.SGD(ann.parameters(), lr=0.2)

# we memorize the losses for some graphics
loss_list = []
avg_loss_list = []

# we set up the environment for training in batches
batch_size = 16
n_batches = int(len(x) / batch_size)
print(n_batches)

for epoch in range(2000):
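    # (the original snippet is truncated here; the following body is a sketch,
    #  assumed from the identical setup in Examples #1 and #6)
    for batch in range(n_batches):
        # slice out the current mini-batch
        batch_x = x[batch * batch_size:(batch + 1) * batch_size]
        batch_y = y[batch * batch_size:(batch + 1) * batch_size]

        prediction = ann(batch_x)                 # forward pass
        loss = lossFunction(prediction, batch_y)  # batch loss
        loss_list.append(loss.item())             # keep a float, not the graph

        optimizer_batch.zero_grad()  # reset accumulated gradients
        loss.backward()              # backpropagate
        optimizer_batch.step()       # update the weights

    avg_loss_list.append(sum(loss_list) / len(loss_list))
    loss_list.clear()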
Example #5
    def loadAnn(self):
        self.ann = myModel.Net()

        self.ann.load_state_dict(torch.load(self._filePath))
        self.ann.eval()
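
The class shows no matching save method; the natural counterpart, mirroring how Example #6 persists its model (the method name is an assumption):

    def saveAnn(self):
        # write the trained weights to the same path loadAnn reads from
        torch.save(self.ann.state_dict(), self._filePath)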
Example #6
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('Using {} device'.format(device))

    pairs = torch.load('mydataset1.dat')
    # print(pairs)

    x = torch.empty(0, 2).to(device)
    y = torch.empty(0, 1).to(device)

    for pair in pairs:
        x_ = torch.tensor([pair[0], pair[1]]).to(device)
        x = torch.vstack((x, x_))
        y = torch.vstack((y, pair[2].to(device)))

    # print(y)
    # return

    # we set up the lossFunction as the mean square error
    loss_fn = torch.nn.MSELoss()

    # we create the ANN
    ann = myModel.Net(N_INPUT, N_HIDDEN, N_OUTPUT, HIDDEN_LAYERS).to(device)

    # print(ann)
    # we use an optimizer that implements stochastic gradient descent
    optimizer = torch.optim.SGD(ann.parameters(), lr=LEARNING_RATE)

    # we memorize the losses for some graphics
    loss_list = []
    avg_loss_list = []

    # we set up the environment for training in batches
    batch_size = BATCH_SIZE
    n_batches = int(math.ceil(len(x) / batch_size))
    print(n_batches)

    for epoch in range(EPOCHS):
        for batch in range(n_batches):
            # we prepare the current batch -- note the tensor slicing
            batch_X = x[batch * batch_size:(batch + 1) * batch_size]
            batch_y = y[batch * batch_size:(batch + 1) * batch_size]

            # we compute the output for this batch
            prediction = ann(batch_X)

            # we compute the loss for this batch
            loss = loss_fn(prediction, batch_y)

            # we save it for graphics
            loss_list.append(loss.item())

            # we set up the gradients for the weights to zero (important in pytorch)
            optimizer.zero_grad()

            # we compute automatically the variation for each weight (and bias) of the network
            loss.backward()

            # we compute the new values for the weights
            optimizer.step()

        # we print the loss over the whole dataset every 500th epoch
        if (epoch + 1) % 500 == 0:
            y_pred = ann(x)
            loss = loss_fn(y_pred, y)
            print('\repoch: {}\tLoss =  {:.5f}'.format(epoch, loss.item()))

        avg_loss_list.append(sum(loss_list) / len(loss_list))
        loss_list.clear()

    # save the model to file
    torch.save(ann.state_dict(), CURRENT_NETWORK_PATH)

    # visualise the parameters for the ann (aka weights and biases)
    # for name, param in ann.named_parameters():
    #     if param.requires_grad:
    #         print(name, param.data)

    # loss_list = [l.item() for l in loss_list]

    plt.plot(avg_loss_list)
    plt.title('Loss vs. Epoch')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.show()
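
Example #6 takes its hyperparameters from a constants module that is not shown. A plausible constants.py is sketched below; only the names come from the code, every value is a placeholder:

# constants.py -- hypothetical values, only the names appear in the snippet
N_INPUT = 2
N_HIDDEN = 100
N_OUTPUT = 1
HIDDEN_LAYERS = 2
LEARNING_RATE = 0.05
BATCH_SIZE = 16
EPOCHS = 2000
CURRENT_NETWORK_PATH = 'myNet.pt'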
Example #7
import torch

import myModel

# Creates a one-dimensional tensor of size 100 whose values are evenly spaced
# from -1 to 1, inclusive, placing the values in a tensor like
# so x = [[-1], [-0.98], ..., [0.98], [1]]
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)

# print(x)

y = x.pow(2) + 0.2
# the target function the network should learn to approximate

# print(y)

# we set up the lossFunction as the mean square error
lossFunction = torch.nn.MSELoss()

# we create the ANN
ann = myModel.Net(n_input=1, n_hidden=10, n_output=1)

print(ann)
# we use an optimizer that implements stochastic gradient descent
optimizer_batch = torch.optim.SGD(ann.parameters(), lr=0.2)

# we memorize the losses for some graphics
loss_list = []
avg_loss_list = []

# we set up the environment for training in batches
batch_size = 16
n_batches = int(len(x) / batch_size)
print(n_batches)

for epoch in range(2000):
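    # (the original snippet is truncated here; the body presumably repeats the
    #  mini-batch pattern sketched in Example #4 above)
    for batch in range(n_batches):
        batch_x = x[batch * batch_size:(batch + 1) * batch_size]
        batch_y = y[batch * batch_size:(batch + 1) * batch_size]
        loss = lossFunction(ann(batch_x), batch_y)
        loss_list.append(loss.item())
        optimizer_batch.zero_grad()
        loss.backward()
        optimizer_batch.step()
    avg_loss_list.append(sum(loss_list) / len(loss_list))
    loss_list.clear()

# Example #9 below loads a Net(1, 10, 1) from "myNet.pt", which matches this
# 1-10-1 network, so the trained weights are presumably saved as in Example #6:
torch.save(ann.state_dict(), "myNet.pt")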
Example #8
import torch

import myModel

x = []
y = []

with open("mydataset.txt", "r") as file:
    n = int(file.readline().strip())
    for _ in range(n):
        line = file.readline().strip()
        line_elements = line.split(',')
        x.append((float(line_elements[0]), float(line_elements[1])))
        y.append(float(line_elements[2]))

x = torch.tensor(x)
y = torch.unsqueeze(torch.tensor(y), dim=1)

# we set up the lossFunction as the mean square error
lossFunction = torch.nn.MSELoss()

# we create the ANN
ann = myModel.Net()

# we use an optimizer that implements stochastic gradient descent
optimizer_batch = torch.optim.SGD(ann.parameters(), lr=0.001)

# we memorize the losses forsome graphics
loss_list = []

# we set up the environment for training in batches
batch_size = 50
n_batches = int(len(x) / batch_size)

for epoch in range(2500):

    for batch in range(n_batches):
        # we prepare the current batch -- note the tensor slicing
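        # (the original snippet is truncated here; the slicing presumably
        #  matches the pattern in Examples #1 and #6)
        batch_x = x[batch * batch_size:(batch + 1) * batch_size]
        batch_y = y[batch * batch_size:(batch + 1) * batch_size]

        loss = lossFunction(ann(batch_x), batch_y)  # forward pass and loss
        loss_list.append(loss.item())

        optimizer_batch.zero_grad()  # reset accumulated gradients
        loss.backward()              # backpropagate
        optimizer_batch.step()       # update the weights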
Example #9
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 27 14:20:51 2021

@author: tudor
"""

import torch
import torch.nn.functional as F

import myModel

# we load the model
filepath = "myNet.pt"
ann = myModel.Net(1, 10, 1)

ann.load_state_dict(torch.load(filepath))
ann.eval()

# visualise the parameters for the ann (aka weights and biases)
# for name, param in ann.named_parameters():
#     if param.requires_grad:
#         print (name, param.data)


x = float(input("x = "))
x = torch.tensor([x])
print(ann(x).tolist())
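
Examples #4 and #7 train a 1-10-1 network on y = x.pow(2) + 0.2, the same shape this script loads, so a quick sanity check against that target could look like the sketch below (assuming that is indeed the function the saved net was trained on):

with torch.no_grad():
    xs = torch.unsqueeze(torch.linspace(-1, 1, 5), dim=1)
    for xv, pred in zip(xs, ann(xs)):
        # compare the prediction with the assumed target x^2 + 0.2
        print('x = {:+.2f}  prediction = {:+.4f}  target = {:+.4f}'.format(
            xv.item(), pred.item(), xv.item() ** 2 + 0.2))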
Example #10
import torch

import myModel
from constants import *
import matplotlib.pyplot as plt


def loadData():
    return torch.load("mydataset.dat")


lossFunction = torch.nn.MSELoss()

ann = myModel.Net(n_feature=2, n_hidden=128, n_output=1).double()
data = loadData()
print(ann)

# stochastic gradient descent
optimizer_batch = torch.optim.SGD(ann.parameters(), lr=LEARNING_RATE)

loss_list = []
average_loss_list = []

n_batches = len(data) // BATCH_SIZE

data_points = torch.tensor([(x[0], x[1]) for x in data])
data_values = torch.unsqueeze(torch.tensor([x[2] for x in data]), dim=1)

splitDataPoints = torch.split(data_points, BATCH_SIZE)
splitDataValues = torch.split(data_values, BATCH_SIZE)
for epoch in range(NUMBER_EPOCHS):
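    # (the original snippet is truncated here; a sketched body that consumes
    #  the batches produced by torch.split above)
    for batch_points, batch_values in zip(splitDataPoints, splitDataValues):
        # the network was created with .double(), so inputs and targets are
        # converted to the same dtype
        prediction = ann(batch_points.double())                 # forward pass
        loss = lossFunction(prediction, batch_values.double())  # batch loss
        loss_list.append(loss.item())

        optimizer_batch.zero_grad()  # reset accumulated gradients
        loss.backward()              # backpropagate
        optimizer_batch.step()       # update the weights

    average_loss_list.append(sum(loss_list) / len(loss_list))
    loss_list.clear()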