Example no. 1
    def update_network(self, network):
        # Rebuild the model definition so that each hidden-layer width matches
        # the (possibly resized) network passed in, then transfer its weights.
        new_model_dict = copy.deepcopy(self.model_dict)

        for i, layer in enumerate(self.model_dict['network']['hidden_layer']):
            if i == 0:
                new_model_dict['network']['hidden_layer'][i]['units'] = network.state_dict()['input_layer.weight'].shape[0]
            else:
                new_model_dict['network']['hidden_layer'][i]['units'] = network.state_dict()['hidden_layers.{}.weight'.format(i - 1)].shape[0]

        # Create a new model from the updated definition and copy the weights over.
        updated_model = Network(new_model_dict)
        updated_model.load_state_dict(network.state_dict())

        self.network = updated_model.to(self.device)

        # Recreate the optimizer for the new parameter set, carrying over the
        # previous optimizer's settings (learning rate, etc.).
        new_optimizer = torch.optim.SGD(self.network.parameters(), lr=self.optimizer.state_dict()['param_groups'][0]['lr'])
        new_optimizer.load_state_dict(self.optimizer.state_dict())
        self.optimizer = new_optimizer
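A self-contained sketch of the shape-inference idea used above (illustrative only: SmallNet and its attribute names merely mirror the 'input_layer' / 'hidden_layers' state_dict keys referenced in the method; it is not the project's Network class):

import torch
import torch.nn as nn

class SmallNet(nn.Module):
    def __init__(self, in_features, hidden_units, out_features):
        super().__init__()
        self.input_layer = nn.Linear(in_features, hidden_units[0])
        self.hidden_layers = nn.ModuleList(
            nn.Linear(a, b) for a, b in zip(hidden_units[:-1], hidden_units[1:]))
        self.output_layer = nn.Linear(hidden_units[-1], out_features)

net = SmallNet(784, [500, 300], 10)
state = net.state_dict()

# Each weight tensor has shape (out_features, in_features), so .shape[0] is the
# layer width -- the same quantity update_network() writes back into the dict.
widths = [state['input_layer.weight'].shape[0]]
widths += [state['hidden_layers.{}.weight'.format(i)].shape[0]
           for i in range(len(net.hidden_layers))]
print(widths)  # -> [500, 300]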
Example no. 2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun  8 23:21:17 2016

@author: Guanhao Wu
"""

from NetworkClass import Network
from Neuron import Neuron
from random import random, randint
from NeuronParameters import NeuronParameters
import networkx as nx
import matplotlib.pyplot as plt

Net = Network()
P = NeuronParameters()

n_excit = 800                # number of excitatory neurons
n_inhib = 200                # number of inhibitory neurons
n_total = n_excit + n_inhib  # total number of neurons
p_excit = .15                # excitatory connection probability
p_inhib = .25                # inhibitory connection probability
I_clamp = 150                # clamp current
n_clamp = 50                 # number of clamped neurons

w_excitmax = 250             # maximum excitatory weight
w_inhibmax = -500            # maximum-magnitude (negative) inhibitory weight
d_excitmax = 25              # maximum excitatory delay
d_inhibmax = 10              # maximum inhibitory delay

T = 500                      # simulation length
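The excerpt ends before these parameters are used. Purely as an illustration (this is not the original script's continuation, and the project's Network/Neuron API is not shown here), the connection probabilities and weight bounds above could be wired into a random excitatory/inhibitory graph with the already-imported networkx:

G = nx.DiGraph()
G.add_nodes_from(range(n_total))
for pre in range(n_total):
    excitatory = pre < n_excit                 # first n_excit neurons are excitatory
    p = p_excit if excitatory else p_inhib
    w_max = w_excitmax if excitatory else w_inhibmax
    for post in range(n_total):
        if pre != post and random() < p:
            G.add_edge(pre, post, weight=random() * w_max)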
Example no. 3
from NetworkClass import Network
from neuronClass import Neuron

if __name__ == '__main__':
    topology = [2, 3, 2]   # 2 inputs, 3 hidden neurons, 2 outputs
    net = Network(topology)
    Neuron.eta = 0.09      # learning rate
    Neuron.alpha = 0.015   # momentum

    # The four input patterns and their targets: the first output is XOR of the
    # inputs, the second is AND (i.e. a half adder).
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    outputs = [[0, 0], [1, 0], [1, 0], [0, 1]]

    # Train until the summed error over one pass drops below the threshold.
    while True:
        err = 0
        for i in range(len(inputs)):
            net.setInput(inputs[i])
            net.feedForword()
            net.backPropagate(outputs[i])
            err = err + net.getError(outputs[i])
        print("error: ", err)
        if err < 0.02:
            break

    # Interactive evaluation on user-supplied inputs.
    while True:
        a = int(input("type 1st input :"))
        b = int(input("type 2nd input :"))

        net.setInput([a, b])
        net.feedForword()
        print(net.getThResults())
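For comparison, a self-contained PyTorch sketch of the same half-adder task. This is only a reference implementation, not the Neuron/Network classes used above, and it may need a re-run if the random initialisation lands in a poor local minimum:

import torch
import torch.nn as nn

inputs = torch.tensor([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
targets = torch.tensor([[0., 0.], [1., 0.], [1., 0.], [0., 1.]])

# Same 2-3-2 topology, sigmoid activations, SGD with momentum.
net = nn.Sequential(nn.Linear(2, 3), nn.Sigmoid(), nn.Linear(3, 2), nn.Sigmoid())
opt = torch.optim.SGD(net.parameters(), lr=0.5, momentum=0.9)

for step in range(20000):
    opt.zero_grad()
    loss = nn.functional.mse_loss(net(inputs), targets)
    loss.backward()
    opt.step()
    if loss.item() < 0.005:
        break

print(net(inputs).round())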
Example no. 4
            "activation": "relu",
            "type": "Linear"
        }, {
            "units": 300,
            "activation": "relu",
            "type": "Linear"
        }],
        'output_layer': {
            "units": 10,
            "activation": "softmax",
            "type": "Linear"
        }
    }
}

model = Network(model_dict)
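The excerpt above starts after the opening of model_dict has been cut off. Based on the keys that appear across these examples, the full definition presumably looks roughly like the sketch below; the width of the first hidden layer and the way the input size is declared are assumptions, since they are not visible in the excerpt:

model_dict = {
    'network': {
        'hidden_layer': [{
            "units": 500,            # assumed width; the first entry is truncated above
            "activation": "relu",
            "type": "Linear"
        }, {
            "units": 300,
            "activation": "relu",
            "type": "Linear"
        }],
        'output_layer': {
            "units": 10,
            "activation": "softmax",
            "type": "Linear"
        }
        # The input dimension (784 for flattened MNIST) is presumably declared
        # in a part of the dictionary that this excerpt does not show.
    }
}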

train_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST(
    '../data/',
    train=True,
    download=True,
    transform=torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.1307, ), (0.3081, )),
        ReshapeTransform((-1, ))
    ])),
                                           batch_size=batch_size_train,
                                           shuffle=True)
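ReshapeTransform comes from the project's train_utils module, which is not shown in these excerpts. A minimal version consistent with how it is used here (flattening each normalised MNIST image into a 784-element vector) would be:

class ReshapeTransform:
    def __init__(self, new_shape):
        self.new_shape = new_shape

    def __call__(self, img):
        # img is the tensor produced by ToTensor()/Normalize(); (-1,) flattens it.
        return img.reshape(self.new_shape)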

test_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST(
    '../data/',
Example no. 5
    def set_network(self, model_dict):
        # Build the network from its dictionary definition, move it to the
        # target device, and attach a fresh SGD optimizer for its parameters.
        self.model_dict = model_dict
        network = Network(model_dict)
        self.network = network.to(self.device)
        self.set_optimizer(torch.optim.SGD(self.network.parameters(), lr=self.params_dict["learning_rate"]))
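set_optimizer is called here but defined outside the excerpt; a minimal version consistent with how the optimizer is used in the other examples (an assumption, not necessarily the project's actual code) would simply store it:

    def set_optimizer(self, optimizer):
        # Keep a handle on the optimizer so the training loop (and
        # update_network() in the first example) can reach it as self.optimizer.
        self.optimizer = optimizer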
Example no. 6
            "units": 168,
            "activation": "relu",
            "type": "Linear"
        }, {
            "units": 168,
            "activation": "relu",
            "type": "Linear"
        }],
        'output_layer': {
            "units": 10,
            "activation": "softmax",
            "type": "Linear"
        }
    }
}
model = Network(model_dict)
model

# In[ ]:

import logging
import string
import random
import os
import torch
import torchvision
from sklearn.model_selection import KFold

from Experiment import Experiment
from train_utils import ReshapeTransform
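KFold is imported here but used outside the excerpt. A typical pattern (a hedged sketch, not the original code) splits the training-set indices into folds:

import numpy as np
from sklearn.model_selection import KFold

kfold = KFold(n_splits=5, shuffle=True, random_state=0)
indices = np.arange(60000)          # size of the MNIST training set
for fold, (train_idx, val_idx) in enumerate(kfold.split(indices)):
    print("fold {}: {} train / {} validation samples".format(
        fold, len(train_idx), len(val_idx)))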
Example no. 7
                }, {
                    "units": 300,
                    "activation": "relu",
                    "type": "Linear"
                }],
            'output_layer': {
                "units": 10,
                "activation": "softmax",
                "type": "Linear"
            }
        }
    }

model = Network(model_dict)



# Print the shape of every parameter tensor (weights and biases); the enumerate
# index counts tensors, not layers.
for i, param in enumerate(model.parameters()):
    print("Layer {} , Parameters: {}".format(i, param.shape))




# Load Datasets

train_loader = torch.utils.data.DataLoader(
  torchvision.datasets.MNIST('../data/', train=True, download=True,
                             transform=torchvision.transforms.Compose([
                               torchvision.transforms.ToTensor(),
Example no. 8
# Imports needed by the rest of this excerpt; the original file's import block
# is cut off above this point. Network itself (the model class used below) is
# imported from elsewhere in the project.
import copy
import logging
import os
import random
import string
import sys

import torch

from Experiment import Experiment
from train_utils import ReshapeTransform



loaded = torch.load(sys.argv[1], map_location=torch.device('cpu'))
model_dict = copy.deepcopy(loaded["params"]["model"])

# Resize each hidden-layer entry to the width stored in the checkpoint so the
# rebuilt model can load the saved weights (same logic as update_network in the
# first example).
for i, layer in enumerate(loaded["params"]["model"]['network']['hidden_layer']):
    if i == 0:
        model_dict['network']['hidden_layer'][i]['units'] = loaded["state_dict"]['input_layer.weight'].shape[0]
    else:
        model_dict['network']['hidden_layer'][i]['units'] = loaded["state_dict"]['hidden_layers.{}.weight'.format(i - 1)].shape[0]

model = Network(model_dict)
model.load_state_dict(loaded["state_dict"])
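A quick sanity check one might add at this point (hypothetical, not part of the original script) to confirm that the rebuilt architecture really matches the checkpoint before training resumes:

for name, tensor in model.state_dict().items():
    assert tensor.shape == loaded["state_dict"][name].shape, name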


def randomString(stringLength=10):
    """Generate a random string of fixed length."""
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for _ in range(stringLength))

logging.basicConfig(level=logging.INFO)

if not os.path.isdir('models'):
  os.mkdir('models')

params_dict = {
  "batch_size_train": 100,