Example 1
    def __init__(self, parent=None):
        # Create the toplevel window
        gtk.Window.__init__(self)
        #self.set_screen(parent.get_screen())

        self.set_title("GLTest")
        self.set_default_size(200, 200)

        area = GtkGlDrawingArea()
        self.add(area)

        area.grab_focus()

        # test layers
        l1 = layer("testlayer")
        l2 = layer("testlayer2")

        l1.color = array([1, 0, 1])
        l2.color = array([1, 1, 0])

        l1.addLine(line(0, 0, 0.5, 0.5))
        l2.addLine(line(0, 0, -0.5, 0.5))
        l1.addPoint(point(-0.5, -0.5))
        l2.addPoint(point(-0.5, -0.4))

        l1.addArc(arc(0, 0, 0.3))

        area.addLayer(l1)
        area.addLayer(l2)

        self.show_all()
        area.showAll()
Example 3
    def __init__(self, nb_neur, learning_rate):

        self.input_layer_size = nb_neur
        self.hidden_layer_1_size = 25
        self.output_layer_size = 1
        self.learning_rate = learning_rate

        # the 3 layers
        self.input_layer = layer(self.input_layer_size, "input", self.hidden_layer_1_size)
        self.hidden_layer_1 = layer(self.hidden_layer_1_size, "hidden_1", self.hidden_layer_1_size)
        self.hidden_layer_2 = layer(self.hidden_layer_1_size, "hidden_2", self.output_layer_size)
        self.output_layer = layer(self.output_layer_size, "output", 0)
Example 4
    def __init__(self, input, output, hidden_count=2):
        # set up layers
        self.layers = []
        self.layers.insert(INPUT, layer([input_node(val) for val in input]))
        # use a comprehension so each hidden node is a distinct object;
        # [active_node(0.0)] * hidden_count would alias one node hidden_count times
        self.layers.insert(HIDDEN, layer([active_node(0.0) for _ in range(hidden_count)]))
        self.layers.insert(OUTPUT, layer([active_node(val) for val in output]))

        # set up weights - [layer id][from node id][to node id] = random initial weight in [-1,1]
        self.weights = []
        self.weights.insert(
            INPUT, [[random.triangular(-1, 1) for _ in self.layers[HIDDEN]] for _ in self.layers[INPUT]]
        )
        self.weights.insert(
            HIDDEN, [[random.triangular(-1, 1) for _ in self.layers[OUTPUT]] for _ in self.layers[HIDDEN]]
        )
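A quick standalone check of the aliasing pitfall fixed above: multiplying a one-element list replicates the reference, not the object, so every "node" would share state.

class Node:
    def __init__(self, value):
        self.value = value

aliased = [Node(0.0)] * 3                 # three references to one Node
distinct = [Node(0.0) for _ in range(3)]  # three separate Node objects

aliased[0].value = 1.0
print([n.value for n in aliased])   # [1.0, 1.0, 1.0] - every entry changed
distinct[0].value = 1.0
print([n.value for n in distinct])  # [1.0, 0.0, 0.0]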
Example 5
def load_data(name: str) -> list:
    '''
    Reads a saved .txt file containing weights and biases
    and translates it into a list of layer objects.
    '''
    L = []
    save_f = Path("../data/" + name + ".txt")

    if not save_f.exists():
        raise FileNotFoundError("save file does not exist")

    with open(save_f, "r") as f:
        for line in f:
            if line[0] == "L":
                # layer header: "L<type letter>,<node count>"
                line = line.rstrip().split(",")
                L.append(layer(int(line[1]), line[0][1]))
            elif line[0] != "\n" and L != []:
                # node record: "<layer index>,<node index>:<weight list>/<bias>"
                index, data = line.strip().split(":", 1)
                index = index.split(",")
                node = L[int(index[0])][int(index[1])]
                node.set_weights(vector(eval(data.split("/")[0])))
                node.set_bias(float(data.split("/")[1]))
    return L
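For completeness, a minimal sketch of a writer that emits the format load_data parses. save_data is hypothetical, and the per-layer type attribute and per-node get_weights()/get_bias() accessors are assumptions inferred from the reader above, not part of the original snippet.

def save_data(name: str, layers: list) -> None:
    # Hypothetical counterpart to load_data. Assumes each layer exposes a
    # one-letter type code, its node count via len(), and indexable nodes
    # with get_weights()/get_bias() accessors.
    with open("../data/" + name + ".txt", "w") as f:
        for i, lay in enumerate(layers):
            f.write("L" + lay.type + "," + str(len(lay)) + "\n")  # e.g. "Lh,16"
            for j in range(len(lay)):
                node = lay[j]
                # "<layer>,<node>:<weight list>/<bias>" - the weight list must be eval()-able
                f.write("{},{}:{}/{}\n".format(i, j, list(node.get_weights()), node.get_bias()))
            f.write("\n")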
Example 6
    def generate_classifier(self, euris=False, mean_w=0.0, std_w=1.0, dropout=False, keep_prob_dropout=0.5):
        for i in range(len(self.units) - 1):
            self.layers.append(layer.layer([self.units[i], self.units[i + 1]],
                                           activation=self.act_func[i],
                                           mean=mean_w, std=std_w, eur=euris))
        if euris:
            self.use_euristic = True

        self.use_dropout = dropout
        self.keep_prob_dropout = keep_prob_dropout
Example 7
    def generate_layers(self, units_for_layers):
        layers = []

        for i in range(len(units_for_layers) - 1):
            layers.append(layer.layer(units_for_layers[i], units_for_layers[i + 1], None, None, 'sigmoid'))

        return layers
Example 8
    def __init__(self, rng):

        network.__init__(self, n_hidden_layer=2)
        # network.__init__(self, n_hidden_layer = 0)

        self.layer.append(ReLU_layer(rng=rng, n_inputs=784, n_units=1000))
        self.layer.append(ReLU_layer(rng=rng, n_inputs=1000, n_units=1000))
        # self.layer.append(ReLU_layer(rng = rng, n_inputs = 1000, n_units = 1000))
        self.layer.append(layer(rng=rng, n_inputs=1000, n_units=10))
Example 9
	def __init__( self ):
		self.selectionLasso = layer.layer("poly_lasso_selection")
		self.selectionLasso.line_style = 1 #self.gtk.gdk.LINE_ON_OFF_DASH
		self.selectionLasso.line_width = 3 
		self.selectionLasso.color = [0.5, 0.5, 0.5]
		
		self.selectionPin = layer.layer("poly_pin_selection")
		self.selectionPin.point_size = 10 
		self.selectionPin.color = [0.5, 0.5, 0.5]
		
		self.lastPosition = None
		self.firstPosition = None
		self.closed = False
		self.active = False
		self.img_pnts = []
		self.wrl_pnts = []
		
		self.mode = "LASSO"
Example 10
	def initLayers(self):
		self.highlight = layer.layer("highlight")
		self.highlight.line_width = 10
		self.highlight.point_size = 10
		#self.highlight.color = array([0.16667, 1, 1])
		self.highlight.color = [1, 0.2, 0.2]
		self.highlight.caps = 2  # self.gtk.gdk.CAP_ROUND

		self.ObjectLayer = layer.layer("objects")
		self.ZoneLayer = layer.layer("zones")

		self.ObjectLayer.line_width = 6
		self.ObjectLayer.color = [0.2, 0.7, 0.2]
		self.ObjectLayer.visible = False
		self.ObjectLayer.point_size = 15
		self.ZoneLayer.line_width = 10
		self.ZoneLayer.color = [0.2, 0.7, 0.2]
		self.ZoneLayer.visible = False
Example 11
	def getLayer(self, v_name, l_name):
		# get (or lazily create) the viewport
		if v_name not in self.viewports:
			self.viewports[v_name] = viewport(v_name)
		v = self.viewports[v_name]

		# get (or lazily create) the layer within it
		if l_name not in v.layers:
			v.layers[l_name] = layer(l_name)

		return v.layers[l_name]
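A short usage sketch of the lazy get-or-create pattern above; manager is a hypothetical object that owns the viewports dictionary:

grid = manager.getLayer("main_view", "grid")  # creates viewport and layer on first use
same = manager.getLayer("main_view", "grid")  # returns the cached instances
assert grid is same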
Example 12
    def generate_decoder(self):
        decoder = []
        for l in reversed(range(self.n_layers)):
            b = np.zeros((self.layers[l].n_in,), dtype=theano.config.floatX)
            # mirror each encoder layer: tied (transposed) weights, fresh zero bias
            l_t = layer.layer(self.layers[l].n_out, self.layers[l].n_in,
                              (self.layers[l].W.get_value()).T, b, 'sigmoid')
            #l_t = layer.layer(self.layers[l].n_out, self.layers[l].n_in, (self.layers[l].W.get_value()).T, b, 'relu')
            # activation for the decoder is not squashing, as in Bengio et al. 2010
            decoder.append(l_t)
        self.layers.extend(decoder)

        for la in self.layers:
            self.param += la.param
Example 13
def main():
    run_count = 0

    # network layout for the MNIST training set images:
    # Input layer    - takes in the 784 greyscale pixels (range 0-1); each node holds the value of a single pixel
    # Hidden layer 1 - contains 16 nodes with 784 weights per node
    # Hidden layer 2 - contains 16 nodes with 16 weights per node
    # Output layer   - contains 10 nodes with 16 weights per node
    # the index of the largest output-layer activation is the network's guess of the digit

    #-----------------------change here----------------------------
    Input = get_data(run_count)[0]
    hidden_1 = layer(16, "h")
    hidden_1.init_weights(len(Input))

    hidden_2 = layer(16, "h")
    hidden_2.init_weights(len(hidden_1))

    output = layer(10, "o")
    output.init_weights(len(hidden_2))

    layers = [hidden_1, hidden_2, output]
    #--------------------------------------------------------------

    #checks for saved weights and bias file
    if Path("../data/weights.txt").exists():
        layers = networkm.load_data("weights")

    print("f) forward prop")
    print("b) backward prop")
    print("s) sketch")
    mode = input()

    if mode == "b":
        back_propagation(layers)
    if mode == "f":
        forward_propagation(layers, run_count)
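As a worked check of the layout described in the comments above (assuming one bias per node, which init_weights presumably sets up), the parameter count this topology implies:

# hidden_1: 16 nodes * 784 weights + 16 biases = 12560
# hidden_2: 16 nodes *  16 weights + 16 biases =   272
# output:   10 nodes *  16 weights + 10 biases =   170
total_params = (16 * 784 + 16) + (16 * 16 + 16) + (10 * 16 + 10)
print(total_params)  # 13002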
Example 14
    def generate_decoder(self, symmetric=True, act=None):
        decoder = []
        if symmetric:
            self.is_sym = True
            for i in reversed(range(self.enc_length)):
                if act is None:
                    temp_l = layer.layer([self.units[i + 1], self.units[i]],
                                         activation=self.act_func[i], eur=self.use_euristic)
                    self.act_func.append(self.act_func[i])
                else:
                    temp_l = layer.layer([self.units[i + 1], self.units[i]],
                                         activation=act[i], eur=self.use_euristic)
                    self.act_func.append(act[i])
                if i == 0:
                    temp_l.W = tf.transpose(self.layers[i].W)
                else:
                    temp_l.W = tf.transpose(self.layers[i].W)
                    temp_l.b = self.layers[i - 1].b

                decoder.append(temp_l)
        else:  ##### this branch is completely useless!!
            for i in reversed(range(self.enc_length)):
                if act is None:
                    temp_l = layer.layer([self.units[i + 1], self.units[i]],
                                         activation=self.act_func[i], eur=self.use_euristic)
                    self.act_func.append(self.act_func[i])
                else:
                    temp_l = layer.layer([self.units[i + 1], self.units[i]],
                                         activation=act[i], eur=self.use_euristic)
                    self.act_func.append(act[i])
                if i == 0:
                    temp_l.assign_W(self.layers[i].W, T=True)
                else:
                    temp_l.assign(self.layers[i].W, self.layers[i - 1].b, T=True)

                decoder.append(temp_l)

        self.layers.extend(decoder)
        self.full_connected = True
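The symmetric branch above ties each decoder layer's weights to the transposed encoder weights. A minimal standalone sketch of that idea in TensorFlow 2; shapes and variable names are illustrative, not taken from the original code:

import tensorflow as tf

W_enc = tf.Variable(tf.random.normal([784, 128]))  # encoder weights: 784 -> 128
b_enc = tf.Variable(tf.zeros([128]))
x = tf.random.normal([32, 784])

h = tf.nn.sigmoid(tf.matmul(x, W_enc) + b_enc)  # encode
W_dec = tf.transpose(W_enc)                     # decoder reuses W_enc, tied
x_hat = tf.nn.sigmoid(tf.matmul(h, W_dec))      # decode: 128 -> 784
print(x_hat.shape)                              # (32, 784)

Tying halves the number of weight parameters and keeps encoder and decoder consistent by construction.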
Example 15
    def generate_decoder(self, symmetric=True, act=None):
        decoder = []
        if symmetric:
            self.is_sym = True
            for i in reversed(range(self.enc_length)):
                if act is None:
                    temp_l = layer.layer([self.units[i + 1], self.units[i]],
                                         activation=self.act_func[i])
                    self.act_func.append(self.act_func[i])
                else:
                    temp_l = layer.layer([self.units[i + 1], self.units[i]],
                                         activation=act[i])
                    self.act_func.append(act[i])
                if i == 0:
                    temp_l.W = tf.transpose(self.layers[i].W)
                else:
                    temp_l.W = tf.transpose(self.layers[i].W)
                    temp_l.b = self.layers[i - 1].b

                decoder.append(temp_l)
        else:  ##### this branch is completely useless!!
            for i in reversed(range(self.enc_length)):
                if act is None:
                    temp_l = layer.layer([self.units[i + 1], self.units[i]],
                                         activation=self.act_func[i])
                    self.act_func.append(self.act_func[i])
                else:
                    temp_l = layer.layer([self.units[i + 1], self.units[i]],
                                         activation=act[i])
                    self.act_func.append(act[i])
                if i == 0:
                    temp_l.assign_W(self.layers[i].W, T=True)
                else:
                    temp_l.assign(self.layers[i].W, self.layers[i - 1].b, T=True)

                decoder.append(temp_l)

        self.layers.extend(decoder)
        self.full_connected = True
Example 16
    def create_graph(self):
        batch_size = self.information['batchsize_placeholder']
        xhatk = tf.zeros(shape=[batch_size, self.NT], dtype=tf.float32)
        rk = tf.cast(self.features['y'], tf.float32)
        onsager = tf.zeros(shape=[batch_size, self.NR], dtype=tf.float32)
        xhat = []
        helper = {}
        for k in range(1, self.L + 1):
            xhatk, rk, onsager, helperk, den_output = layer(
                xhatk, rk, onsager, self.features, self.linear_name,
                self.denoiser_name, self.information)
            xhat.append(den_output)
            helper['layer' + str(k)] = helperk
        print("Total number of trainable variables:", self.get_n_vars())
        return xhat, helper
Example 17
    def layer(self, num_nodes, function, input_num=None):
        errvalue = False
        if input_num is None and self.layers != []:
            # infer the input size from the previous layer
            innum = len(self.layers[-1].nnodes)
        elif self.layers != [] and input_num != len(self.layers[-1].nnodes):
            print("ERR: Input dimension mismatch, layer not created.")
            errvalue = True
        elif self.layers == [] and input_num is None:
            print("ERR: No input dimension specified.")
            errvalue = True
        else:
            # first layer with an explicit size, or an explicit size that matches
            # the previous layer (the original left innum unset in the latter case)
            innum = input_num
        if not errvalue:
            self.layers += [layer.layer(innum, num_nodes, function)]
        return errvalue
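A short usage sketch of the dimension-checked builder above; net and sigmoid are hypothetical stand-ins for the owning network object and an activation function:

net.layer(25, sigmoid, input_num=400)     # first layer: the input size must be given
net.layer(10, sigmoid)                    # input size inferred from the previous layer
err = net.layer(5, sigmoid, input_num=3)  # mismatch: prints an error, returns True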
Example 18
def main():
    X, Y, m = get_data()
    m = X.shape[0]
    layer_sizes = [400, 25, 10]
    N = len(layer_sizes)
    print('Initializing Neural Network Parameters')
    layers = [
        L.layer(layer_sizes[i], layer_sizes[i + 1]) for i in range(0, N - 1)
    ]
    nn = NN.neuralNetwork(layers, regularization=1, batch_size=m)
    print(nn)
    print('Training Neural Network ')
    sys.stdout.flush()
    nn = NN.gradient_decent_adam_new(nn, X, Y)
    pred = nn.predict(X)
    array_correct = [1 if x == p else 0 for x, p in zip(pred, Y)]
    array_incorrect = [1 if x != p else 0 for x, p in zip(pred, Y)]
    print('Training Set Correct:   {}'.format(np.sum(array_correct)))
    print('Training Set Incorrect: {}'.format(np.sum(array_incorrect)))
    print('Training Set Accuracy:  {}'.format(np.mean(array_correct) * 100))
Example 19
    def create(self):
        self.layers.append(inputLayer(self.layerSizes[0]))
        for i in range(1, self.size):
            self.layers.append(layer(self.layerSizes[i], self.layerSizes[i - 1]))
Example 20
    def generate_encoder(self):
        for i in range(self.enc_length):
            self.layers.append(layer.layer([self.units[i], self.units[i + 1]],
                                           activation=self.act_func[i], mean=0.0, std=2.0))
Example 21
def test_network(features, labels, mb_size, learn_rate, max_iterations, test_features=None):
	#Create the training and validation splits
	sets = create_validation_sets(features, labels, 10)

	training_features = [i[0] for i in sets[1]]
	training_labels = np.asarray([i[1] for i in sets[1]])
	training_labels.resize(len(training_labels),)
	validation_features = [i[0] for i in sets[0]]
	# labels must come from the same split as the features: sets[0], not sets[1]
	validation_labels = np.asarray([i[1] for i in sets[0]])
	validation_labels.resize(len(validation_labels),)


	#print(validation_labels)
	training_set_size = len(training_features)
	validation_set_size = len(validation_features)
	test_set_size = len(test_features)  # assumes test_features was actually supplied
	#Cast to shared variables
	training_features = cast_data(training_features,False)
	training_labels = cast_data(training_labels,True)
	validation_features = cast_data(validation_features,False)
	validation_labels = cast_data(validation_labels,True)
	test_features = cast_data(test_features,False)

	# print(training_labels.type)
	# print(training_features[0].type)
	# print(validation_labels.ndim)
	#print(training_features[0*int(mb_size):(0+1)*int(mb_size)].shape)

	x = T.tensor3('x')
	y = T.ivector('y')
	input_layer = x.reshape((mb_size, 1, 60, 60))
	
	#First layer reduces the 60*60 input to num_kernels1 channels of 28*28
	#(5*5 kernels give 56*56, then a downscale of a half).
	num_kernels1 = 256
	layer1 = layer.layer(input=input_layer, image_size=(mb_size, 1, 60, 60), filter_size=(num_kernels1, 1, 5, 5), downscale=(2, 2))
	#layer1.load_params(pickle.load(open('layer_1_small.pkl', 'rb')))

	num_kernels2 = 128
	layer2 = layer.layer(input=layer1.output, image_size=(mb_size, num_kernels1, 28, 28), filter_size=(num_kernels2, num_kernels1, 5, 5), downscale=(2, 2))
	#layer2.load_params(pickle.load(open('layer_2_small.pkl', 'rb')))

	# num_kernels3 = 12
	# layer3 = layer.layer(input=layer2.output, image_size=(mb_size, num_kernels2, 12, 12), filter_size=(num_kernels3, num_kernels2, 5, 5), downscale=(2, 2))
	# layer3.load_params(pickle.load(open('layer_3_small.pkl', 'rb')))
	flattened = layer2.output.flatten(2)
	#depending on time constraints, we might want to increase downsampling or add an extra layer
	#num inputs = 
	layer4 = connected_layer.connected_layer(input=flattened, input_size=(num_kernels2 * 12 * 12), output_size=100)
	#layer4.load_params(pickle.load(open('layer_4_small.pkl', 'rb')))
	layer5 = logistic_layer.logistic_layer(input=layer4.output, input_size=100, output_size=19)
	#layer5.load_params(pickle.load(open('layer_5_small.pkl', 'rb')))
	cost = layer5.cost(y)
	#Params to be updated:
	full_params = layer5.params + layer4.params + layer2.params + layer1.params

	#Create a function that corresponds to the gradients for each parameter
	gradients = T.grad(cost, full_params)

	#From these gradients we get a different update for each parameter
	update_list = [(p, p - learn_rate * d) for p, d in zip(full_params, gradients)]

	batch_number = T.scalar('bn', dtype='int32')

	#This function updates based on a batch of features and labels. 
	training_function = theano.function(
		[batch_number], 
		cost,
		updates=update_list,
		givens={ x: training_features[batch_number*int(mb_size):(batch_number+1)*int(mb_size)],
				 y: training_labels[batch_number*int(mb_size):(batch_number+1)*int(mb_size)]}
		)
	training_accuracy_function = theano.function(
		[batch_number],
		layer5.accuracy(y),
		givens = {x: training_features[batch_number*int(mb_size):(batch_number+1)*int(mb_size)],
				 y: training_labels[batch_number*int(mb_size):(batch_number+1)*int(mb_size)]}
		)
	validation_function = theano.function(
		[batch_number],
		layer5.accuracy(y),
		givens = {x: validation_features[batch_number*int(mb_size):(batch_number+1)*int(mb_size)],
				  y: validation_labels[batch_number*int(mb_size):(batch_number+1)*int(mb_size)]}
		)


	test_function = theano.function(
		[batch_number],
		layer5.prediction,
		givens = {x: test_features[batch_number*int(mb_size):(batch_number+1)*int(mb_size)]}
		)
	#Keep track of the training accuracy
	training_acc = []

	#Keep track of the validation accuracy
	validation_acc = []

	#PERFORM THE LEARNING OF THE MODEL
	num_batches = training_set_size // mb_size
	num_validate_batches = validation_set_size // mb_size
	num_test_batches = test_set_size // mb_size
	for epoch in range(max_iterations):
		#Perform the batch updates
		for batch in range(0, num_batches):
			#print("LEN " + str((batch + 1) * mb_size))
			training_function(batch)

		#print("Trained")
		if epoch % 1 == 0:  # evaluate every epoch
			t_acc = np.mean([training_accuracy_function(batch) for batch in range(0, num_batches)])
			v_acc = np.mean([validation_function(batch) for batch in range(0, num_validate_batches)])
			
			training_acc.append(t_acc)
			validation_acc.append(v_acc)
			print "ITERATION "+ str(iter) + " TRAIN ACC " + str(t_acc) + " VAL ACC " + str(v_acc)

			final_predictions = []
			for batch in range(0, num_test_batches):
				final_predictions.append(test_function(batch))

			pickle.dump(final_predictions, open('predictions.pkl', 'wb'))
			pickle.dump(training_acc, open('training.pkl', 'wb'))
			pickle.dump(validation_acc, open('validation.pkl', 'wb'))

		if v_acc > 0.9:
			print("COMPLETED AFTER " + str(epoch) + " ITERATIONS")
			break


	
	t_acc = np.mean([training_accuracy_function(batch) for batch in range(0, num_batches)])
	v_acc = np.mean([validation_function(batch) for batch in range(0, num_validate_batches)])

	training_acc.append(t_acc)
	validation_acc.append(v_acc)
	print(" TRAIN ACC " + str(t_acc) + " VAL ACC " + str(v_acc))

	final_predictions = []
	for batch in range(0, num_test_batches):
		final_predictions.append(test_function(batch))

	#print(final_predictions)
	pickle.dump(layer1.save_params(), open('layer_1.pkl', 'wb'))
	pickle.dump(layer2.save_params(), open('layer_2.pkl', 'wb'))
	#pickle.dump(layer3.save_params(), open('layer_3.pkl', 'wb'))
	pickle.dump(layer4.save_params(), open('layer_4.pkl', 'wb'))
	pickle.dump(layer5.save_params(), open('layer_5.pkl', 'wb'))
	pickle.dump(training_acc, open('training.pkl', 'wb'))
	pickle.dump(validation_acc, open('validation.pkl', 'wb'))
	pickle.dump(final_predictions, open('predictions.pkl', 'wb'))

	return final_predictions
Example 22
    def add_layer(self, firewall_layer):
        self.layers.append(layer(firewall_layer))
Example 23
from node import node
from layer import layer
from network import network
import math
import numpy as np


#initialising network parameters
batch_size = 5			#number of years (each year gives one example)
regularisation = 2500
descent_rate = 0.000008

#creating the network graph
input_layer = layer('input', [(1, 1, 'biased'), (1, 2, 'un_biased')], batch_size)
output_layer = layer('output', [(1, 1, 'un_biased')], batch_size)

#initializing the network
input_layer.initialize_layer()
output_layer.initialize_layer()

input_layer.create_connection(output_layer, [('one_to_all',), ('one_to_all',)])
all_layer_list = [input_layer, output_layer]

net = network(all_layer_list, 'stochastic', batch_size, regularisation, descent_rate)


temp = 0
while temp < 50:
	list_of_input = [(), (302, 296)]
	list_of_output = [(294,)]
	net.in_batch_initialize_network()
Example 24
    def generate_encoder(self, euris=False, mean_w=0.0, std_w=1.0):
        for i in range(self.enc_length):
            self.layers.append(layer.layer([self.units[i], self.units[i + 1]],
                                           activation=self.act_func[i],
                                           mean=mean_w, std=std_w, eur=euris))
        if euris:
            self.use_euristic = True
Example 25
from layer import layer
from gaussLobatto import gausLobatto

import matplotlib.pyplot as plt
import layerlab as ll
import numpy as np

n = 128
mu, w = gausLobatto(n)

m = 12
eta = complex(1.1, 0.0)
alpha = 0.6
run = 2
if run == 1:
    diffuseLayer = layer(mu, w, m)
    diffuseLayer.setDiffuse(0.8)
    plt.figure()
    plt.imshow(diffuseLayer.scatteringMatrix[:, :, 0])
    plt.savefig('images/diffuse' + str(0) + '.png')

    diffuseLayer = ll.Layer(mu, w, m)
    diffuseLayer.setDiffuse(0.8)
    i = 0
    topRow = np.concatenate((diffuseLayer[i].transmissionBottomTop, diffuseLayer[i].reflectionTop), axis=1)
    bottomRow = np.concatenate((diffuseLayer[i].reflectionBottom, diffuseLayer[i].transmissionTopBottom), axis=1)
    SM = np.concatenate((topRow, bottomRow), axis=0)

    plt.figure()
    plt.imshow(SM)
    plt.savefig('images/lldiffuse' + str(i) + '.png')
Example 26
def create_model(model_json, train):
    model = tf.keras.Sequential()
    for l in model_json['layers']:
        model.add(layer(l, train))
    load_weights(model_json['name'], model)
    return model, optimizer(model_json)