def __init__(self, initializationType, layerTypeList, activationTypeList, weightSizeList):
    # All lists have to contain the same number of elements.
    self.layers = []
    numberOfLayers = len(layerTypeList)
    for layerID in range(numberOfLayers):
        self.layers.append(
            Layer.Layer(layerID, initializationType, layerTypeList[layerID],
                        activationTypeList[layerID], weightSizeList[layerID]))
def expireTile(self, tile):
    bbox = tile.bounds()
    layer = tile.layer
    # Delete every cached tile that overlaps the expired tile, at every zoom level
    for z in range(len(layer.resolutions)):
        bottomleft = layer.getClosestCell(z, bbox[0:2])
        topright = layer.getClosestCell(z, bbox[2:4])
        for y in range(bottomleft[1], topright[1] + 1):
            for x in range(bottomleft[0], topright[0] + 1):
                coverage = Layer.Tile(layer, x, y, z)
                self.cache.delete(coverage)
def Initialize(self, Data):
    self.__ToFloat(Data)
    self.__Normalize(Data)
    num_input = len(Data[0][:-1])
    outcomes = set([row[-1] for row in Data])
    num_output = len(outcomes)
    self.__SetOutcomes(list(outcomes))
    Network = []
    Network.append(Layer(num_input))
    # Hidden layer size: halfway between the previous layer and the output layer
    Network.append(
        Layer(ceil((len(Network[-1]) + num_output) / 2.0),
              initialization=len(Network[-1])))
    Network.append(Layer(num_output, initialization=len(Network[-1])))
    return Network
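# A minimal, self-contained illustration of the hidden-layer sizing rule used in
# Initialize above, assuming len(Layer) returns its neuron count; the toy data
# and variable names here are made up for the example.
from math import ceil

toy_data = [[5.1, 3.5, 1.4, 0.2, 'A'],
            [6.2, 2.9, 4.3, 1.3, 'B'],
            [5.9, 3.0, 5.1, 1.8, 'C']]
num_input = len(toy_data[0][:-1])                     # 4 features
num_output = len(set(row[-1] for row in toy_data))    # 3 distinct outcomes
hidden_size = ceil((num_input + num_output) / 2.0)    # ceil(7 / 2.0) = 4
print(num_input, hidden_size, num_output)             # 4 4 3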
class HeadShoulderDetector:
    # Cascade of Layer classifiers; populated elsewhere before predict() is called
    layers = []

    def predict(self, x: np.ndarray):
        # Reject as soon as any stage in the cascade rejects
        for layer in self.layers:
            if layer.predict(x) > -1:
                continue
            return -1
        return 1
def __init__(self, w, h, mario, entities, images, total_width):
    self.total_width = total_width
    self.w = w
    self.h = h
    self.mario = mario
    self.m_group = pygame.sprite.Group(self.mario)
    self.entities = entities
    self.background = Layer.Background(w, h, images)
    self.world_shift_x = self.world_shift_y = 0
    self.left_viewbox = int(w / 2 - w / 8)
    self.right_viewbox = int(w / 2 + w / 12)
def __init__(self, X, Y, neuronsLayer, batchSize, activationF, activationLast):
    self.X = X
    self.Y = Y
    self.activationF = activationF
    self.neuronsLayer = neuronsLayer  # list with the number of neurons per layer (each index is one layer)
    self.activationLast = activationLast  # activation function used in the last layer
    self.Layers = []  # will contain all created layers (list of Layer objects)
    # initializing Layer instances
    for i in range(len(self.neuronsLayer)):
        if i == 0:  # first layer
            layer = Layer(self.neuronsLayer[i], self.X.shape[1], self.activationF, batchSize, 0.8, False)
        elif i == len(self.neuronsLayer) - 1:  # last layer
            layer = Layer(self.Y.shape[1], self.neuronsLayer[i - 1], self.activationLast, batchSize, 1, True)
        else:  # intermediate layer
            layer = Layer(self.neuronsLayer[i], self.neuronsLayer[i - 1], self.activationF, batchSize, 0.8, False)
        self.Layers.append(layer)
def newGraphicLayer(__, tileSize=(DEFAULT_TILE_WIDTH, DEFAULT_TILE_HEIGHT), length=DEFAULT_MAP_LENGTH):
    __.tab.insertTab(__.idGraphic, Layer(Layer.GRAPHIC, tileSize, __, length), str(__.idGraphic + 1))
    __.tab.setCurrentIndex(__.idGraphic)
    __.idGraphic += 1
    if __.length < length:
        __.length = length
        __.reinitCollisionLayer(length * int(float(tileSize[0] / DEFAULT_TILE_WIDTH) * 4))
    if __.lengthGraphic < length * tileSize[0]:
        __.lengthGraphic = length * tileSize[0]
        Layer.field.setFixedWidth(__.lengthGraphic)
    return __.tab.widget(__.idGraphic - 1)
def sentence_pair_rep(self):
    lstm = tf.nn.rnn_cell.LSTMCell(self.parameters["num_units"], state_is_tuple=True)
    lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=self.parameters["keep_prob"])
    output_layer1 = Layer.ff_w(2 * self.parameters["num_units"], self.parameters["output_dim"], name='Output1')
    output_bias1 = Layer.ff_b(self.parameters["output_dim"], 'OutputBias1')
    output_layer2 = Layer.ff_w(self.parameters["output_dim"], self.parameters["output_dim"], 'Output2')
    output_bias2 = Layer.ff_b(self.parameters["output_dim"], 'OutputBias2')
    output_layer3 = Layer.ff_w(self.parameters["output_dim"], self.parameters["num_classes"], 'Output3')
    output_bias3 = Layer.ff_b(self.parameters["num_classes"], 'OutputBias3')
    # Encode premise and hypothesis with the same (reused) LSTM
    outputs1, fstate1 = tf.nn.dynamic_rnn(lstm, self.premises_ph,
                                          sequence_length=self.premise_lengths, dtype=tf.float32)
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        outputs2, fstate2 = tf.nn.dynamic_rnn(lstm, self.hypotheses_ph,
                                              sequence_length=self.hyp_lengths, dtype=tf.float32)
    # Feed-forward head on the concatenated final states
    logits1 = tf.nn.dropout(
        tf.nn.tanh(tf.matmul(tf.concat(1, [fstate1[0], fstate2[0]]), output_layer1)) + output_bias1,
        self.parameters["keep_prob"])
    logits2 = tf.nn.dropout(
        tf.nn.tanh(tf.matmul(logits1, output_layer2) + output_bias2),
        self.parameters["keep_prob"])
    logits3 = tf.nn.tanh(tf.matmul(logits2, output_layer3) + output_bias3)
    return logits3
def main():
    Log.i('Sphinx is starting....')
    # Load MNIST data
    x, y = loadlocal_mnist(images_path='tests/images', labels_path='tests/labels')
    Log.i('Dimensions: %s x %s' % (x.shape[0], x.shape[1]))
    input_layer = Layer(neuron_count=784, layer_type=LayerType.INPUT)
    hidden_layer = Layer(neuron_count=392, synapse_count=392, weight_per_synapse=784, layer_type=LayerType.HIDDEN)
    output_layer = Layer(neuron_count=10, synapse_count=10, weight_per_synapse=392, layer_type=LayerType.OUTPUT)
    layers = [input_layer, hidden_layer, output_layer]
    network = Network(layers, 0.3)
    Log.only = False
    if len(sys.argv) == 2:
        Log.debug = bool(sys.argv[1])
    else:
        Log.debug = False
    patterns = []
    Log.i('Loading data...')
    for i in range(len(x)):
        # One-hot encode the label and scale the pixel values to [0, 1]
        cl_label = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        cl_label[y[i]] = 1
        np_x = np.array(x[i])
        c_x = np.divide(np_x, 255)
        patterns.append(TrainingPattern(c_x, cl_label))
    Log.i('Loading data finished')
    network.train(patterns, 10, save_weight_per_ite=1)
    network.predict(x[0])
def __init__(self, TrainX, TrainY, numLayers, batchsize, dropOut=1.0,
             activationFunction=ActivationType.SIGMOID,
             lastLayerActivationFunction=ActivationType.SIGMOID):
    self.TrainX = TrainX
    self.TrainY = TrainY
    self.numLayers = numLayers
    self.batchsize = batchsize
    self.Layers = []  # Python list holding all Layer objects
    self.lastLayerActivationFunction = lastLayerActivationFunction
    self.gradDescType = 1
    self.LROptimization = LROptimizerType.ADAM
    self.BatchNormMode = BatchNormMode
    for i in range(len(self.numLayers)):
        if i == 0:  # first layer
            layer = Layer(numLayers[i], TrainX.shape[1], batchsize, False, dropOut, activationFunction)
        elif i == len(numLayers) - 1:  # last layer
            layer = Layer(TrainY.shape[1], numLayers[i - 1], batchsize, True, dropOut, lastLayerActivationFunction)
        else:  # intermediate layers
            layer = Layer(numLayers[i], numLayers[i - 1], batchsize, False, dropOut, activationFunction)
        self.Layers.append(layer)
def __init__(self, neurons_per_layer, activation_function, problem_type):
    self.layers = []
    self.num_layers = len(neurons_per_layer)
    self.problem_type = problem_type
    # Create the layers of the network
    for i in range(self.num_layers - 1):
        if i == 0:
            # Create the input layer; the +1 in neurons_per_layer[i] + 1 holds a bias value
            self.layers.append(
                Layer.layer([neurons_per_layer[i] + 1, neurons_per_layer[i + 1]],
                            "linear", input_layer=True))
        else:
            # Create the hidden layers with the user-selected activation function
            self.layers.append(
                Layer.layer([neurons_per_layer[i] + 1, neurons_per_layer[i + 1]],
                            activation_function))
    # Create the output layer: softmax for classification, linear for regression
    if problem_type == "classification":
        self.layers.append(
            Layer.layer([neurons_per_layer[-1], None], "softmax", output_layer=True))
    elif problem_type == "regression":
        self.layers.append(
            Layer.layer([neurons_per_layer[-1], None], "linear", output_layer=True))
    self.previous_weight_change = [
        np.zeros(l.weights.shape) for l in self.layers[:-1]
    ]
def addInnerLayer(self, numOfNodes):
    # Copy the existing layer list, build the new inner layer from the current
    # last layer, then append it and swap in the extended list
    eLL = list(self.layers)
    layer = Layer()
    layer.generateInnerNodes(numOfNodes, self.layers[-1])
    layer.createWeightMatrix(self.layers[-1])
    eLL.append(layer)
    self.layers = eLL
def __init__(self, X, y, layers):
    """Initialize a network given input and output shapes and a network
    structure of hidden and output layers."""
    # check shape consistency
    n_input, _m_i = X.shape
    n_output, _m_o = y.shape
    if _m_i == _m_o:
        self.layers = {}  # layers keyed by index
        # Input layer
        self.layers[0] = Layer(n_input, layers["n1"], 'input', 0, 0, 0, 0, False)
        # Hidden layers
        for layer in range(1, layers["num_layers"] - 1):
            self.layers[layer] = Layer(layers["n" + str(layer)],
                                       layers["n" + str(layer + 1)],
                                       layers["act" + str(layer)],
                                       layers["alpha" + str(layer)],
                                       layers["lambda" + str(layer)],
                                       layers["p" + str(layer)],
                                       layers["gamma" + str(layer)], False)
        # Output layer
        layer += 1
        self.layers[layer] = Layer(layers["n" + str(layer)], n_output,
                                   layers["act" + str(layer)],
                                   layers["alpha" + str(layer)],
                                   layers["lambda" + str(layer)],
                                   layers["p" + str(layer)],
                                   layers["gamma" + str(layer)], True)
def add_layer(self, im):
    if self.focus > -1:  # a previous layer exists
        # that layer loses focus
        self.layer_thumb[self.focus].lose_focus()
    # New layer instance
    layer = Layer.Layer(im, self.instance.layers, self)
    # add records
    self.layers = [im] + self.layers
    self.layerscoords = [(450, 300)] + self.layerscoords
    self.layer_thumb = [layer] + self.layer_thumb  # add thumbnail
    layer.cv.pack()
    layer.change_focus()  # focus on the new layer
    self.focus = self.layer_thumb.index(layer)  # record focus
    self.instance.redraw()  # redraw canvas
def rolloutActions(self, layers):
    num_input = self.input_size
    num_hidden = self.hidden_size
    num_layers = self.num_layers
    num_directions = 2 if ('bidirectional' in self.kwargs and self.kwargs['bidirectional']) else 1
    hn = Variable(torch.zeros(num_layers * num_directions, 1, num_hidden))
    cn = Variable(torch.zeros(num_layers * num_directions, 1, num_hidden))
    input = Variable(torch.Tensor(len(layers), 1, num_input))
    for i in range(len(layers)):
        input[i] = Layer(layers[i]).toTorchTensor(skipSupport=self.skipSupport)
    actions = self.controller(input, (hn, cn))
    self.actionSeqs.append(actions)
    return actions
def add_layer(self, W, B, activation, index=None):
    if index is None:
        index = self._size
    if index > self._size or index < 0:
        print("index out of bounds")
        return
    if activation < 0 or activation >= 5:
        print("activation function not defined")
        return
    # Splice the new layer into the doubly linked list just before the target layer
    target_layer = self._traverse(index)
    new_layer = Layer.Layer(W, B, activation)
    target_layer.pre.next = new_layer
    new_layer.pre = target_layer.pre
    target_layer.pre = new_layer
    new_layer.next = target_layer
    self._size += 1
def __init__(self, column_number):
    self.column_number = column_number
    self.layer1 = Layer(1, self.column_number)
    self.layer2 = Layer(2, self.column_number)
    self.layer3 = Layer(3, self.column_number)
    self.basal_ganglia_recommendation_weights = []
    # 10 is the number of basal ganglia recommendations
    for i in range(10):
        # TODO determine how these weights are initialised
        self.basal_ganglia_recommendation_weights.append(1.0)
def applyActionsShrinkage(m, action, inp, lookup):
    # if m.fixed:
    #     return resizeToFit(Layer(m), inp)
    # Get the layer representation, then apply the shrinkage actions to the
    # kernel size (k), stride (s), output size (o) and padding (p)
    _, k, s, o, p = Layer(m).getRepresentation()
    k = max(int(k * lookup[action[1]]), 1) if not m.fixed[1] else k
    s = max(int(s * lookup[action[2]]), 1) if not m.fixed[2] else s
    o = max(int(o * lookup[action[3]]), 10) if not m.fixed[3] else o
    p = int(p * lookup[action[4]]) if not m.fixed[4] else p
    in_channels = inp.size(1)
    cn = m.__class__.__name__
    if cn == 'Linear':
        in_channels = inp.view(inp.size(0), -1).size(1)
        if in_channels > LINEAR_THRESHOLD or in_channels < 10:
            print('Linear layer too large')
            return None
    return resizeLayer(m, in_channels, o, kernel_size=k, stride=s, padding=p)
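# A small, self-contained illustration of the shrinkage arithmetic above; the
# lookup table values here are invented for the example (the real table comes
# from the caller).
lookup_example = [1.0, 0.8, 0.6, 0.4]
k = 5                                        # current kernel size
k_new = max(int(k * lookup_example[2]), 1)   # action index 2 -> int(5 * 0.6) = 3
print(k_new)                                 # 3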
def __init__(self, nns: Tuple[int] = (1, 2), isFirst: bool = True,
             prevBlocks: List[Block] = None,
             prevBlocksWeights: List[np.array] = None):
    # nns -- the number of neurons in each layer: the first element is the
    # number of inputs, the last one is the number of outputs
    self.layers = [
        Layer(ins=nns[i], outs=nns[i + 1]) for i in range(len(nns) - 1)
    ]
    self.isFirst = isFirst
    self.prevBlocks = prevBlocks
    self.prevBlocksWeights = prevBlocksWeights
    self.res = None  # outputs of the block
    self.nIns = self.layers[0].nIns
    self.nOuts = self.layers[-1].nOuts
def childConstructor(self, parent1, parent2):
    # Uniform crossover: each weight is taken from parent1 with probability
    # `percentage`, otherwise from parent2
    p1w = parent1.getWeights()
    p2w = parent2.getWeights()
    cw = np.empty(len(p1w), dtype=object)
    percentage = random.random()
    cont = 0
    for i in range(len(p1w)):
        cw[i] = np.empty(len(p1w[i]), dtype=object)
        for j in range(len(p1w[i])):
            cw[i][j] = np.empty(len(p1w[i][j]), dtype=object)
            for k in range(len(p1w[i][j])):
                tmpPercentage = random.uniform(0, 1)
                if tmpPercentage < percentage:
                    cw[i][j][k] = p1w[i][j][k]
                else:
                    cw[i][j][k] = p2w[i][j][k]
                cont += 1
    # mutation
    cw = self.mutate(cw, cont)
    # creation of the new genome
    self.fitness = 0
    self.layers = np.empty(len(parent1.layers), dtype=object)
    for i in range(len(self.layers)):
        self.layers[i] = Layer.Layer([cw[i]])
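# A compact, self-contained sketch of the uniform-crossover idea used in
# childConstructor, shown on flat weight vectors (illustrative only; the real
# genomes are nested per-layer arrays).
import random
import numpy as np

p1 = np.arange(6, dtype=float)      # parent 1 weights
p2 = -np.arange(6, dtype=float)     # parent 2 weights
percentage = random.random()        # one mixing bias drawn per child
child = np.where(np.random.rand(6) < percentage, p1, p2)
print(child)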
def build(self):
    tf.reset_default_graph()
    # Image data
    self.input = tf.placeholder(dtype=tf.float64, shape=[None, self.input_layer_size],
                                name='input_layer')
    invar = self.input
    input_size = self.input_layer_size
    for i, outsize in enumerate(self.layer_sizes[1:]):
        iwr = self.iwr_function(outsize)
        print("Constructing layer: " + str(i) + ", insize=" + str(input_size) +
              ", outsize=" + str(outsize) + ", iwr=" + str(iwr))
        iwr_l = iwr[0]
        iwr_u = iwr[1]
        layer = Layer(net=self, index=i, input=invar, input_size=input_size,
                      output_size=outsize,
                      activation_function=self.oaf if i == self.number_of_layers - 2 else self.haf,
                      iwr_lower_bound=iwr_l, iwr_upper_bound=iwr_u)
        invar = layer.output
        input_size = layer.output_size
        # Process grabvars
        w = layer.weights
        b = layer.biases
        o = layer.output
        if i in self.config.dw:
            self.add_grabvar(w)
        if i in self.config.db:
            self.add_grabvar(b)
        if i in self.config.mdend:
            self.add_grabvar(o)
        if i in self.config.map_layers and i not in self.config.mdend:
            self.add_grabvar(o)
    self.output = layer.output  # output of the last module is the output of the whole network
    self.target = tf.placeholder(dtype=tf.float64, shape=[None, self.output_layer_size],
                                 name='output_layer')  # target data
    self.grabvars.sort(key=lambda x: x.name)
    self.configure_training()
def addLayer(self, fileName, color):
    layer = Layer(fileName, color)
    if layer != 0:
        self.layers.append(layer)
        if self.bbxset:
            # the map's bounding box is expanded each time a layer is added
            if self.minx > layer.minx:
                self.minx = layer.minx
            if self.miny > layer.miny:
                self.miny = layer.miny
            if self.maxx < layer.maxx:
                self.maxx = layer.maxx
            if self.maxy < layer.maxy:
                self.maxy = layer.maxy
        else:
            self.minx = layer.minx
            self.miny = layer.miny
            self.maxx = layer.maxx
            self.maxy = layer.maxy
            self.bbxset = True
    return layer
def __init__(self, units, alfa, v_lambda, learning_rate_init, type_learning_rate,
             num_layers, function, fun_out, type_weight,
             type_problem="Regression", early_stopping=False):
    self.alfa = alfa
    self.v_lambda = v_lambda
    self.learning_rate_init = learning_rate_init
    self.learning_rate = learning_rate_init
    self.type_learning_rate = type_learning_rate
    self.units = units
    self.function = function
    self.fun_out = fun_out
    self.struct_layers = np.empty(num_layers, Layer.Layer)
    self.num_layers = num_layers
    self.type_weight = type_weight
    self.type_problem = type_problem
    self.early_stopping = early_stopping
    for i in range(self.num_layers):
        self.struct_layers[i] = Layer.Layer(self.units[i + 1], self.units[i], type_weight)
    self.epochs_retraining = -1
def competitive_learning(data_set, eta, num_clusters, iterations, score_funcs):
    '''
    The main competitive learning algorithm. Creates a two-layer network, then
    trains the weights of the network by updating the weights of the node with
    the strongest output for each training example.
    '''
    # Initialize variables
    num_inputs = len(data_set[0])  # the number of inputs equals the number of features
    weight_layer = Layer.Layer(num_inputs, num_clusters, eta)
    results = []
    for iteration in range(iterations):
        # Train the network, score the resulting clustering, append the score
        # to the list of results, and move on to the next iteration
        weight_layer = _train_network(data_set, weight_layer, num_clusters)
        clustering = _cluster(data_set, weight_layer)
        result = Analyze.analyze_clusters(clustering, score_funcs)
        results.append(result)
    return results
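# A self-contained sketch of the winner-take-all update described in the
# docstring above (only the node with the strongest output moves toward the
# example); the data and shapes here are made up and this is not the project's
# Layer/_train_network implementation.
import numpy as np

rng = np.random.default_rng(0)
data = rng.random((20, 3))       # 20 examples, 3 features
weights = rng.random((4, 3))     # 4 cluster nodes
eta = 0.1
for example in data:
    winner = int(np.argmax(weights @ example))            # strongest output
    weights[winner] += eta * (example - weights[winner])   # move winner toward the example
print(weights)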
def __init__(self, layers):
    '''
    Initialize a fully connected neural network.
    layers: a list giving the number of nodes in each layer of the network.
    '''
    self.connections = Connections()
    self.layers = []
    layer_count = len(layers)
    node_count = 0
    for i in range(layer_count):
        self.layers.append(Layer(i, layers[i]))
    # Fully connect each layer to the next (the last node of the downstream
    # layer is the bias node and receives no upstream connections)
    for layer in range(layer_count - 1):
        connections = [
            Connection(upstream_node, downstream_node)
            for upstream_node in self.layers[layer].nodes
            for downstream_node in self.layers[layer + 1].nodes[:-1]
        ]
        for conn in connections:
            self.connections.add_connection(conn)
            conn.downstream_node.append_upstream_connection(conn)
            conn.upstream_node.append_downstream_connection(conn)
def __init__(self, numLayers, AlgChoice, AlgParams, NumNodesPerLayer,
             PatchMode='Adjacent', ImageType='Color'):
    self.NetworkBelief = {}
    self.LowestLayer = 1
    # this is going to store beliefs for every image DeSTIN sees
    self.NetworkBelief['Belief'] = np.array([])
    self.saveBeliefOption = 'True'
    self.BeliefFileName = 'Beliefs.mat'
    self.NumberOfLayers = numLayers
    self.AlgorithmChoice = AlgChoice
    self.AlgorithmParams = AlgParams
    self.NumberOfNodesPerLayer = NumNodesPerLayer
    self.PatchMode = PatchMode
    self.ImageType = ImageType
    self.Layers = [[
        Layer(j, NumNodesPerLayer[j], self.PatchMode, self.ImageType)
        for j in range(numLayers)
    ]]
def main():
    myLayer = Layer(0, [8, 8], 'Adjacent', image_type='Gray')
    N = 1
    feats = 16
    img = np.random.rand(32, 32)
    Label = 1
    Ratio = 4
    myLayer.load_input(img, Ratio)
    # Initialize the layer learning algorithm by specifying the following:
    # algorithm_choice, alg_params, InitNodebelief, InitNodeLearnedFeatures
    algorithm_choice = 'LogRegression'
    alg_params = {}
    # alg_params['N'] =
    D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2))
    alg_params['D'] = D
    alg_params['N'] = N
    alg_params['feats'] = feats
    alg_params['training_steps'] = 1
    w = theano.shared(rng.randn(feats), name="w")
    alg_params['w'] = w
    InitNodeLearnedFeatures = w
    InitNodebelief = w * img
    myLayer.init_layer_learning_params(algorithm_choice, alg_params)
def main():
    myLayer = Layer(0, [8, 8], 'Adjacent', ImageType='Gray')
    N = 1
    feats = 16
    img = np.random.rand(32, 32)
    Label = 1
    Ratio = 4
    myLayer.loadInput(img, Ratio)
    # Initialize LayerLearningAlgorithm by specifying the following:
    # AlgorithmChoice, AlgParams, InitNodeBelief, InitNodeLearnedFeatures
    AlgorithmChoice = 'LogRegression'
    AlgParams = {}
    # AlgParams['N'] =
    D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2))
    AlgParams['D'] = D
    AlgParams['N'] = N
    AlgParams['feats'] = feats
    AlgParams['training_steps'] = 1
    w = theano.shared(rng.randn(feats), name="w")
    AlgParams['w'] = w
    InitNodeLearnedFeatures = w
    InitNodeBelief = w * img
    myLayer.initLayerLearningParams(AlgorithmChoice, AlgParams)
def main():
    def Train(dcnn):
        start_time = time.time()
        print("start training...")
        dcnn.Train(30, 0.1, batchSize)
        print("Done training, the training time is ", (time.time() - start_time))
        InputTestingList, OutputTestingLabels = ReadMNISTTestingData()
        accuracyCount = 0
        for i in range(len(InputTestingList)):
            # do a forward pass
            res = dcnn.Evaluate(InputTestingList[i], 0)
            # determine the index of the maximum output value
            maxindex = res.argmax(axis=0)
            if OutputTestingLabels[i][maxindex] == 1:
                accuracyCount += 1
        print("done testing.., accuracy = ", (accuracyCount / len(InputTestingList) * 100))

    def SelectImage(dcnn):
        global ImagePath
        img = np.empty((28, 28), dtype='float64')
        ImagePath = tk.filedialog.askopenfilename(filetypes=(("file", "*.bmp"), ("All Files", "*.*")))
        image = Image.open(ImagePath)
        photo = ImageTk.PhotoImage(image)
        w = tk.Label(mainwindow, image=photo)
        w.photo = photo
        w.pack()
        img = cv2.imread(ImagePath, 0) / 255.0
        # The expected digit is encoded in the first character of the file name
        OutputLabel = np.zeros((10, 1))
        filename = os.path.basename(ImagePath)
        y = int(filename[0])
        OutputLabel[y] = 1.0
        res = dcnn.Evaluate(img, 0)
        maxindex = res.argmax(axis=0)
        Text_Label = tk.Label(mainwindow, text="the digit is... ").pack()
        Digit_Label = tk.Label(mainwindow, text=str(maxindex)).pack()

    TestList = []
    LablesList = []
    batchSize = 1
    numFeatureMapsLayer1 = 6
    CNNList = []
    NNLayerList = []
    numFeatureMapsLayer2 = 12
    C1 = CNNLayer(numFeatureMapsLayer1, 1, 28, 5, ActivationType.RELU, PoolingType.AVGPooling, batchSize)
    C2 = CNNLayer(numFeatureMapsLayer2, numFeatureMapsLayer1, 12, 5, ActivationType.RELU, PoolingType.AVGPooling, batchSize)
    CNNList.append(C1)
    CNNList.append(C2)
    l1 = Layer(50, 4 * 4 * numFeatureMapsLayer2, ActivationType.RELU, batchSize, 0.8)
    l2 = Layer(10, 50, ActivationType.SOFTMAX, batchSize)
    NNLayerList.append(l1)
    NNLayerList.append(l2)
    InputTrainingList, OutputTrainingLabels = ReadMNISTTrainingData()
    dcnn = DeepCNN(CNNList, NNLayerList, InputTrainingList, OutputTrainingLabels, batchSize)
    # Creating the GUI
    mainwindow = tk.Tk()
    mainwindow.geometry('640x340')
    mainwindow.title(" Convolutional Neural Networks ")
    b_SelectImage = tk.Button(mainwindow, text="Select an image for testing",
                              command=lambda: SelectImage(dcnn))
    b_SelectImage.pack()
    b_Train = tk.Button(mainwindow, text="Train the Network", command=lambda: Train(dcnn))
    b_Train.pack()
    mainwindow.mainloop()
def identity(x):
    return x


def derivata_identity(x):
    return 1


topology = 1
n_units = 20
for tries in range(0, 5):
    layers = []
    layers.append(
        Layer(inputs=n_inputs, sorta=logistic, derivata=derivata_logistic, num_unit=n_units))
    layers.append(
        Output_Layer(inputs=n_units, sorta=identity, derivata=derivata_identity, num_unit=n_outputs))
    n = Net(layers, name='Net_' + str(topology) + '_try_' + str(tries))
    # + '_' + str(dt.datetime.now()).replace(' ', '_').split('.')[0]
    pkl.dump(n, open(folder + n.name, 'wb'))
    start_time = time.time()
    teta = -1
    for mode in ['minibatch']:
        for eta in [.05, .1, .15]:
import MXD
import CSVFile
import Layer
import Logger

data_source_map_list = CSVFile.csv_to_list()
map_document = MXD.Document()
sde_layers = map_document.retrieve_sde_layers()
repath_candidates = Layer.repath_candidates_list_from_map_layers(sde_layers)
for candidate in repath_candidates:
    matches = [data_source_map for data_source_map in data_source_map_list
               if Layer.is_candidate_data_source_map(data_source_map, candidate)]
    if len(matches) == 1:
        match = matches[0]
        is_repath_success = candidate.repath(match)
        if is_repath_success:
            Logger.log_message("Resolved: " + matches[0][2] + "." + matches[0][3] +
                               " to " + matches[0][1] + " using " + matches[0][0])
        else:
            Logger.log_warning(matches[0][2] + "." + matches[0][3] +
                               " could not be mapped to " + matches[0][1] +
                               " using " + matches[0][0])
    else: