def __init__(self, num_inputs, num_hidden, num_outputs):
        """Build a feed-forward net with:
           - num_inputs input units
           - 1 hidden layer with num_hidden units
           - num_outputs output units.
        """

        self.num_inputs = num_inputs
        self.num_hidden = num_hidden
        self.num_outputs = num_outputs

        # Logistic sigmoid activation.
        self.activation_function = lambda x: 1 / (1 + np.exp(-x))

        # Build input, hidden, output layers; the input layer is temporary
        # and only exists so weight init can see each layer's predecessor.
        sizes = (num_inputs, num_hidden, num_outputs)
        self.layers = [Layer(size, self.activation_function) for size in sizes]

        # Initialize weights of every layer except the input one.
        for prev, layer in zip(self.layers, self.layers[1:]):
            self.init_layer_weights(layer, prev)

        self.layers.pop(0)  # Discard the temporary input layer.
Beispiel #2
0
    def test_update(self):
        """Run two potential/weight update steps over a 3-layer stack."""
        option = Option()

        force_self_prediction = True

        # Bottom -> hidden -> top layer stack.
        configs = [(30, LAYER_TYPE_BOTTOM),
                   (20, LAYER_TYPE_HIDDEN),
                   (10, LAYER_TYPE_TOP)]
        layers = [Layer(pd_unit_size=size, layer_type=kind, option=option,
                        force_self_prediction=force_self_prediction)
                  for size, kind in configs]

        # Chain consecutive layers together.
        for lower, upper in zip(layers, layers[1:]):
            lower.connect_to(upper)

        dt = 0.1

        for _ in range(2):
            # Update all potentials first, then all weights.
            for layer in layers:
                layer.update_potential(dt)
            for layer in layers:
                layer.update_weight(dt)
Beispiel #3
0
 def initialize_network(self):
     """Initializes the layers of the network."""
     self.network = list()
     self.n_layers = len(self.n_hidden)
     # Hidden layers: layer j's input width is the previous hidden width
     # (or the network input size for the first hidden layer).
     for j in range(self.n_layers):
         in_size = self.n_inputs if j == 0 else self.n_hidden[j - 1]
         self.network.append(
             Layer(j,
                   in_size,
                   self.n_hidden[j],
                   bias=self.bias,
                   activation=self.activation))
     # Output layer. (The original used a for/else; since the loop has no
     # break, the else body always runs, so straight-line code is equivalent.)
     j = len(self.network)
     self.network.append(
         Layer(j,
               self.n_hidden[j - 1],
               self.n_outputs,
               bias=self.bias,
               activation=self.activation,
               state="output"))
 def __init__(
     self,
     layer_structure: List[int],
     learning_rate: float,
     activation_function: Callable[[float], float] = sigmoid,
     derivative_activation_function: Callable[[float],
                                              float] = derivative_sigmoid,
 ) -> None:
     """Build the network's layer chain from ``layer_structure``.

     :param layer_structure: neuron counts per layer; needs >= 3 entries.
     :param learning_rate: learning rate passed to every Layer.
     :raises ValueError: if fewer than three layer sizes are supplied.
     """
     if len(layer_structure) < 3:
         raise ValueError(
             "Error: Should be at least 3 layers (1 input, 1 hidden, 1 output)"
         )
     self.layers: List[Layer] = []
     # The input layer has no predecessor.
     self.layers.append(
         Layer(
             None,
             layer_structure[0],
             learning_rate,
             activation_function,
             derivative_activation_function,
         ))
     # Every later layer is wired to the layer created just before it.
     for num_neurons in layer_structure[1:]:
         self.layers.append(
             Layer(
                 self.layers[-1],
                 num_neurons,
                 learning_rate,
                 activation_function,
                 derivative_activation_function,
             ))
Beispiel #5
0
    def Open(cls, path):
        """Open the geodatabase at *path* and wrap its contents.

        Temporarily points ``env.workspace`` at *path* so the arcpy
        List* helpers enumerate that geodatabase, then restores the
        caller's workspace before returning a Geodatabase object.
        """
        # Remember the current arcpy workspace and switch to the target.
        saved_workspace = env.workspace
        env.workspace = path

        cls.validate_geodatabase(path)

        # TODO: Need a generic workspace class, and a dataset class
        datasets = ListDatasets()

        # Wrap each discovered feature class, raster, and table as a Layer.
        fcs = [Layer(os.path.join(path, name)) for name in ListFeatureClasses()]
        rasters = [Layer(os.path.join(path, name)) for name in ListRasters()]
        tables = [Layer(os.path.join(path, name)) for name in ListTables()]

        # Restore the workspace for the user.
        env.workspace = saved_workspace

        return Geodatabase(path, datasets, fcs, rasters, tables)
Beispiel #6
0
    def new_network(self,
                    layers,
                    input,
                    rng=numpy.random.RandomState(),
                    bias=False,
                    activation="tanh",
                    output_activation="linear"):
        """Create the hidden layers and output layer, then register them.

        ``layers`` lists unit counts; consecutive entries give each layer's
        fan-in/fan-out.  NOTE(review): the RandomState default argument is
        evaluated once at definition time, so calls relying on the default
        share a single RNG — confirm that is intended.
        """
        hidden_layer = []
        current_input = input
        for idx in xrange(len(layers) - 2):
            new_hidden = Layer(name=('hidden%d') % (idx),
                               rng=rng,
                               input=current_input,
                               n_in=layers[idx],
                               n_out=layers[idx + 1],
                               bias=bias,
                               activation=activation)
            hidden_layer.append(new_hidden)
            # Each layer feeds the next.
            current_input = new_hidden.output

        output_layer = Layer(name="output",
                             rng=rng,
                             input=hidden_layer[-1].output,
                             n_in=layers[-2],
                             n_out=layers[-1],
                             bias=bias,
                             activation=output_activation)
        self.create_network(hidden_layer, output_layer)
Beispiel #7
0
    def build_network(self):
        """Wire a 2-2-1 XOR network: two inputs, two hidden nodes, one output."""
        print("Starting to build XOR Model")
        self.input_layer = [State(), State()]
        self.output_layer = [State()]

        hidden_layer1 = Layer(learn_rate=self.learn_rate, name='layer1')
        node_list1 = [Node(2, name="n11"), Node(2, name="n21")]

        hidden_layer2 = Layer(learn_rate=self.learn_rate, name='layer2')
        node_list2 = [Node(2, name="n12")]

        # Fully connect every input state to every first-layer node.
        for in_state in self.input_layer:
            for node in node_list1:
                self.connect_state_to_node(in_state, node)

        # Fully connect first-layer nodes to second-layer nodes.
        for upstream in node_list1:
            for downstream in node_list2:
                self.connect_pipeline(upstream, downstream)

        # The single second-layer node drives the output state.
        self.connect_node_to_state(node_list2[0], self.output_layer[0])

        # Register the nodes with their layers, then the layers with self.
        for node in node_list1:
            hidden_layer1.add_node(node)
        for node in node_list2:
            hidden_layer2.add_node(node)

        self.add_layer(hidden_layer1)
        self.add_layer(hidden_layer2)

        print("finish building")
Beispiel #8
0
    def __init__(self,
                 in_features,
                 num_targets,
                 layer_sizes=None,
                 continuous=False):
        """Build the layer stack of the network.

        :param in_features: number of input features (the input layer is
            created with in_features - 1 units, as in the original code).
        :param num_targets: number of output units.
        :param layer_sizes: hidden layer widths; defaults to [5, 10, 5].
        :param continuous: use a linear output activation.
            CONTINUOUS DATA IS WORK IN PROGRESS -- DOES NOT WORK.
        """
        # Fix: the original used a mutable default argument ([5, 10, 5]),
        # which is shared across calls (flake8-bugbear B006). Create it
        # per call instead; callers passing a list see no difference.
        if layer_sizes is None:
            layer_sizes = [5, 10, 5]

        self.continuous = continuous
        self.num_targets = num_targets
        self.saved_error = None  # Used during training

        hidden_activation = ReLUActivation  # Chosen activation function for Activated units

        # Build input layer
        self.layers = [
            Layer(ActivatedUnit, in_features - 1, layer_sizes[0],
                  hidden_activation, INPUT_LAYER)
        ]

        # Build hidden layers, each fed by the previously built layer.
        for layer_size in layer_sizes[1:]:
            self.layers.append(
                Layer(ActivatedUnit, self.layers[-1], layer_size,
                      hidden_activation, HIDDEN_LAYER))

        # Build output layer: linear activation only for continuous targets.
        output_activation = LinearActivation if continuous else hidden_activation
        self.layers.append(
            Layer(ActivatedUnit, self.layers[-1], num_targets,
                  output_activation, OUTPUT_LAYER))
Beispiel #9
0
def main():
    """Cross-validate a 2-level stacking network over the Higgs dataset."""
    dataset_reader = DatasetReader("/scratch/cpillsb1/cs66/data/")

    # uncomment for cancer
    # X, y, X_final, y_final, dataset = dataset_reader.load_cancer()

    X, y, X_final, y_final, dataset = dataset_reader.load_higgs()

    skf = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=42)

    # One forest size per fold.
    estimator_counts = [5, 10, 30, 50]
    for fold, (train, test) in enumerate(skf):
        x_train, x_test = X[train], X[test]
        y_train, y_test = y[train], y[test]

        # Level 0: bagged shallow random forests.
        base = Layer(RandomForestClassifier, {
            "max_depth": 1,
            "n_estimators": estimator_counts[fold]
        }, x_train, y_train, 10)
        stacked_features = base.predictAll(x_train)
        # Level 1: logistic regression over the base predictions.
        meta = Layer(LogisticRegression, {
            "n_jobs": -1,
            "max_iter": 1000
        }, stacked_features, y_train, 1)
        network = Network([base, meta])

        evaluate_test(network, X_final, y_final, estimator_counts[fold],
                      dataset)
Beispiel #10
0
    def __init__(
        self,
        layer_structure: List[int],
        learning_rate: float,
        activation_function: Callable[[float], float] = sigmoid,
        derivative_activation_function: Callable[[float],
                                                 float] = derivatie_sigmoid,
    ) -> None:
        """Build the network's layers from ``layer_structure``.

        :param layer_structure: neuron counts per layer (needs >= 3 entries).
        :param learning_rate: learning rate handed to every Layer.
        :raises ValueError: if fewer than three layer sizes are given.
        NOTE(review): the default ``derivatie_sigmoid`` looks misspelled, but
        it references a name defined elsewhere in this module, so it is kept.
        """
        if len(layer_structure) < 3:
            # Fix: the original message was missing its closing parenthesis
            # and, via a backslash continuation inside the string, embedded a
            # long run of indentation spaces in the user-visible text.
            raise ValueError(
                "Error: Should be at least 3 layers "
                "(1 input, 1 hidden, 1 output)")
        # create a list to add layers
        self.layers: List[Layer] = []
        # input layer
        # it has None previous layer, layer_structure[0] neurons etc
        input_layer: Layer = Layer(
            None,
            layer_structure[0],
            learning_rate,
            activation_function,
            derivative_activation_function,
        )
        self.layers.append(input_layer)

        # hidden layers and output layer
        # we use enumerate to get the index of the previous layer
        for previous, num_neurons in enumerate(layer_structure[1:]):
            next_layer = Layer(
                self.layers[previous],
                num_neurons,
                learning_rate,
                activation_function,
                derivative_activation_function,
            )
            self.layers.append(next_layer)
Beispiel #11
0
File: nn.py  Project: jhd/nntest
    def randomLayersInit(self, layers, startInputs):
        """Build self.network with uniformly random weights.

        ``layers`` format: [number of input neurons, number of sigs for each
        later layer...].  Does nothing unless at least three sizes are given.
        """
        if len(layers) <= 2:

            return

        self.network = []

        # Input layer: one InputNeuron per starting input value.
        input_neurons = [InputNeuron(startInputs[i]) for i in range(layers[0])]
        self.network.append(Layer(input_neurons))

        # Each later layer: sigmoids with one uniform-random weight per
        # neuron of the previous layer, plus a uniform-random bias.
        for layer_size in layers[1:]:
            fan_in = len(self.network[-1].neurons)
            sigmoids = [
                Sigmoid([numpy.random.uniform(0, 1) for _ in range(fan_in)],
                        numpy.random.uniform(0, 1))
                for _ in range(layer_size)
            ]
            self.network.append(Layer(sigmoids))
Beispiel #12
0
    def __init__(self, nbNeurons, nbHiddenLayers, learningRate):
        """Create input/output layers plus nbHiddenLayers hidden layers,
        all of width nbNeurons.
        """
        self.inputLayer = Layer(nbNeurons)
        self.outputLayer = Layer(nbNeurons)

        # Fix: self.hiddenLayers was appended to without ever being created,
        # raising AttributeError (unless a class attribute out of view
        # defined it — in which case this assignment is still safe per call).
        self.hiddenLayers = []
        for _ in range(nbHiddenLayers):
            self.hiddenLayers.append(Layer(nbNeurons))

        self.learningRate = learningRate
Beispiel #13
0
def perceptron(size, weights):
    """Create a two-layer (input -> output) perceptron network.

    ``size``: (input units, output units); ``weights`` connects the layers.
    """
    in_layer = Layer(LayerType.INPUT, size[0], activation=None)
    out_layer = Layer(LayerType.OUTPUT, size[1], activation=Neuron.HEAVISIDE)
    pp.pprint(weights)
    in_layer.connect(out_layer, weights)
    return Ann({'input': in_layer, 'hidden': [], 'output': out_layer})
Beispiel #14
0
def test_old_results():
    """Build a fixed four-layer stack and run its RT() calculation."""
    specs = [
        ("MgF2", 100.0, "../Materials/Al2.txt"),
        ("ZnS", 200.0, "../Materials/Si.txt"),
        ("InGaP", 300.0, "../Materials/Al2.txt"),
        ("GaAs", 400.0, "../Materials/Si.txt"),
    ]
    stack = [Layer(name=n, thickness=t, filename=f) for n, t, f in specs]
    cell2 = Bulk(*stack)
    v1, v2 = cell2.RT()
Beispiel #15
0
def createLayers(layers: list):
    """Build a chain of Layer objects, linking each to its predecessor.

    Each Layer after the first is constructed with the previous Layer,
    and the previous Layer's ``next`` pointer is set to the new one.
    """
    layer_list = []
    previous = Layer(layers[0])
    layer_list.append(previous)
    for spec in layers[1:]:
        current = Layer(spec, previous)
        layer_list.append(current)
        previous.next = current
        previous = current
    return layer_list
Beispiel #16
0
def test_initialize_weights():
    """Weight ranges hold for two layer sizes across three range scales."""
    for size in (10, 20):
        lay = Layer(size, size)
        for scale in (1, 10, 100):
            check_weight_range(lay, scale)
Beispiel #17
0
 def __init__(self, noInputs, hiddens, noOutputs):
     """Build input, hidden, and output layers with matching fan-ins.

     Each Layer is created as Layer(units, fan_in); the input layer has
     fan-in 1 and every later layer's fan-in is the previous layer's width.
     Generalized: an empty ``hiddens`` now yields a direct input->output
     network instead of raising IndexError (backward compatible otherwise).
     """
     self.__noInputs = noInputs
     self.__noOutputs = noOutputs
     self.__noHL = len(hiddens)
     # Unit counts per layer, paired with each layer's fan-in.
     sizes = [noInputs] + list(hiddens) + [noOutputs]
     fan_ins = [1] + sizes[:-1]
     self.__layers = [Layer(n, f) for n, f in zip(sizes, fan_ins)]
Beispiel #18
0
 def __init__(self, *args):
     """Initialize the widget and embed a plot of a fixed layer stack."""
     QtWidgets.QWidget.__init__(self, *args)
     # Fixed sample profile: two nsld-5 layers around a thin nsld-3 film,
     # ending in a semi-infinite nsld-4 layer.
     layer_stack = [
         Layer(nsld=5),
         Layer(nsld=3, thickness=2.),
         Layer(nsld=5),
         Layer(nsld=4., thickness=np.inf),
     ]
     self.m = PlotCanvas(layer_stack, self)
     self.m.move(0, 0)
Beispiel #19
0
    def train(self, indata, ideal, isprint=False):
        '''
        Train the neural network.

        :param indata: (*array_like*) Input data.
        :param ideal: (*array_like*) Ideal (target) data.
        :param isprint: (*bool*) Print each training step or not. Default is False.
        '''
        # Normalize inputs to 2-D numpy arrays (one column per feature).
        if isinstance(indata, (list, tuple)):
            indata = np.array(indata)
        if isinstance(ideal, (list, tuple)):
            ideal = np.array(ideal)
        if indata.ndim == 1:
            indata = indata.reshape(indata.shape[0], 1)
        if ideal.ndim == 1:
            ideal = ideal.reshape(ideal.shape[0], 1)
        # NOTE(review): tojarray implies Jython/Encog interop — the training
        # set is built from Java double arrays.
        trainset = BasicMLDataSet(indata.tojarray('double'),
                                  ideal.tojarray('double'))

        # Assemble the Encog network: input layer, the configured hidden
        # layers, then an output layer without a bias neuron.
        self.n_in = indata.shape[1]
        self.n_out = ideal.shape[1]
        network = BasicNetwork()
        network.addLayer(Layer(self.n_in, None)._layer)
        for layer in self._layers:
            network.addLayer(layer._layer)
        network.addLayer(
            Layer(self.n_out, actname=self.out_activation,
                  bias_neuron=False)._layer)
        network.getStructure().finalizeStructure()
        network.reset()
        self.network = network

        # Choose the training algorithm by its configured name.
        trainf = None
        if self.train_fcn == 'trainrp':
            trainf = ResilientPropagation(network, trainset)
        elif self.train_fcn == 'trainlm':
            trainf = LevenbergMarquardtTraining(network, trainset)
        elif self.train_fcn == 'trainscg':
            trainf = ScaledConjugateGradient(network, trainset)
        else:
            raise ValueError('Training function not exist: %s' %
                             self.train_fcn)

        # Iterate until the epoch budget runs out or the error goal is hit.
        if not trainf is None:
            trainf.setThreadCount(0)
            for i in range(self.train_epochs):
                trainf.iteration()
                if isprint:
                    print 'Epochs %i: Error=%.3f' % (i + 1, trainf.getError())
                if trainf.getError() < self.train_goal:
                    break

            trainf.finishTraining()
Beispiel #20
0
 def add_layer(self,
               nodes,
               activation_func,
               preset_Activation=True,
               weights=None,
               biases=None):
     """Append a new layer to the network.

     The layer's fan-in is the previous layer's node count, or the
     network's input size when this is the first layer.
     """
     fan_in = self.input_nodes if not self.layers else self.layers[-1].num_nodes
     self.layers.append(Layer(nodes, fan_in, activation_func,
                              preset_Activation, weights, biases))
Beispiel #21
0
 def __init__(self, n_in, n_hidden):
     """Sparse autoencoder: n_in -> n_hidden -> n_in with sigmoid units.

     Fix: numpy.random.random_sample takes a single *size* tuple, so the
     original two-positional-argument calls raised TypeError; the weight
     matrices are now drawn with explicit shape tuples.
     """
     # Encoder weights/bias: (n_hidden, n_in).
     w = np.random.random_sample((n_hidden, n_in))
     b = np.zeros((n_hidden, 1))
     self.hidden = Layer(w, b)
     # Decoder weights/bias: (n_in, n_hidden).
     w = np.random.random_sample((n_in, n_hidden))
     b = np.zeros((n_in, 1))
     self.out = Layer(w, b)
     self.beta = 3  # presumably the sparsity penalty weight — confirm usage
     self.lmb = 0.001  # presumably weight decay — confirm usage
     self.sparsityParam = 0.05  # presumably target mean activation
     self.active = active.sigmoid()
     self.eta = 0.01  # learning rate
Beispiel #22
0
 def train(self, training_input, training_output, ans, ans2):
     """Run one training pass in the mode selected by ``ans``.

     ans == 1: whole-batch "g" forward pass and backpropagation.
     ans == 2: fresh hidden/output layers, then per-sample "s" forward and
     backprop; the first 50 samples pass True as the final flag, the rest
     False.
     """
     if ans == 1:
         self.g_forward(training_input)
         self.g_backward_propagation(training_output, training_input)
     elif ans == 2:
         self.hidden_layer = Layer()
         self.output_layer = Layer()
         for idx, sample in enumerate(training_input):
             oo, oh = self.s_forward(sample, ans2)
             self.s_backward_propagation(oo, training_output[idx], sample,
                                         oh, idx < 50)
Beispiel #23
0
def multi_layer_network(size, weights):
    """Creates a 3 layer network (1 hidden).

    NOTE(review): weights[1] connects input->hidden and weights[0]
    connects hidden->output — confirm this ordering is intentional.
    """
    in_layer = Layer(LayerType.INPUT, size[0], activation=None)
    hid_layer = Layer(LayerType.HIDDEN, size[1], activation=Neuron.LOGISTIC)
    out_layer = Layer(LayerType.OUTPUT, size[2], activation=Neuron.LOGISTIC)
    in_layer.connect(hid_layer, weights[1])
    hid_layer.connect(out_layer, weights[0])
    return Ann({
        'input': in_layer,
        'hidden': [hid_layer],
        'output': out_layer,
    })
Beispiel #24
0
    def __init__(self, structure):
        """Creates a neural network.

        Arguments:
            structure (array): Array of numbers giving the number of neurons in
            each layer of the neural network.
        """

        self.first_layer = Layer(structure[0])
        self.last_layer = self.first_layer
        self.loss = 0

        # Chain one Layer per remaining size onto the tail of the network.
        for size in structure[1:]:
            self.last_layer = Layer(size, self.last_layer)
Beispiel #25
0
    def __init__(self, input_dim, output_dim, n_hidden, hidden_sizes, nonlin, nonlin_deriv, last_nonlin=None, last_nonlin_deriv=None):
        """Stack n_hidden NDI layers, then append one Layer as the output.

        The output layer uses (last_nonlin, last_nonlin_deriv) when a
        last_nonlin is supplied, otherwise (nonlin, nonlin_deriv).
        """
        dims = [input_dim] + hidden_sizes + [output_dim]
        self.layers = [NDI(dims[i], dims[i + 1], nonlin, nonlin_deriv)
                       for i in range(n_hidden)]

        # Final layer: optionally a distinct output nonlinearity.
        if last_nonlin:
            final_fn, final_deriv = last_nonlin, last_nonlin_deriv
        else:
            final_fn, final_deriv = nonlin, nonlin_deriv
        self.layers.append(Layer(dims[-2], dims[-1], final_fn, final_deriv))
        self.cache = []
Beispiel #26
0
 def __init__(self):
     """Build a fixed 784-16-16-10 layer chain seeded with trainX[5]/255."""
     # layer1 is the input layer; layer4 is the output layer.
     self.layer1 = Layer(784, trainX[5]/255, None)
     self.layer2 = Layer(16, None, self.layer1)
     self.layer3 = Layer(16, None, self.layer2)
     self.layer4 = Layer(10, None, self.layer3)

     # Forward-link the chain; the output layer has no successor.
     chain = [self.layer1, self.layer2, self.layer3, self.layer4]
     for current, following in zip(chain, chain[1:]):
         current.layerAfter = following
     self.layer4.layerAfter = None

     self.xs = np.array([])
     self.costs = np.array([])
Beispiel #27
0
 def __init__(self, n_nodes,
              activation=Tanh(),
              initialization="he",
              keep_prob=None,
              good_enough_loss=0.045):
     """Stack len(n_nodes) - 1 layers: all but the last use ``activation``
     (with optional dropout); the final layer uses Sigmoid and no dropout.

     NOTE(review): the Tanh() default is created once at definition time
     and shared across instances — confirm the activation is stateless.
     """
     Model.__init__(self, good_enough_loss)
     self.layers = list()
     # Hidden layers between consecutive node counts.
     for fan_in, fan_out in zip(n_nodes[:-2], n_nodes[1:-1]):
         self.layers.append(Layer(fan_in, fan_out,
                                  activation=activation,
                                  keep_prob=keep_prob,
                                  initialization=initialization))
     self.layers.append(Layer(n_nodes[-2], n_nodes[-1],
                              activation=Sigmoid(),
                              initialization=initialization))
Beispiel #28
0
 def __init__(self, input_size: int, hidden_layers_sizes: List[int]):
     '''
     @param input_size: Size of the input layer (i.e. how many inputs does
                         the network receive)
     @param hidden_layers_sizes: list of integers where each number at index
                         i specifies how many nodes does the ith hidden layer contains.
     '''
     self._learning_rate = 0.4
     # Input layer first; each later layer's fan-in is the previous width.
     self._layers: List[Layer] = [Layer(input_size, 1, LayerType.INPUT)]
     last_index = len(hidden_layers_sizes) - 1
     prev_size = input_size
     for idx, size in enumerate(hidden_layers_sizes):
         # The final configured layer doubles as the output layer.
         kind = LayerType.OUTPUT if idx == last_index else LayerType.HIDDEN
         self._layers.append(Layer(size, prev_size, kind))
         prev_size = size
Beispiel #29
0
 def __init__(self, input_dim, output_dim, n_hidden, hidden_sizes, nonlin, nonlin_deriv, last_nonlin=None, last_nonlin_deriv=None, identity=None, last_linear_flag=False):
     """Stack n_hidden Layer objects, then append a final layer.

     The final layer is linear (identity) when last_linear_flag is True,
     otherwise it uses last_nonlin/last_nonlin_deriv when given and falls
     back to nonlin/nonlin_deriv.
     """
     dims = [input_dim] + hidden_sizes + [output_dim]
     self.layers = [Layer(dims[i], dims[i + 1], nonlin, nonlin_deriv)
                    for i in range(n_hidden)]

     # Select the final activation pair. The `== True` comparison is kept
     # from the original: truthy non-True flags fall through to the nonlin
     # branches.
     if last_linear_flag == True:
         final_pair = (identity, identity)
     elif last_nonlin:
         final_pair = (last_nonlin, last_nonlin_deriv)
     else:
         final_pair = (nonlin, nonlin_deriv)
     self.layers.append(Layer(dims[-2], dims[-1], *final_pair))
Beispiel #30
0
 def __init__(self, num_hidden_layers, num_test_feat, num_neurons_per_layer,
              num_possible_outputs):
     """Build self.hidden_layers mapping num_test_feat features through the
     hidden stack to num_possible_outputs outputs.

     NOTE(review): if num_hidden_layers < 1, self.hidden_layers is never
     assigned in the visible code — confirm handling elsewhere.
     """
     if num_hidden_layers > 1:
         self.hidden_layers = []
         # First layer consumes the raw test features.
         self.hidden_layers.append(
             Layer(num_neurons_per_layer, num_test_feat))
         # Middle layers: uniform width, num_hidden_layers - 2 of them.
         for i in range(num_hidden_layers - 2):
             self.hidden_layers.append(
                 Layer(num_neurons_per_layer, num_neurons_per_layer))
         # Final layer maps onto the possible outputs.
         self.hidden_layers.append(
             Layer(num_possible_outputs, num_neurons_per_layer))
     elif num_hidden_layers == 1:
         self.hidden_layers = []
         # Single layer straight from features to outputs.
         self.hidden_layers.append(
             Layer(num_possible_outputs, num_test_feat))