Esempio n. 1
0
    def __init__(self, number_of_vectors, number_of_input_elements,
                 number_of_output_elements, number_of_neurons,
                 number_of_hidden_layers, problem_type, function_types, seed):
        """Assemble the network for a classification or regression task.

        Generates the input and output data, then builds the ordered layer
        list: input layer, ``number_of_hidden_layers`` pairs of
        (HiddenLayer, ActivationFunction), the output layer, and a final
        ActivationFunction used as the loss function.

        :param number_of_vectors: number of data vectors (rows).
        :param number_of_input_elements: elements per input vector.
        :param number_of_output_elements: elements per output vector.
        :param number_of_neurons: neurons per hidden layer.
        :param number_of_hidden_layers: count of hidden layers.
        :param problem_type: ``'classification'``; any other value selects
            regression.
        :param function_types: activation type per hidden layer; the last
            entry selects the loss function.
        :param seed: RNG seed for input-data generation.
        """
        self.number_of_vectors = number_of_vectors
        self.number_of_input_elements = number_of_input_elements
        self.number_of_output_elements = number_of_output_elements
        self.number_of_neurons = number_of_neurons
        self.number_of_hidden_layers = number_of_hidden_layers
        self.function_types = function_types
        self.problem_type = problem_type

        is_classification = problem_type == 'classification'

        # Generate the input data for the selected problem type.
        self.input_layer = lrs.InputLayer(number_of_vectors,
                                          number_of_input_elements, seed)
        if is_classification:
            self.input_layer.generate_classification()
        else:
            self.input_layer.generate_regression()

        # Generate the matching target data from the generated inputs.
        self.output_layer = lrs.OutputLayer(number_of_vectors,
                                            number_of_output_elements)
        if is_classification:
            self.output_layer.generate_classification(self.input_layer)
        else:
            self.output_layer.generate_regression(self.input_layer)

        # Ordered list of all layers: input data, hidden/activation pairs,
        # output data and the loss function.
        self.layers = [self.input_layer]
        for i in range(number_of_hidden_layers):
            self.layers.append(
                lrs.HiddenLayer(number_of_vectors, number_of_input_elements,
                                number_of_neurons, i))
            self.layers.append(
                lrs.ActivationFunction(number_of_vectors,
                                       number_of_input_elements,
                                       number_of_neurons, function_types[i]))

        # Output layer, then the loss function (last configured type).
        self.layers.append(self.output_layer)
        self.layers.append(
            lrs.ActivationFunction(number_of_vectors, number_of_input_elements,
                                   number_of_neurons, function_types[-1]))
        # Output-layer weights are sized from the last hidden layer, which
        # sits at index -4 once the output and loss layers are appended.
        self.output_layer.generate_weights(self.layers[-4].number_of_neurons)
    def __init__(self, data_dim, target_dim, hidden_dims):
        """Chain hidden layers from ``data_dim`` through ``hidden_dims``,
        finishing with an output layer that maps onto ``target_dim``."""
        dims = [data_dim, *hidden_dims]
        # Each hidden layer consumes the previous layer's width.
        self._hidden_layers = [
            layers.HiddenLayer(n_in, n_out)
            for n_in, n_out in zip(dims, dims[1:])
        ]
        self._output_layer = layers.OutputLayer(dims[-1], target_dim)
 def get_hidden_layers(self, hidden_layer_sizes):
     """Append one HiddenLayer per requested size to ``self.hidden_layers``."""
     self.hidden_layers.extend(
         layers.HiddenLayer(size) for size in hidden_layer_sizes)
Esempio n. 4
0
    # Convert the validation-set size into a number of mini-batches.
    # NOTE(review): Python 2 division — integer floor when both are ints.
    n_valid_batches /= batch_size

    # Symbolic inputs: a sparse int64 matrix for the data and a byte
    # matrix for the (multi-label) targets.
    index = T.lscalar()  # index to a [mini]batch
    #     x = T.matrix('x')  # the data is presented as rasterized images
    x = sparse.csr_matrix(
        name='x', dtype='int64')  # the data is presented as rasterized images
    #     y = sparse.csr_matrix(name='y', dtype='int8')  # the labels are presented as 1D vector of  multi
    y = T.bmatrix('y')  # the labels are presented as 1D vector of  multi

    print '... building the computional Graph'
    # Fixed seed so weight initialisation is reproducible across runs.
    rng = numpy.random.RandomState(23455)

    # Build either a one-hidden-layer MLP or a plain output (logistic)
    # layer directly on the sparse input, depending on the MLP flag.
    if MLP:
        layer1 = layers.HiddenLayer(rng,
                                    input=x,
                                    n_in=in_trn_matrix.shape[1],
                                    n_out=num_of_hidden_units,
                                    sparse=True)
        out_layer = layers.OutputLayer(input=layer1.output,
                                       n_in=num_of_hidden_units,
                                       n_out=numtype)
        # Parameters of both layers are optimised jointly.
        params = layer1.params + out_layer.params
        #weights = T.concatenate[layer1.W, out_layer.W]
    else:
        out_layer = layers.OutputLayer(input=x,
                                       n_in=in_trn_matrix.shape[1],
                                       n_out=numtype,
                                       sparse=True)
        params = out_layer.params
        #weights = [out_layer.W]
Esempio n. 5
0
# Symbolic variables: dense image matrix and integer label matrix.
index = T.lscalar()  # index to a [mini]batch
x = T.matrix('x')  # the data is presented as rasterized images
y = T.imatrix('y')  # the labels are presented as 1D vector of
# [int] labels
# y_all = T.ivector('y_all')
######################
# BUILD ACTUAL MODEL #
######################
logger.info('... building the model')

# Flatten each (1, H, W) image in the batch into a single feature row.
layer2_input = x.reshape((batch_size, 1, ishape[0], ishape[1])).flatten(2)

# construct a fully-connected sigmoidal layer
layer2 = layers.HiddenLayer(rng,
                            input=layer2_input,
                            n_in=ishape[0] * ishape[1],
                            n_out=num_of_hidden_units,
                            activation=T.tanh)

# Accumulators for the per-target output layers built in the loop below.
outlayers = []
cost = 0.
out_errors = []
total_errs = 0
params = layer2.params

logger.info('n_in in each softmax: %d and n_out: %d', num_of_hidden_units, 2)
# One binary (n_out=2) softmax output layer per target, all sharing the
# same hidden representation.
for i in range(n_targets):
    oneOutLayer = layers.OutputLayer(input=layer2.output,
                                     n_in=num_of_hidden_units,
                                     n_out=2)
    #     oneOutLayer = MyLogisticRegression(input=layer1.output, n_in=num_of_hidden_units, n_out=2)
Esempio n. 6
0
                                               e2simmatrix_test)

# Default float dtype used by Theano shared variables.
dt = theano.config.floatX  # @UndefinedVariable

# Symbolic variables: dense data matrix and integer label matrix.
index = T.lscalar()  # index to a [mini]batch
x = T.matrix('x')  # the data is presented as rasterized images
y = T.imatrix('y')  # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# Fixed seed so weight initialisation is reproducible across runs.
rng = numpy.random.RandomState(23455)
layer1 = layers.HiddenLayer(rng,
                            input=x,
                            n_in=input_matrix_test.shape[1],
                            n_out=num_of_hidden_units,
                            activation=T.tanh)

# Accumulators for the per-target output layers built in the loop below.
outlayers = []
cost = 0.
out_errors = []
predicted_probs = []

# One binary (n_out=2) output layer plus softmax loss per target, all
# sharing the same hidden representation.
for i in range(n_targets):
    oneOutLayer = layers.OutputLayer(input=layer1.output,
                                     n_in=num_of_hidden_units,
                                     n_out=2)
    onelogistic = layers.SoftmaxLoss(input=oneOutLayer.score_y_given_x,
                                     n_in=2,
                                     n_out=2)