Example #1
    def __init__(self, input_size, output_size, hidden_layer_sizes):

        self.learning_rate = 0.1

        self.input_layer = InputLayer(input_size)
        self.output_layer = OutputLayer(output_size)
        self.hidden_layers = [
            HiddenLayer(hidden_layer_size)
            for hidden_layer_size in hidden_layer_sizes
        ]

        for i, hidden_layer in enumerate(self.hidden_layers):
            if i == 0 and i == len(self.hidden_layers) - 1:
                hidden_layer.initialize(self.input_layer, self.output_layer)
            elif i == 0:
                hidden_layer.initialize(self.input_layer,
                                        self.hidden_layers[i + 1])
            elif i == len(self.hidden_layers) - 1:
                hidden_layer.initialize(self.hidden_layers[i - 1],
                                        self.output_layer)
            else:
                hidden_layer.initialize(self.hidden_layers[i - 1],
                                        self.hidden_layers[i + 1])

        if self.hidden_layers:
            self.output_layer.initialize(self.hidden_layers[-1])
        else:
            self.output_layer.initialize(self.input_layer)
Example #2
# Imports assumed by this snippet: stdlib helpers, grpc, and the generated stubs.
import time
from concurrent import futures

import grpc

import nn_pb2_grpc


def main():
    """Serve this InputLayer over gRPC, then start feeding training data."""
    input_layer = InputLayer(conf["upper_layer"], conf["data_path"],
                             conf["input_dim"], conf["layer_name"])

    MAX_MESSAGE_LENGTH = 128 * 1024 * 1024
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4),
                         options=[('grpc.max_send_message_length',
                                   MAX_MESSAGE_LENGTH),
                                  ('grpc.max_receive_message_length',
                                   MAX_MESSAGE_LENGTH)])
    nn_pb2_grpc.add_LayerDataExchangeServicer_to_server(input_layer, server)
    server.add_insecure_port(conf["listen_on"])
    server.start()

    input("Press Enter to start sending data...")

    input_layer.start_feed_data(conf["batch_size"], conf["epochs"])

    # idle
    try:
        while True:
            time.sleep(24 * 60 * 60)
    except KeyboardInterrupt:
        server.stop(0)
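
The snippet reads every setting from a module-level `conf` mapping defined elsewhere in the file; a minimal sketch of its shape, with hypothetical values (the keys are the ones the code above actually reads):

conf = {
    "upper_layer": "localhost:50052",  # address of the next layer's service
    "data_path": "./data/mnist",
    "input_dim": 784,
    "layer_name": "input",
    "listen_on": "[::]:50051",
    "batch_size": 64,
    "epochs": 10,
}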
Example #3
 def _build_layers(self, inputs):
     self.input_layer = InputLayer(inputs)
     self.layers = []
     for idx, layer_params in enumerate(self.layers_params):
         neurons_num, act_func = layer_params
         if len(self.layers) == 0:
             layer = Layer(neurons_num, self.input_layer.neurons_number,
                           act_func, inputs.shape[0])
         else:
             layer = Layer(neurons_num, self.layers[idx - 1].neurons_number,
                           act_func, inputs.shape[0])
         self.layers.append(layer)
Example #4
class FeedForwardNeuralNetwork:
    def __init__(self, input_size, output_size, hidden_layer_sizes):

        self.learning_rate = 0.1

        self.input_layer = InputLayer(input_size)
        self.output_layer = OutputLayer(output_size)
        self.hidden_layers = [
            HiddenLayer(hidden_layer_size)
            for hidden_layer_size in hidden_layer_sizes
        ]

        for i, hidden_layer in enumerate(self.hidden_layers):
            if i == 0 and i == len(self.hidden_layers) - 1:
                hidden_layer.initialize(self.input_layer, self.output_layer)
            elif i == 0:
                hidden_layer.initialize(self.input_layer,
                                        self.hidden_layers[i + 1])
            elif i == len(self.hidden_layers) - 1:
                hidden_layer.initialize(self.hidden_layers[i - 1],
                                        self.output_layer)
            else:
                hidden_layer.initialize(self.hidden_layers[i - 1],
                                        self.hidden_layers[i + 1])

        if self.hidden_layers:
            self.output_layer.initialize(self.hidden_layers[-1])
        else:
            self.output_layer.initialize(self.input_layer)

    def predict(self, input_arr):
        self.input_layer.set_values(input_arr)

        for hidden_layer in self.hidden_layers:
            hidden_layer.feed_forward()

        self.output_layer.feed_forward()

        return self.output_layer.values

    def train(self, input_arr, target_arr):
        self.predict(input_arr)

        self.output_layer.calculate_errors(target_arr)
        for hidden_layer in reversed(self.hidden_layers):
            hidden_layer.calculate_errors()

        self.output_layer.adjust_parameters(self.learning_rate)
        for hidden_layer in reversed(self.hidden_layers):
            hidden_layer.adjust_parameters(self.learning_rate)
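
A minimal usage sketch for the class above, training it as an XOR classifier (toy data; actual convergence depends on the layer implementations, which are not shown):

net = FeedForwardNeuralNetwork(input_size=2, output_size=1,
                               hidden_layer_sizes=[4])
samples = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
for _ in range(10000):
    for x, y in samples:
        net.train(x, y)
print(net.predict([0, 1]))  # expected to approach [1]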
Example #5
def build_model():
    # VGG-19 convolutional trunk built from Lasagne layers; average pooling
    # replaces max pooling, a common choice in neural-style transfer code.
    net = {}
    net['input'] = InputLayer((1, 3, IMAGE_W, IMAGE_W))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')
    return net
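
To pull feature maps out of the dictionary, one would typically compile a Theano function over the chosen layer. A sketch, assuming Lasagne is installed and pretrained weights have been loaded into the layers (the zero image stands in for a real preprocessed input):

import numpy as np
import theano
import theano.tensor as T
import lasagne

net = build_model()
x = T.tensor4('x')
feat = lasagne.layers.get_output(net['conv4_2'], x, deterministic=True)
get_features = theano.function([x], feat)

im = np.zeros((1, 3, IMAGE_W, IMAGE_W), dtype=theano.config.floatX)
features = get_features(im)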
Example #6
        def view_rec_test(x_curr, prev_s_tensor, prev_in_gate_tensor):
            params = get_trainable_params()
            print('view rec test : num of params %d' % len(params))

            rect8_ = InputLayer(view_features_shape, x_curr)
            prev_s_ = InputLayer(s_shape, prev_s_tensor)

            t_x_s_update_ = FCConv3DLayer(
                prev_s_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_s_update.params,
                isTrainable=True)

            t_x_s_reset_ = FCConv3DLayer(
                prev_s_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_s_reset.params,
                isTrainable=True)

            update_gate_ = SigmoidLayer(t_x_s_update_)
            comp_update_gate_ = ComplementLayer(update_gate_)
            reset_gate_ = SigmoidLayer(t_x_s_reset_)

            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
            t_x_rs_ = FCConv3DLayer(
                rs_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_rs.params,
                isTrainable=True)

            tanh_t_x_rs_ = TanhLayer(t_x_rs_)

            gru_out_ = AddLayer(
                EltwiseMultiplyLayer(update_gate_, prev_s_),
                EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))

            return gru_out_.output, update_gate_.output
Example #7
    def get_network(self):
        self._read_config()

        input_layer = None
        layers = []

        prev_layer = None
        for data in self._layers:
            if data["type"] == "input":
                input_size = self._input_size * self._input_size
                output_size = int(data["output_size"])
                layer = InputLayer(input_size, output_size)
            elif data["type"] == "dense":
                if "output_size" in data:
                    output_size = int(data["output_size"])
                else:
                    output_size = self._output_size
                activation_function_str = data["af"]
                activation_function = self._lookup_activation_function(
                    activation_function_str)
                activation_function_d = self._lookup_activation_function_d(
                    activation_function_str)
                learning_rate = float(data["la"])
                layer = DenseLayer(prev_layer.get_output_shape(), output_size,
                                   activation_function, activation_function_d,
                                   learning_rate)
            elif data["type"] == "convolution":
                if prev_layer is None:
                    input_shape = (self._input_size, self._input_size, 1)
                else:
                    input_shape = prev_layer.get_output_shape()
                kernel_n = int(data["kernel_n"])
                kernel_m = int(data["kernel_m"])
                channels_out = int(data["channels"])
                output_shape = (kernel_n, kernel_m, channels_out)
                v_stride = int(data["stride_n"])
                h_stride = int(data["stride_m"])
                padding = int(data["padding"])
                la = float(data["la"])
                layer = ConvolutionLayer(input_shape, output_shape, h_stride,
                                         v_stride, padding, la)
            else:
                # Fail fast on unrecognized layer types instead of silently
                # reusing the previous `layer`.
                raise ValueError("Unknown layer type: %s" % data["type"])
            if input_layer is None:
                input_layer = layer
            else:
                layers.append(layer)
            prev_layer = layer

        network = Network(input_layer, layers)
        return network
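
The method walks over `self._layers`, which `_read_config` fills with one dict per layer. A plausible shape for that parsed config (values are hypothetical, keys taken from the code above; a dense entry without "output_size" falls back to self._output_size):

layers_config = [
    {"type": "input", "output_size": "64"},
    {"type": "convolution", "kernel_n": "3", "kernel_m": "3", "channels": "8",
     "stride_n": "1", "stride_m": "1", "padding": "1", "la": "0.01"},
    {"type": "dense", "af": "relu", "la": "0.01"},
]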
Example #8
 def init_network(self,
                  activation_in='relu',
                  alpha_regularization=0,
                  save_plot_values=False):
     layer_in = InputLayer(self.input_data,
                           self.bias,
                           self.shape_in,
                           activation_in,
                           alpha_regularization,
                           regularization_type=self.regularization)
     if save_plot_values:
         self.plotter = Plotter()
     self.crt_layer = layer_in
     self.layers.append(layer_in)
     return self
Example #9
    def build_model(self):
        layers = []
        input_shape = np.array(
            [self.batch_size, self.x_dim, self.x_dim, self.c_dim])
        # layer_1: input_layer ==> [n, 28, 28, 1]
        x = InputLayer(input_shape)
        layers.append(x)
        # layer_2: conv_layer [n, 28, 28, 1] ==> [n, 28, 28, 20]
        x = ConvLayer(x,
                      output_nums=20,
                      kernel=5,
                      strides=1,
                      padding='SAME',
                      name='conv1')
        layers.append(x)
        # layer_3: maxpool_layer [n, 28, 28, 20] ==> [n, 14, 14, 20]
        x = MaxPoolLayer(x, kernel=2, strides=2, padding='SAME', name='pool1')
        layers.append(x)
        # layer_4: conv_layer [n, 14, 14, 20] ==> [n, 14, 14, 50]
        x = ConvLayer(x,
                      output_nums=50,
                      kernel=5,
                      strides=1,
                      padding='SAME',
                      name='conv2')
        layers.append(x)
        # layer_5: maxpool_layer [n, 14, 14, 50] ==> [n, 7, 7, 50]
        x = MaxPoolLayer(x, kernel=2, strides=2, padding='SAME', name='pool2')
        layers.append(x)
        # layer_6: flatten_layer [n, 7, 7, 50] ==> [n, 7*7*50]
        x = FlattenLayer(x, name='flatten')
        layers.append(x)
        # layer_7: fully_connected_layer [n, 2450] ==> [n, 500]
        x = DenseLayer(x, output_nums=500, name='dense1')
        layers.append(x)
        # layer_8: relu_layer [n, 500] ==> [n, 500]
        x = ReLULayer(x, name='relu1')
        layers.append(x)
        # layer_9: fully_connected_layer [n, 500] ==> [n, 10]
        x = DenseLayer(x, output_nums=10, name='dense2')
        layers.append(x)
        # layer_10: softmax_layer [n, 10] ==> [n, 10]
        x = SoftMaxLayer(x, name='softmax')
        layers.append(x)

        self.layers = layers
Example #10
    def __init__(self, layer_sizes, types=None, cost_f='log', thetas=None):
        """
        Initialize the network with the following parameters:
        :param layer_sizes: list of layer sizes including input and output ([2, 3, 4] - 2 neurons in the
                            input layer, 3 in the hidden layer, 4 in the output)
        :param types: optional list of layer types; its length should be one less than the length of sizes
                (if it has the same length as sizes, the first entry corresponds to the input layer and is ignored)
        :param cost_f: optional, default is 'log' for the log cost function
        :param thetas: optional list of initial weights for each layer;
                its length should be one less than the length of sizes
                (if it has the same length as sizes, the first entry corresponds to the input layer and is ignored)
        :return:
        """

        self.layer_sizes = layer_sizes
        self.cost_f = cost_f

        # list to keep track of cost values during training
        self.costs = []
        self.costs_batches = []

        # assign cost function
        self._assign_cost_function()

        # outputs produced by the network, same as last layer activations
        self.outputs = None

        # prepare thetas
        layer_thetas = self._prepare_parameters_list(thetas)

        # prepare types
        layer_types = self._prepare_parameters_list(types)

        # instantiate layers
        self.layers = []
        for i, size in enumerate(self.layer_sizes):
            if i == 0:
                self.layers.append(InputLayer(size))
            else:
                new_layer = self._get_new_layer(layer_types[i],
                                                size,
                                                self.layer_sizes[i - 1],
                                                theta=layer_thetas[i])
                self.layers.append(new_layer)
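
A usage sketch for this constructor (the enclosing class name is not shown in the snippet, so "Network" is assumed, as are the type strings accepted by _get_new_layer):

net = Network([2, 3, 1], types=['sigmoid', 'sigmoid'], cost_f='log')
print(len(net.layers))  # 3: the input layer plus two trainable layers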
Example #11
    def __init__(self, num_input=256, num_hidden=[512, 512], num_output=256,
                 clip_at=0.0, scale_norm=0.0):
        X = T.fmatrix()
        Y = T.imatrix()
        lr = T.fscalar()
        alpha = T.fscalar()
        reg = T.fscalar()
        dropout_prob = T.fscalar()

        self.num_input = num_input
        self.num_hidden = num_hidden
        self.num_output = num_output
        self.clip_at = clip_at
        self.scale_norm = scale_norm

        inputs = InputLayer(X, name='inputs')
        num_prev = num_input
        prev_layer = inputs

        self.layers = [inputs]
        # num_hidden may be a single size or a list of sizes; build one
        # LSTM (plus dropout) per entry.
        hidden_sizes = [num_hidden] if isinstance(num_hidden, int) else list(num_hidden)
        for i, n_hid in enumerate(hidden_sizes):
            lstm = LSTMLayer(num_prev, n_hid, input_layers=[prev_layer],
                             name="lstm%d" % i, go_backwards=False)
            num_prev = n_hid
            prev_layer = lstm
            self.layers.append(prev_layer)
            prev_layer = DropoutLayer(prev_layer, dropout_prob=dropout_prob)
            self.layers.append(prev_layer)

        FC = FullyConnectedLayer(num_prev, num_output, input_layers=[prev_layer], name="yhat")
        self.layers.append(FC)
        Y_hat = FC.output()

        # change scores to probabilities
        Y_hat = T.nnet.softmax(Y_hat)

        params = get_params(self.layers)
        caches = make_caches(params)

        # NOTE: `loss` is not defined in this snippet; a categorical
        # cross-entropy over the softmax output is assumed here.
        loss = T.nnet.categorical_crossentropy(Y_hat, Y).mean()

        updates, grads = momentum(loss, params, lr, reg)

        self.train_func = theano.function([X, Y, lr, reg, dropout_prob, alpha],
                                          loss, updates=updates,
                                          allow_input_downcast=True)

        self.predict_sequence_func = theano.function([X, dropout_prob], [Y_hat],
                                                     allow_input_downcast=True)
Example #12
    def __init__(self, name, session, input_dims):
        self.name = name
        self.session = session
        self.input_dims = input_dims

        self.layers = []
        self.connections = []
        self.parameters = []
        self.input_layers = []
        self.input_placeholder_layers = []

        for input_id, input_dim in enumerate(input_dims):
            input_layer = InputLayer(self.name + "_input_" + str(input_id), input_dim)
            self.input_layers.append(input_layer)
            self.input_placeholder_layers.append(input_layer)

        self.is_training = tf.placeholder(tf.bool, name=(name + "_is_training"))

        self.output_layer = None
        self.output_dim = None
        self.compiled = False
Example #13
    def __init__(self,
                 num_input,
                 num_cells=50,
                 num_output=1,
                 lr=0.01,
                 rho=0.95):
        X = T.matrix('x')
        Y = T.matrix('y')
        eta = T.scalar('eta')
        alpha = T.scalar('alpha')

        self.num_input = num_input
        self.num_output = num_output
        self.num_cells = num_cells
        self.eta = eta

        inputs = InputLayer(X, name="inputs")
        lstm = LSTMLayer(num_input, num_cells, input_layer=inputs, name="lstm")
        fc = FullyConnectedLayer(num_cells, num_output, input_layer=lstm)
        Y_hat = T.mean(fc.output(), axis=2)
        layers = (inputs, lstm, fc)
        self.params = get_params(layers)
        self.caches = make_caches(self.params)
        self.layers = layers
        mean_cost = T.mean((Y - Y_hat)**2)
        last_cost = T.mean((Y[-1] - Y_hat[-1])**2)
        self.cost = alpha * mean_cost + (1 - alpha) * last_cost
        # Alternative update rule (disabled):
        # self.updates = momentum(self.cost, self.params, self.caches,
        #                         self.eta, clip_at=3.0)
        self.updates, _, _, _, _ = create_optimization_updates(
            self.cost, self.params, method="adadelta", lr=lr, rho=rho)
        self.train = theano.function([X, Y, alpha], [self.cost, last_cost],
                                     updates=self.updates,
                                     allow_input_downcast=True)
        self.costfn = theano.function([X, Y, alpha], [self.cost, last_cost],
                                      allow_input_downcast=True)
        self.predict = theano.function([X], [Y_hat], allow_input_downcast=True)
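
A usage sketch under assumed shapes (the enclosing class name is not shown, so "Model" stands in; X rows are timesteps, and alpha blends the mean cost against the last-step cost):

import numpy as np

model = Model(num_input=8, num_cells=50, num_output=1)
X = np.random.randn(100, 8)
Y = np.random.randn(100, 1)
cost, last_cost = model.train(X, Y, 0.5)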
Example #14
    def __init__(self, layers, decay=0.001, learning_rate=0.01):
        # Map each spec "type" to its layer class.
        mapping = {
            "input": InputLayer,
            "fc": FullyConnectedLayer,
            "convolution": ConvolutionLayer,
            "pool": PoolingLayer,
            "squaredloss": SquaredLossLayer,
            "softmax": SoftmaxLossLayer,
            "relu": ReLuLayer,
        }

        self.layers = []
        self.decay = decay

        self.learning_rate = learning_rate
        prev_layer = None

        for layer in layers:
            layer["input_shape"] = (layer.get("input_shape") or
                                    prev_layer.output_shape)
            layer["decay"] = self.decay
            layer = mapping[layer["type"]](layer)
            self.layers.append(layer)
            prev_layer = layer
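
The constructor consumes a list of layer specs; a hypothetical spec is shown below (only "type" and "input_shape" are read by the constructor itself, and any extra keys each layer class expects are assumptions):

net = Net([  # "Net" stands in for the enclosing class name
    {"type": "input", "input_shape": (1, 28, 28)},
    {"type": "convolution"},
    {"type": "relu"},
    {"type": "fc"},
    {"type": "softmax"},
], decay=0.001, learning_rate=0.01)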
Example #15
import os

# Grid search over training hyper-parameters with k-fold cross-validation.
# Assumed to be defined earlier in the script: DEFAULT_DIR, DEFAULT_NAME,
# the search lists (epochs, learning_rates, regularizations, momentums),
# the fold count k, the batch size `size`, and the `data` set.
filepath = os.path.dirname(os.getcwd()) + DEFAULT_DIR + DEFAULT_NAME

fp = open(filepath, "w")

config = 0

for epoch in epochs:
    for lr in learning_rates:
        for reg in regularizations:
            for alpha in momentums:
                mean_loss = 0
                mean_validation = 0

                for i in range(k):
                    model = NeuralNetwork()
                    model.add(InputLayer(10))
                    model.add(DenseLayer(50, fanin=10))
                    model.add(DenseLayer(30, fanin=50))
                    model.add(OutputLayer(2, fanin=30))
                    model.compile(size, epoch, lr / size, None, reg, alpha,
                                  "mean_squared_error")
                    (train, val) = data.kfolds(index=i, k=k)
                    mean_loss += model.fit(train[0], train[1])[-1]
                    mean_validation += model.evaluate(val[0], val[1])

                fp.write("{}, {}, {}, {}, {}, {}, {}\n".format(
                    config, epoch, lr, reg, alpha, mean_loss / k,
                    mean_validation / k))

                config += 1

fp.close()
Example #16
class MultilayerPerceptron:
    def __init__(self, learning_rate, max_epochs_num, layers_params):
        self.learning_rate = learning_rate
        self.max_epochs_num = max_epochs_num
        self.layers_params = self._parse_layers_params(layers_params)
        self.layers = None

    def _parse_layers_params(self, layers_params):
        return layers_params

    def fit(self, X_train, y_train):
        for sample_idx in range(X_train.shape[0]):
            self._forward_propagation(X_train[sample_idx])
            self._back_propagation(y_train[sample_idx])

    def _forward_propagation(self, x):
        if self.layers is None:
            self._build_layers(inputs=x)
        self.input_layer.calculate_act_func_values(x)
        for idx in range(len(self.layers)):
            if idx == 0:
                inputs = self.input_layer.act_func_values
            else:
                inputs = self.layers[idx - 1].act_func_values
            self.layers[idx].calculate_act_func_values(inputs)

    def _build_layers(self, inputs):
        self.input_layer = InputLayer(inputs)
        self.layers = []
        for idx, layer_params in enumerate(self.layers_params):
            neurons_num, act_func = layer_params
            if len(self.layers) == 0:
                layer = Layer(neurons_num, self.input_layer.neurons_number,
                              act_func, inputs.shape[0])
            else:
                layer = Layer(neurons_num, self.layers[idx - 1].neurons_number,
                              act_func, inputs.shape[0])
            self.layers.append(layer)

    def _back_propagation(self, y):
        self.layers[-1].deltas = self.layers[-1].act_func_values - y
        for idx in range(len(self.layers) - 2, -1, -1):
            layer, next_layer = self.layers[idx], self.layers[idx + 1]
            self.layers[idx].deltas = (
                np.dot(next_layer.deltas, next_layer.weights.T) *
                layer.activation_func_der(layer.sum_func_values))

        for idx, layer in enumerate(self.layers):
            if idx == 0:
                prev_layer = self.input_layer
            else:
                prev_layer = self.layers[idx - 1]
            prev_layer_act_func_values = prev_layer.act_func_values
            self.layers[idx].weights -= self.learning_rate * \
                layer.deltas * prev_layer_act_func_values.reshape(
                    (prev_layer.neurons_number, 1))
            self.layers[idx].biases -= self.learning_rate * layer.deltas

    def predict(self, X_test):
        predictions = []
        for sample_idx in range(X_test.shape[0]):
            length = X_test[sample_idx].shape[0]
            test_sample = X_test[sample_idx].reshape(1, length)
            self.input_layer.calculate_act_func_values(test_sample)
            for idx in range(len(self.layers)):
                if idx == 0:
                    inputs = self.input_layer.act_func_values
                else:
                    inputs = self.layers[idx - 1].act_func_values
                self.layers[idx].calculate_act_func_values(inputs)
            predictions.append(self.layers[-1].act_func_values)

        # output_layer = self.layers[-1]
        # predictions = []
        # for sample_idx in range(X_test.shape[0]):
        #     length = X_test[sample_idx].shape[0]
        #     test_sample = X_test[sample_idx].reshape(1, length)
        #     output_layer.calculate_act_func_values(test_sample)
        #     predictions.append(output_layer.act_func_values)
        return np.array(predictions)

    def regression_score(self, X_test, y_test):
        y_pred = self.predict(X_test)
        return np.mean((y_pred - y_test) ** 2)
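
A minimal usage sketch; the (neurons_num, act_func) tuple format follows _build_layers, while the exact activation interface inside Layer (which also uses an activation_func_der) is an assumption:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

X_train = np.random.rand(100, 4)
y_train = np.random.rand(100, 1)
mlp = MultilayerPerceptron(learning_rate=0.01, max_epochs_num=10,
                           layers_params=[(5, sigmoid), (1, sigmoid)])
mlp.fit(X_train, y_train)
print(mlp.regression_score(X_train, y_train))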
Example #17
def convert(keras_model, class_map, description="Neural Network Model"):
	"""
	Convert a Keras model to PMML.
	@keras_model. The Keras model object
	@class_map. A map in the form {class_id: class_name}
	@description. A short description of the model
	Returns a DeepNetwork object which can be exported to PMML.
	"""
	pmml = DeepNetwork(description=description, class_map=class_map)
	pmml.keras_model = keras_model
	pmml.model_name = keras_model.name
	config = keras_model.get_config()

	for layer in config['layers']:
		layer_class = layer['class_name']
		layer_config = layer['config']
		layer_inbound_nodes = layer['inbound_nodes']
		# Input
		if layer_class == "InputLayer":
			pmml._append_layer(InputLayer(
				name=layer_config['name'],
				input_size=layer_config['batch_input_shape'][1:]
			))
		# Conv2D
		elif layer_class == "Conv2D":
			pmml._append_layer(Conv2D(
				name=layer_config['name'],
				channels=layer_config['filters'],
				kernel_size=layer_config['kernel_size'],
				dilation_rate=layer_config['dilation_rate'],
				use_bias=layer_config['use_bias'],
				activation=layer_config['activation'],
				strides=layer_config['strides'],
				padding=layer_config['padding'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# DepthwiseConv2D
		elif layer_class == "DepthwiseConv2D":
			pmml._append_layer(DepthwiseConv2D(
				name=layer_config['name'],
				kernel_size=layer_config['kernel_size'],
				depth_multiplier=layer_config['depth_multiplier'],
				use_bias=layer_config['use_bias'],
				activation=layer_config['activation'],
				strides=layer_config['strides'],
				padding=layer_config['padding'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# MaxPooling
		elif layer_class == "MaxPooling2D":
			pmml._append_layer(MaxPooling2D(
				name=layer_config['name'],
				pool_size=layer_config['pool_size'],
				strides=layer_config['strides'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "AveragePooling2D":
			pmml._append_layer(AveragePooling2D(
				name=layer_config['name'],
				pool_size=layer_config['pool_size'],
				strides=layer_config['strides'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "GlobalAveragePooling2D":
			pmml._append_layer(GlobalAveragePooling2D(
				name=layer_config['name'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Flatten
		elif layer_class == "Flatten":
			pmml._append_layer(Flatten(
				name=layer_config['name'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Dense
		elif layer_class == "Dense":
			pmml._append_layer(Dense(
				name=layer_config['name'],
				channels=layer_config['units'],
				use_bias=layer_config['use_bias'],
				activation=layer_config['activation'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Zero padding layer
		elif layer_class == "ZeroPadding2D":
			pmml._append_layer(ZeroPadding2D(
				name=layer_config['name'],
				padding=layer_config['padding'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Reshape layer
		elif layer_class == "Reshape":
			pmml._append_layer(Reshape(
				name=layer_config['name'],
				target_shape=layer_config['target_shape'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "Dropout":
			pmml._append_layer(Dropout(
				name=layer_config['name'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Batch Normalization
		elif layer_class == "BatchNormalization":
			pmml._append_layer(BatchNormalization(
				name=layer_config['name'],
				axis=layer_config['axis'],
				momentum=layer_config['momentum'],
				epsilon=layer_config['epsilon'],
				center=layer_config['center'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "Add":
			pmml._append_layer(Merge(
				name=layer_config['name'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes)
			))
		elif layer_class == "Subtract":
			pmml._append_layer(Merge(
				name=layer_config['name'],
				operator='subtract',
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes)
			))
		elif layer_class == "Dot":
			pmml._append_layer(Merge(
				name=layer_config['name'],
				operator='dot',
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes)
			))
		elif layer_class == "Concatenate":
			pmml._append_layer(Merge(
				name=layer_config['name'],
				axis=layer_config['axis'],
				operator='concatenate',
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes)
			))
		elif layer_class == "Activation":
			pmml._append_layer(Activation(
				name=layer_config['name'],
				activation=layer_config['activation'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		elif layer_class == "ReLU":
			pmml._append_layer(Activation(
				name=layer_config['name'],
				activation='relu',
				threshold=layer_config['threshold'],
				max_value=layer_config['max_value'],
				negative_slope=layer_config['negative_slope'],
				inbound_nodes=get_inbound_nodes(layer_inbound_nodes),
			))
		# Unknown layer
		else:
			raise ValueError("Unknown layer type: %s" % layer_class)
	return pmml
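
A usage sketch (MobileNet exercises most of the layer types handled above; the export call at the end is hypothetical, since DeepNetwork's API is not shown):

import keras

model = keras.applications.MobileNet(weights=None)
class_map = {i: "class_%d" % i for i in range(1000)}
pmml = convert(model, class_map, description="MobileNet demo")
pmml.save_pmml("mobilenet.pmml")  # hypothetical export method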
Example #18
if __name__ == "__main__":
    data_dir = '/home/mmay/data/mnist'
    trX, _, teX, _ = load_mnist(data_dir)

    augmenter = SaltAndPepper(low=0., high=1., p_corrupt=0.5)

    bce = T.nnet.binary_crossentropy
    # Factor out trainer
    # Generalize to multiple layers
    n_vis = 784
    n_hidden = 2000
    batch_size = 128
    activation = T.nnet.sigmoid
    layers = [
        InputLayer(n_vis, batch_size=batch_size, augmenter=augmenter),
        HiddenLayer(n_hidden, activation),
        HiddenLayer(n_vis, activation)
    ]

    lr_scheduler = ExponentialDecay(value=0.1, decay=0.99)
    trainer = Momentum(lr=lr_scheduler, m=0.9)

    model = AutoEncoder(n_vis=n_vis, layers=layers, trainer=trainer, loss=bce,
                        batch_size=batch_size, n_batches=32, n_epochs=100,
                        lr_decay=0.99)
    model.fit(trX, teX)

    w1 = model.layers[1].W.get_value().T
    w2 = model.layers[2].W.get_value()
    pred = model.predict(teX)
Example #19
    def addInputLayer(self):
        assert len(self.layers) == 0

        self.layers.append(InputLayer(self.data_placeholder))
Example #20
import numpy as np

import dataset as ds
from neural_networks import NeuralNetwork
from layers import InputLayer, OutputLayer, DenseLayer
import matplotlib.pyplot as plt

data = ds.MonksDataset()

my_model = NeuralNetwork()
my_model.add(InputLayer(17))
my_model.add(DenseLayer(10, fanin=17, activation="sigmoid"))
my_model.add(OutputLayer(1, fanin=10, activation="sigmoid"))

# Positional arguments appear to mirror the grid-search call in Example #15:
# (size, epochs, learning rate, _, regularization, momentum, loss).
my_model.compile(122, 600, 0.075, None, 0.0001, 0, "mean_squared_error")

(loss, test_loss, accuracy, test_accuracy) = my_model.fit_monks(
    data.train_data_patterns, data.train_data_targets, data.test_data_patterns,
    data.test_data_targets)

print("Loss: {}".format(loss[-1]))
print("Test Loss: {}".format(test_loss[-1]))

print("Accuracy: {}".format(accuracy[-1]))
print("Test accuracy: {}".format(test_accuracy[-1]))

plot1 = plt.figure(1)
plt.plot(loss)
plt.plot(test_loss, "--")

plot2 = plt.figure(2)
# Assumed continuation: plot the accuracy curves in the second figure.
plt.plot(accuracy)
plt.plot(test_accuracy, "--")

plt.show()
Example #21
        def recurrence(x_curr, prev_s_tensor, prev_in_gate_tensor):
            # Scan function cannot use compiled function.
            input_ = InputLayer(input_shape, x_curr)
            conv1a_ = ConvLayer(input_, (n_convfilter[0], 7, 7),
                                params=conv1a.params)
            rect1a_ = LeakyReLU(conv1a_)
            conv1b_ = ConvLayer(rect1a_, (n_convfilter[0], 3, 3),
                                params=conv1b.params)
            rect1_ = LeakyReLU(conv1b_)
            pool1_ = PoolLayer(rect1_)

            conv2a_ = ConvLayer(pool1_, (n_convfilter[1], 3, 3),
                                params=conv2a.params)
            rect2a_ = LeakyReLU(conv2a_)
            conv2b_ = ConvLayer(rect2a_, (n_convfilter[1], 3, 3),
                                params=conv2b.params)
            rect2_ = LeakyReLU(conv2b_)
            conv2c_ = ConvLayer(pool1_, (n_convfilter[1], 1, 1),
                                params=conv2c.params)
            res2_ = AddLayer(conv2c_, rect2_)
            pool2_ = PoolLayer(res2_)

            conv3a_ = ConvLayer(pool2_, (n_convfilter[2], 3, 3),
                                params=conv3a.params)
            rect3a_ = LeakyReLU(conv3a_)
            conv3b_ = ConvLayer(rect3a_, (n_convfilter[2], 3, 3),
                                params=conv3b.params)
            rect3_ = LeakyReLU(conv3b_)
            conv3c_ = ConvLayer(pool2_, (n_convfilter[2], 1, 1),
                                params=conv3c.params)
            res3_ = AddLayer(conv3c_, rect3_)
            pool3_ = PoolLayer(res3_)

            conv4a_ = ConvLayer(pool3_, (n_convfilter[3], 3, 3),
                                params=conv4a.params)
            rect4a_ = LeakyReLU(conv4a_)
            conv4b_ = ConvLayer(rect4a_, (n_convfilter[3], 3, 3),
                                params=conv4b.params)
            rect4_ = LeakyReLU(conv4b_)
            pool4_ = PoolLayer(rect4_)

            conv5a_ = ConvLayer(pool4_, (n_convfilter[4], 3, 3),
                                params=conv5a.params)
            rect5a_ = LeakyReLU(conv5a_)
            conv5b_ = ConvLayer(rect5a_, (n_convfilter[4], 3, 3),
                                params=conv5b.params)
            rect5_ = LeakyReLU(conv5b_)
            conv5c_ = ConvLayer(pool4_, (n_convfilter[4], 1, 1),
                                params=conv5c.params)
            res5_ = AddLayer(conv5c_, rect5_)
            pool5_ = PoolLayer(res5_)

            conv6a_ = ConvLayer(pool5_, (n_convfilter[5], 3, 3),
                                params=conv6a.params)
            rect6a_ = LeakyReLU(conv6a_)
            conv6b_ = ConvLayer(rect6a_, (n_convfilter[5], 3, 3),
                                params=conv6b.params)
            rect6_ = LeakyReLU(conv6b_)
            res6_ = AddLayer(pool5_, rect6_)
            pool6_ = PoolLayer(res6_)

            flat6_ = FlattenLayer(pool6_)
            fc7_ = TensorProductLayer(flat6_,
                                      n_fc_filters[0],
                                      params=fc7.params)
            rect7_ = LeakyReLU(fc7_)

            prev_s_ = InputLayer(s_shape_1d, prev_s_tensor)
            #print(self.prev_s_._output_shape)

            t_x_s_update_ = FCConv1DLayer(prev_s_,
                                          rect7_,
                                          n_fc_filters[0],
                                          params=self.t_x_s_update.params,
                                          isTrainable=True)

            t_x_s_reset_ = FCConv1DLayer(prev_s_,
                                         rect7_,
                                         n_fc_filters[0],
                                         params=self.t_x_s_reset.params,
                                         isTrainable=True)

            update_gate_ = SigmoidLayer(t_x_s_update_)
            comp_update_gate_ = ComplementLayer(update_gate_)
            reset_gate_ = SigmoidLayer(t_x_s_reset_)

            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
            t_x_rs_ = FCConv1DLayer(rs_,
                                    rect7_,
                                    n_fc_filters[0],
                                    params=self.t_x_rs.params,
                                    isTrainable=True)

            tanh_t_x_rs_ = TanhLayer(t_x_rs_)

            gru_out_ = AddLayer(
                EltwiseMultiplyLayer(update_gate_, prev_s_),
                EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))

            return gru_out_.output, update_gate_.output
Example #22
    def __init__(self, x, input_shape):
        n_convfilter = [16, 32, 64, 64, 64, 64]
        n_fc_filters = [1024]
        n_deconvfilter = [64, 64, 64, 16, 8, 2]

        self.x = x
        # To define weights, define the network structure first
        x_ = InputLayer(input_shape)
        conv1a = ConvLayer(x_, (n_convfilter[0], 7, 7))
        conv1b = ConvLayer(conv1a, (n_convfilter[0], 3, 3))
        pool1 = PoolLayer(conv1b)

        print(
            'Conv1a = ConvLayer(x, (%s, 7, 7) => input_shape %s,  output_shape %s)'
            % (n_convfilter[0], conv1a._input_shape, conv1a._output_shape))
        print(
            'Conv1b = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[0], conv1b._input_shape, conv1b._output_shape))
        print('pool1 => input_shape %s,  output_shape %s' %
              (pool1._input_shape, pool1._output_shape))

        conv2a = ConvLayer(pool1, (n_convfilter[1], 3, 3))
        conv2b = ConvLayer(conv2a, (n_convfilter[1], 3, 3))
        conv2c = ConvLayer(pool1, (n_convfilter[1], 1, 1))
        pool2 = PoolLayer(conv2c)

        print(
            'Conv2a = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[1], conv2a._input_shape, conv2a._output_shape))
        print(
            'Conv2b = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[1], conv2b._input_shape, conv2b._output_shape))
        conv3a = ConvLayer(pool2, (n_convfilter[2], 3, 3))
        conv3b = ConvLayer(conv3a, (n_convfilter[2], 3, 3))
        conv3c = ConvLayer(pool2, (n_convfilter[2], 1, 1))
        pool3 = PoolLayer(conv3b)

        print(
            'Conv3a = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[2], conv3a._input_shape, conv3a._output_shape))
        print(
            'Conv3b = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[2], conv3b._input_shape, conv3b._output_shape))
        print(
            'Conv3c = ConvLayer(x, (%s, 1, 1) => input_shape %s,  output_shape %s)'
            % (n_convfilter[2], conv3c._input_shape, conv3c._output_shape))
        print('pool3 => input_shape %s,  output_shape %s' %
              (pool3._input_shape, pool3._output_shape))

        conv4a = ConvLayer(pool3, (n_convfilter[3], 3, 3))
        conv4b = ConvLayer(conv4a, (n_convfilter[3], 3, 3))
        pool4 = PoolLayer(conv4b)

        conv5a = ConvLayer(pool4, (n_convfilter[4], 3, 3))
        conv5b = ConvLayer(conv5a, (n_convfilter[4], 3, 3))
        conv5c = ConvLayer(pool4, (n_convfilter[4], 1, 1))
        pool5 = PoolLayer(conv5b)

        conv6a = ConvLayer(pool5, (n_convfilter[5], 3, 3))
        conv6b = ConvLayer(conv6a, (n_convfilter[5], 3, 3))
        pool6 = PoolLayer(conv6b)

        print(
            'Conv6a = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[5], conv6a._input_shape, conv6a._output_shape))
        print(
            'Conv6b = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[5], conv6b._input_shape, conv6b._output_shape))
        print('pool6 => input_shape %s,  output_shape %s' %
              (pool6._input_shape, pool6._output_shape))

        flat6 = FlattenLayer(pool6)
        print('flat6 => input_shape %s,  output_shape %s' %
              (flat6._input_shape, flat6._output_shape))

        fc7 = TensorProductLayer(flat6, n_fc_filters[0])
        print('fc7 => input_shape %s,  output_shape %s' %
              (fc7._input_shape, fc7._output_shape))

        # Set the size to be 64x4x4x4
        #s_shape_1d = (cfg.batch, n_deconvfilter[0])
        s_shape_1d = (cfg.batch, n_fc_filters[0])
        self.prev_s = InputLayer(s_shape_1d)
        #view_features_shape = (cfg.batch, n_fc_filters[0], cfg.CONST.N_VIEWS)

        self.t_x_s_update = FCConv1DLayer(self.prev_s,
                                          fc7,
                                          n_fc_filters[0],
                                          isTrainable=True)

        self.t_x_s_reset = FCConv1DLayer(self.prev_s,
                                         fc7,
                                         n_fc_filters[0],
                                         isTrainable=True)

        self.reset_gate = SigmoidLayer(self.t_x_s_reset)

        self.rs = EltwiseMultiplyLayer(self.reset_gate, self.prev_s)
        self.t_x_rs = FCConv1DLayer(self.rs,
                                    fc7,
                                    n_fc_filters[0],
                                    isTrainable=True)

        def recurrence(x_curr, prev_s_tensor, prev_in_gate_tensor):
            # Scan function cannot use compiled function.
            input_ = InputLayer(input_shape, x_curr)
            conv1a_ = ConvLayer(input_, (n_convfilter[0], 7, 7),
                                params=conv1a.params)
            rect1a_ = LeakyReLU(conv1a_)
            conv1b_ = ConvLayer(rect1a_, (n_convfilter[0], 3, 3),
                                params=conv1b.params)
            rect1_ = LeakyReLU(conv1b_)
            pool1_ = PoolLayer(rect1_)

            conv2a_ = ConvLayer(pool1_, (n_convfilter[1], 3, 3),
                                params=conv2a.params)
            rect2a_ = LeakyReLU(conv2a_)
            conv2b_ = ConvLayer(rect2a_, (n_convfilter[1], 3, 3),
                                params=conv2b.params)
            rect2_ = LeakyReLU(conv2b_)
            conv2c_ = ConvLayer(pool1_, (n_convfilter[1], 1, 1),
                                params=conv2c.params)
            res2_ = AddLayer(conv2c_, rect2_)
            pool2_ = PoolLayer(res2_)

            conv3a_ = ConvLayer(pool2_, (n_convfilter[2], 3, 3),
                                params=conv3a.params)
            rect3a_ = LeakyReLU(conv3a_)
            conv3b_ = ConvLayer(rect3a_, (n_convfilter[2], 3, 3),
                                params=conv3b.params)
            rect3_ = LeakyReLU(conv3b_)
            conv3c_ = ConvLayer(pool2_, (n_convfilter[2], 1, 1),
                                params=conv3c.params)
            res3_ = AddLayer(conv3c_, rect3_)
            pool3_ = PoolLayer(res3_)

            conv4a_ = ConvLayer(pool3_, (n_convfilter[3], 3, 3),
                                params=conv4a.params)
            rect4a_ = LeakyReLU(conv4a_)
            conv4b_ = ConvLayer(rect4a_, (n_convfilter[3], 3, 3),
                                params=conv4b.params)
            rect4_ = LeakyReLU(conv4b_)
            pool4_ = PoolLayer(rect4_)

            conv5a_ = ConvLayer(pool4_, (n_convfilter[4], 3, 3),
                                params=conv5a.params)
            rect5a_ = LeakyReLU(conv5a_)
            conv5b_ = ConvLayer(rect5a_, (n_convfilter[4], 3, 3),
                                params=conv5b.params)
            rect5_ = LeakyReLU(conv5b_)
            conv5c_ = ConvLayer(pool4_, (n_convfilter[4], 1, 1),
                                params=conv5c.params)
            res5_ = AddLayer(conv5c_, rect5_)
            pool5_ = PoolLayer(res5_)

            conv6a_ = ConvLayer(pool5_, (n_convfilter[5], 3, 3),
                                params=conv6a.params)
            rect6a_ = LeakyReLU(conv6a_)
            conv6b_ = ConvLayer(rect6a_, (n_convfilter[5], 3, 3),
                                params=conv6b.params)
            rect6_ = LeakyReLU(conv6b_)
            res6_ = AddLayer(pool5_, rect6_)
            pool6_ = PoolLayer(res6_)

            flat6_ = FlattenLayer(pool6_)
            fc7_ = TensorProductLayer(flat6_,
                                      n_fc_filters[0],
                                      params=fc7.params)
            rect7_ = LeakyReLU(fc7_)

            prev_s_ = InputLayer(s_shape_1d, prev_s_tensor)
            #print(self.prev_s_._output_shape)

            t_x_s_update_ = FCConv1DLayer(prev_s_,
                                          rect7_,
                                          n_fc_filters[0],
                                          params=self.t_x_s_update.params,
                                          isTrainable=True)

            t_x_s_reset_ = FCConv1DLayer(prev_s_,
                                         rect7_,
                                         n_fc_filters[0],
                                         params=self.t_x_s_reset.params,
                                         isTrainable=True)

            update_gate_ = SigmoidLayer(t_x_s_update_)
            comp_update_gate_ = ComplementLayer(update_gate_)
            reset_gate_ = SigmoidLayer(t_x_s_reset_)

            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
            t_x_rs_ = FCConv1DLayer(rs_,
                                    rect7_,
                                    n_fc_filters[0],
                                    params=self.t_x_rs.params,
                                    isTrainable=True)

            tanh_t_x_rs_ = TanhLayer(t_x_rs_)

            gru_out_ = AddLayer(
                EltwiseMultiplyLayer(update_gate_, prev_s_),
                EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))

            return gru_out_.output, update_gate_.output

        time_features, _ = theano.scan(
            recurrence,
            sequences=[
                self.x
            ],  # along with images, feed in the index of the current frame
            outputs_info=[
                tensor.zeros_like(np.zeros(s_shape_1d),
                                  dtype=theano.config.floatX),
                tensor.zeros_like(np.zeros(s_shape_1d),
                                  dtype=theano.config.floatX)
            ])
        time_all = time_features[0]
        time_last = time_all[-1]

        self.features = time_last
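
The theano.scan idiom used above, in miniature: the step function receives the current sequence element plus the previous state given by outputs_info, and returns the new state (the additive update is a stand-in for the GRU step):

import numpy as np
import theano
import theano.tensor as tensor

xs = tensor.matrix('xs')

def step(x_t, s_prev):
    return s_prev + x_t  # stand-in for the GRU update

states, _ = theano.scan(step, sequences=[xs],
                        outputs_info=[tensor.zeros_like(xs[0])])
f = theano.function([xs], states[-1])
print(f(np.ones((4, 3), dtype=theano.config.floatX)))  # -> [4. 4. 4.]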
Example #23
    def network_definition(self):

        # (multi_views, time, self.batch_size, 3, self.img_h, self.img_w),
        self.x = tensor6()
        self.is_x_tensor4 = False

        img_w = self.img_w
        img_h = self.img_h
        n_gru_vox = 4
        # n_vox = self.n_vox

        n_convfilter = [16, 32, 64, 64, 64, 64]
        n_fc_filters = [1024]
        n_deconvfilter = [64, 64, 64, 16, 8, 2]

        # Set the size to be 64x4x4x4
        s_shape = (self.batch_size, n_gru_vox, n_deconvfilter[0], n_gru_vox,
                   n_gru_vox)
        # Dummy 3D grid hidden representations
        prev_s = InputLayer(s_shape)

        input_shape = (self.batch_size, 3, img_w, img_h)

        s_shape_1d = (
            cfg.batch,
            n_fc_filters[0],
        )

        lstm1d_all = []

        def get_viewfeats(x_curr):
            lstm1d_all.append(LSTM1D(x_curr, input_shape))
            params_temp = get_trainable_params()
            self.params_lst.append(len(params_temp))
            '''
            count = 0
            for p in params:
                count += 1
            self.param_count
            print('num of params %d' %count)
            '''
            return lstm1d_all[-1].feat()

        view_features_shape = (self.batch_size, n_fc_filters[0])

        view_features, _ = theano.scan(get_viewfeats, sequences=[self.x])
        self.view_features = view_features

        fc7 = InputLayer(view_features_shape)
        t_x_s_update = FCConv3DLayer(
            prev_s,
            fc7, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
            isTrainable=True)
        t_x_s_reset = FCConv3DLayer(
            prev_s,
            fc7, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
            isTrainable=True)

        reset_gate = SigmoidLayer(t_x_s_reset)

        rs = EltwiseMultiplyLayer(reset_gate, prev_s)
        t_x_rs = FCConv3DLayer(rs,
                               fc7,
                               (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                               isTrainable=True)

        def view_rec_test(x_curr, prev_s_tensor, prev_in_gate_tensor):
            params = get_trainable_params()
            print('view rec test : num of params %d' % len(params))

            rect8_ = InputLayer(view_features_shape, x_curr)
            prev_s_ = InputLayer(s_shape, prev_s_tensor)

            t_x_s_update_ = FCConv3DLayer(
                prev_s_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_s_update.params,
                isTrainable=True)

            t_x_s_reset_ = FCConv3DLayer(
                prev_s_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_s_reset.params,
                isTrainable=True)

            update_gate_ = SigmoidLayer(t_x_s_update_)
            comp_update_gate_ = ComplementLayer(update_gate_)
            reset_gate_ = SigmoidLayer(t_x_s_reset_)

            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
            t_x_rs_ = FCConv3DLayer(
                rs_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_rs.params,
                isTrainable=True)

            tanh_t_x_rs_ = TanhLayer(t_x_rs_)

            gru_out_ = AddLayer(
                EltwiseMultiplyLayer(update_gate_, prev_s_),
                EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))

            return gru_out_.output, update_gate_.output

        s_update, _ = theano.scan(
            view_rec_test,
            sequences=[
                view_features
            ],  # along with images, feed in the index of the current frame
            outputs_info=[
                tensor.zeros_like(np.zeros(s_shape),
                                  dtype=theano.config.floatX),
                tensor.zeros_like(np.zeros(s_shape),
                                  dtype=theano.config.floatX)
            ])

        update_all = s_update[-1]
        s_all = s_update[0]
        s_last = s_all[-1]

        #s_last = np.random.rand(self.batch_size, n_gru_vox, n_deconvfilter[0], n_gru_vox, n_gru_vox)
        self.gru_s = InputLayer(s_shape, s_last)

        unpool7 = Unpool3DLayer(self.gru_s)
        self.conv7a = Conv3DLayer(unpool7, (n_deconvfilter[1], 3, 3, 3))
        self.rect7a = LeakyReLU(self.conv7a)
        self.conv7b = Conv3DLayer(self.rect7a, (n_deconvfilter[1], 3, 3, 3))
        self.rect7 = LeakyReLU(self.conv7b)
        self.res7 = AddLayer(unpool7, self.rect7)

        print('unpool7 => input_shape %s,  output_shape %s' %
              (unpool7._input_shape, unpool7._output_shape))

        unpool8 = Unpool3DLayer(self.res7)
        conv8a = Conv3DLayer(unpool8, (n_deconvfilter[2], 3, 3, 3))
        rect8a = LeakyReLU(conv8a)
        self.conv8b = Conv3DLayer(rect8a, (n_deconvfilter[2], 3, 3, 3))
        self.rect8 = LeakyReLU(self.conv8b)
        self.res8 = AddLayer(unpool8, self.rect8)

        print('unpool8 => input_shape %s,  output_shape %s' %
              (unpool8._input_shape, unpool8._output_shape))

        unpool12 = Unpool3DLayer(self.res8)
        conv12a = Conv3DLayer(unpool12, (n_deconvfilter[2], 3, 3, 3))
        rect12a = LeakyReLU(conv12a)
        self.conv12b = Conv3DLayer(rect12a, (n_deconvfilter[2], 3, 3, 3))
        self.rect12 = LeakyReLU(self.conv12b)
        self.res12 = AddLayer(unpool12, self.rect12)

        print('unpool12 => input_shape %s,  output_shape %s' %
              (unpool12._input_shape, unpool12._output_shape))

        unpool9 = Unpool3DLayer(self.res12)
        self.conv9a = Conv3DLayer(unpool9, (n_deconvfilter[3], 3, 3, 3))
        self.rect9a = LeakyReLU(self.conv9a)
        self.conv9b = Conv3DLayer(self.rect9a, (n_deconvfilter[3], 3, 3, 3))
        self.rect9 = LeakyReLU(self.conv9b)
        self.conv9c = Conv3DLayer(unpool9, (n_deconvfilter[3], 1, 1, 1))
        self.res9 = AddLayer(self.conv9c, self.rect9)

        print('unpool9 => input_shape %s,  output_shape %s' %
              (unpool9._input_shape, unpool9._output_shape))

        unpool10 = Unpool3DLayer(self.res9)
        self.conv10a = Conv3DLayer(unpool10, (n_deconvfilter[4], 3, 3, 3))
        self.rect10a = LeakyReLU(self.conv10a)
        self.conv10b = Conv3DLayer(self.rect10a, (n_deconvfilter[4], 3, 3, 3))
        self.rect10 = LeakyReLU(self.conv10b)
        self.conv10c = Conv3DLayer(self.rect10a, (n_deconvfilter[4], 3, 3, 3))
        self.res10 = AddLayer(self.conv10c, self.rect10)

        print('unpool10 => input_shape %s,  output_shape %s' %
              (unpool10._input_shape, unpool10._output_shape))

        self.conv11 = Conv3DLayer(self.res10, (n_deconvfilter[5], 3, 3, 3))
        #self.conv11 = TanhLayer(conv11)
        print(
            'Conv11 = Conv3DLayer(x, (%s, 3, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_deconvfilter[5], self.conv11._input_shape,
               self.conv11._output_shape))

        #self.conv11 = np.random.rand(cfg.batch, 128, 2, 128, 128)
        softmax_loss = SoftmaxWithLoss3D(self.conv11.output)
        self.softloss = softmax_loss
        self.loss = softmax_loss.loss(self.y)
        self.error = softmax_loss.error(self.y)
        self.params = get_trainable_params()
        self.output = softmax_loss.prediction()
        #update_all = [1,2,3]
        self.activations = [update_all]