    def __init__(self, config: Config):
        # Network Parameters
        n_input = 784  # MNIST data input (img shape: 28*28)
        n_classes = 10  # MNIST total classes (0-9 digits)
        n_hidden_1 = config.n_hidden_1
        n_hidden_2 = config.n_hidden_2

        # tf Graph input
        X = tf.placeholder(tf.float32, [None, n_input], name="Features")
        y_ = tf.placeholder(tf.float32, [None, n_classes], name="Labels")
        d = tf.placeholder(tf.float32)

        #X_2d = tf.reshape(X, [-1, 28, 28, 1])

        #hidden1 = ConvLayer(X_2d, W=weight_gauss_conv2d([3, 3, 1, n_hidden_1]), name="ConvLayer1")
        #maxpool1 = MaxPoolingLayer(hidden1, name="MaxPoolingLayer1")
        #hidden2 = ConvLayer(maxpool1, W=weight_gauss_conv2d([3, 3, n_hidden_1, n_hidden_2]), name="ConvLayer2")
        #maxpool2 = MaxPoolingLayer(hidden2, name="MaxPoolingLayer2")
        #flat = tf.contrib.layers.flatten(maxpool2.get_output())

        # Hidden 1
        hidden1 = DenseLayer(X,
                             n_hidden_1,
                             name="Hidden_Layer_1",
                             a=tf.nn.sigmoid,
                             W=weight_truncated_normal,
                             b=tf.zeros)
        # Hidden 2
        hidden2 = DenseLayer(hidden1,
                             n_hidden_2,
                             name="Hidden_Layer_2",
                             a=tf.nn.sigmoid,
                             W=weight_truncated_normal,
                             b=tf.zeros)

        # Output
        out = DenseLayer(hidden2,
                         n_classes,
                         name="Output_Layer",
                         a=tf.identity,
                         W=weight_truncated_normal,
                         b=tf.zeros)

        self.X = X
        self.y_ = y_
        self.dropout = d
        self.hidden1 = hidden1
        self.hidden2 = hidden2
        self.output = out.get_output()
def fc(input, init, act, units, flatten, id):
    return DenseLayer(input,
                      units,
                      name="FC-{}".format(id),
                      a=act,
                      W=init,
                      b=tf.zeros,
                      flatten_input=flatten)
    def __init__(self, config: Config):
        # Network Parameters
        n_input = 784  # MNIST data input (img shape: 28*28)
        n_classes = 10  # MNIST total classes (0-9 digits)
        n_hidden_1 = config.n_hidden_1
        n_hidden_2 = config.n_hidden_2

        # tf Graph input
        X = tf.placeholder(tf.float32, [None, n_input], name="Features")
        y_ = tf.placeholder(tf.float32, [None, n_classes], name="Labels")
        d = tf.placeholder(tf.float32)

        # Hidden 1
        hidden1 = DenseLayer(X,
                             n_hidden_1,
                             name="Hidden_Layer_1",
                             a=tf.nn.elu,
                             W=weight_truncated_normal,
                             b=tf.zeros)
        # Hidden 2
        hidden2 = DenseLayer(hidden1,
                             n_hidden_2,
                             name="Hidden_Layer_2",
                             a=tf.nn.elu,
                             W=weight_truncated_normal,
                             b=tf.zeros)
        # Dropout
        drop2 = DropoutLayer(hidden2, prob=d)
        # Output
        out = DenseLayer(drop2,
                         n_classes,
                         name="Output_Layer",
                         a=tf.nn.sigmoid,
                         W=weight_truncated_normal,
                         b=tf.zeros)

        self.X = X
        self.y_ = y_
        self.dropout = d
        self.hidden1 = hidden1
        self.hidden2 = hidden2
        self.output = out.get_output()
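The dropout placeholder d is only declared in the graph; the actual probability has to be fed at run time. Below is a minimal training-step sketch under stated assumptions: `net` stands for an instance of the class above, the MSE loss and Adam optimizer are arbitrary stand-ins, DropoutLayer's `prob` is assumed to behave like TF1's keep probability (so 1.0 disables dropout at evaluation), and the data is random stand-in data rather than real MNIST.

import numpy as np
import tensorflow as tf

# Assumptions: `net` is an instance of the MLP above; DropoutLayer's `prob` acts as a keep
# probability (as in TF1's tf.nn.dropout), so feeding 1.0 disables dropout at evaluation.
loss = tf.reduce_mean(tf.square(net.output - net.y_))   # arbitrary MSE stand-in loss
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

batch_x = np.random.rand(32, 784).astype(np.float32)                    # fake image batch
batch_y = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, 32)]    # fake one-hot labels

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op, feed_dict={net.X: batch_x, net.y_: batch_y, net.dropout: 0.5})
    predictions = sess.run(net.output, feed_dict={net.X: batch_x, net.dropout: 1.0})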
Example 4
    def __init__(self, config: Config, dataset):
        """Simple network with LSTM layer and dense output layer; All sequence positions are fed to the LSTM layer at
        once, this is the most convenient but least flexible design; see ArchitectureLSTM_optimized for a faster
        version;
        
        Command-line usage:
        >>> python3 samples/main_lstm.py --config=samples/config_lstm.json
        
        Example input shapes: [n_samples, n_sequence_positions, n_features]
        Example output shapes: [n_samples, n_sequence_positions, n_features] (with return_states=True),
        [n_samples, 1, n_features] (with return_states=False)
        """
        #
        # Some convenience objects
        #
        # We will use a list to store all layers for regularization etc. (this is optional)
        layers = []
        # Prepare xavier initialization for weights
        w_init = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)

        #
        # Create placeholders for input data (shape: [n_samples, n_sequence_positions, n_features])
        #
        X = tf.placeholder(tf.float32, shape=dataset.X_shape)
        y_ = tf.placeholder(tf.float32, shape=dataset.y_shape)
        n_output_units = dataset.y_shape[-1]  # nr of output features is number of classes
        
        # ----------------------------------------------------------------------------------------------------------
        # Define network architecture
        # ----------------------------------------------------------------------------------------------------------
        
        #
        # LSTM Layer
        #  We want to create an output sequence with the LSTM instead of only returning the output at the last sequence
        #  position -> return_states=True
        #
        print("\tLSTM...")
        lstm_layer = LSTMLayer(incoming=X, n_units=config.n_lstm, name='LSTM',
                               W_ci=w_init, W_ig=w_init, W_og=w_init, W_fg=w_init,
                               b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,
                               a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.nn.elu,
                               c_init=tf.zeros, h_init=tf.zeros, forgetgate=True, precomp_fwds=True, return_states=True)
        layers.append(lstm_layer)
        
        #
        # Output Layer
        #
        print("\tOutput layer...")
        output_layer = DenseLayer(incoming=lstm_layer, n_units=n_output_units, name='DenseLayerOut',
                                  W=w_init, b=tf.zeros, a=tf.sigmoid)
        layers.append(output_layer)
        
        #
        # Calculate output
        #
        output = output_layer.get_output(tickersteps=config.tickersteps)
        
        print("\tDone!")
        
        #
        # Publish
        #
        self.X = X
        self.y_ = y_
        self.output = output
        # Store layers in list for regularization in main file
        self.__layers = layers
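Because return_states=True publishes the LSTM output at every sequence position, a loss for this architecture can be taken over the whole sequence or only at the last position. A minimal sketch of both options, assuming `net` is an instance of the architecture above and its targets y_ share the output's [n_samples, n_sequence_positions, n_features] shape:

import tensorflow as tf

# Assumption: net.output and net.y_ both have shape [n_samples, n_sequence_positions, n_features]
full_sequence_loss = tf.reduce_mean(tf.square(net.output - net.y_))                   # every position
last_position_loss = tf.reduce_mean(tf.square(net.output[:, -1, :] - net.y_[:, -1, :]))  # last position only
train_op = tf.train.AdamOptimizer(1e-3).minimize(last_position_loss)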
Example 5
    def __init__(self, config: Config, dataset):
        """Architecture with LSTM layer followed by 2 dense layers and a dense output layer; The outputs of the 2 dense
        layers are used as additional recurrent connections for the LSTM; Inputs are fed to LSTM layer sequence
        position by sequence position in RNN loop; This is an advanced example, see ArchitectureLSTM to get started;
        
        Command-line usage:
        >>> python3 samples/main_lstm.py --config=samples/config_lstm3.json
        
        Example input shapes: [n_samples, n_sequence_positions, n_features]
        Example output shapes: [n_samples, n_sequence_positions, n_features] (with return_states=True),
        [n_samples, 1, n_features] (with return_states=False)
        """
        #
        # Some convenience objects
        #
        # We will use a list to store all layers for regularization etc. (this is optional)
        layers = []
        # Prepare xavier initialization for weights
        w_init = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)

        #
        # Create placeholders for input data (shape: [n_samples, n_sequence_positions, n_features])
        #
        X = tf.placeholder(tf.float32, shape=dataset.X_shape)
        y_ = tf.placeholder(tf.float32, shape=dataset.y_shape)
        n_output_units = dataset.y_shape[-1]  # nr of output features is number of classes
        n_seq_pos = dataset.X_shape[1]  # dataset.X_shape is [sample, seq_pos, features]
        
        # ----------------------------------------------------------------------------------------------------------
        # Define network architecture
        # ----------------------------------------------------------------------------------------------------------
        
        #
        # Input Layer
        #  RNNInputLayer will hold the input to the network at each sequence position. We will initialize it with a
        #  zero tensor of shape [sample, 1, features]
        #
        input_shape = dataset.X_shape[:1] + (1,) + dataset.X_shape[2:]
        rnn_input_layer = RNNInputLayer(tf.zeros(input_shape, dtype=tf.float32))
        layers.append(rnn_input_layer)
        
        #
        # LSTM Layer
        #
        print("\tLSTM...")
        # We want to modify the number of recurrent connections -> we have to specify the shape of the recurrent weights
        rec_w_shape = (sum(config.n_dense_units) + config.n_lstm, config.n_lstm)
        # The forward weights can be initialized automatically, for the recurrent ones we will use our rec_w_shape
        lstm_w = [w_init, w_init(rec_w_shape)]
        lstm_layer = LSTMLayer(incoming=rnn_input_layer, n_units=config.n_lstm, name='LSTM',
                               W_ci=lstm_w, W_ig=lstm_w, W_og=lstm_w, W_fg=lstm_w,
                               b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,
                               a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.nn.elu,
                               c_init=tf.zeros, h_init=tf.zeros, forgetgate=True, precomp_fwds=True, return_states=True)
        layers.append(lstm_layer)
        
        #
        # Dense Layers
        #
        print("\tDense layers...")
        dense_layers = list()
        for n_units in config.n_dense_units:
            dense_layers.append(DenseLayer(incoming=layers[-1], n_units=n_units, name='DenseLayer', W=w_init,
                                           b=tf.zeros, a=tf.nn.elu))
            layers.append(dense_layers[-1])
        
        #
        # Use dense layers as additional recurrent input to LSTM
        #
        full_lstm_input = ConcatLayer([lstm_layer] + dense_layers, name='LSTMRecurrence')
        lstm_layer.add_external_recurrence(full_lstm_input)
        
        #
        # Output Layer
        #
        print("\tOutput layer...")
        output_layer = DenseLayer(incoming=dense_layers[-1], n_units=n_output_units, name='DenseLayerOut', W=w_init,
                                  b=tf.zeros, a=tf.sigmoid)
        layers.append(output_layer)
        
        # ----------------------------------------------------------------------------------------------------------
        # Loop through sequence positions and create graph
        # ----------------------------------------------------------------------------------------------------------
        
        #
        # Loop through sequence positions
        #
        print("\tRNN Loop...")
        for seq_pos in range(n_seq_pos):
            with tf.name_scope("Sequence_pos_{}".format(seq_pos)):
                print("\t  seq. pos. {}...".format(seq_pos))
                # Set rnn input layer to input at current sequence position
                layers[0].update(X[:, seq_pos:seq_pos + 1, :])
                
                # Calculate new lstm state (this automatically computes all dependencies, including rec. connections)
                _ = lstm_layer.get_output()
        
        #
        # Loop through tickersteps
        #
        # Use zeros as input during ticker steps
        tickerstep_input = tf.zeros(dataset.X_shape[:1] + (1,) + dataset.X_shape[2:], dtype=tf.float32,
                                    name="tickerstep_input")
        
        for tickerstep in range(config.tickersteps):
            with tf.name_scope("Tickerstep_{}".format(tickerstep)):
                print("\t  tickerstep {}...".format(tickerstep))
                # Set rnn input layer to input at current sequence position
                layers[0].update(tickerstep_input)

                # Calculate new lstm state (this automatically computes all dependencies, including rec. connections)
                _ = lstm_layer.get_output(tickerstep_nodes=True)
        
        #
        # Calculate output but consider that the lstm_layer is already computed
        #
        output = output_layer.get_output(prev_layers=[lstm_layer])
        
        print("\tDone!")
        
        #
        # Publish
        #
        self.X = X
        self.y_ = y_
        self.output = output
        # Store layers in list for regularization in main file
        self.__layers = layers
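The recurrent weight shape above follows directly from what is fed back to the LSTM: the ConcatLayer joins the LSTM output (config.n_lstm units) with the outputs of all dense layers (sum(config.n_dense_units) units), and that concatenation has to be mapped back onto the n_lstm cells. A small numeric sketch with made-up config values:

# Made-up example values (not taken from the original config file)
n_lstm = 8
n_dense_units = [16, 16]

# Recurrent input = concat([lstm_out, dense_1_out, dense_2_out]) -> 8 + 16 + 16 = 40 units,
# so each gate needs a recurrent weight matrix mapping 40 inputs onto the 8 LSTM cells
rec_w_shape = (sum(n_dense_units) + n_lstm, n_lstm)
print(rec_w_shape)  # (40, 8)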
Example 6
    def __init__(self, config: Config, dataset):
        """Simple network with dense layer and dense output layer;

        Command-line usage:
        >>> python3 samples/main_lstm.py --config=samples/config_dense.json

        Example input shapes: [n_samples, n_features]
        Example output shapes: [n_samples, n_features]
        """
        #
        # Some convenience objects
        #
        # We will use a list to store all layers for regularization etc. (this is optional)
        layers = []
        # Prepare xavier initialization for weights
        w_init = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)

        #
        # Create placeholders for input data (shape: [n_samples, n_features])
        #
        X = tf.placeholder(tf.float32, shape=dataset.X_shape)
        y_ = tf.placeholder(tf.float32, shape=dataset.y_shape)
        n_output_units = dataset.y_shape[-1]  # nr of output features is number of classes

        # ----------------------------------------------------------------------------------------------------------
        # Define network architecture
        # ----------------------------------------------------------------------------------------------------------

        #
        # Dense Layer
        #  Input for the dense layer shall be X (TeLL layers take tensors or TeLL Layer instances as input)
        #
        print("\tDense layer...")

        dense_layer = DenseLayer(incoming=X, n_units=config.n_dense, name='DenseLayer', W=w_init, b=tf.zeros,
                                 a=tf.nn.elu)
        layers.append(dense_layer)

        #
        # Output Layer
        #
        print("\tOutput layer...")
        output_layer = DenseLayer(incoming=dense_layer, n_units=n_output_units, name='DenseLayerOut', W=w_init,
                                  b=tf.zeros, a=tf.sigmoid)
        layers.append(output_layer)

        #
        # Calculate output
        #  This will calculate the output of output_layer, including all dependencies
        #
        output = output_layer.get_output()

        print("\tDone!")

        #
        # Publish
        #
        self.X = X
        self.y_ = y_
        self.output = output
        # Store layers in list for regularization in main file
        self.__layers = layers
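All of these examples rely on tf.contrib.layers.xavier_initializer, which only exists in TensorFlow 1.x. Under 1.x the same Glorot/Xavier initialization can also be built without tf.contrib; the sketch below is a rough drop-in for the w_init used here (tf.glorot_normal_initializer is the non-contrib counterpart of xavier_initializer(uniform=False)):

import tensorflow as tf

# TF 1.x Glorot (Xavier) initializer without tf.contrib; roughly equivalent to
# tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)
w_init = tf.glorot_normal_initializer(seed=None, dtype=tf.float32)

# Like the contrib version, the initializer is a callable that returns a tensor for a given shape:
example_weights = w_init((784, 128))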
Example 7
    def __init__(self, config: Config, dataset):
        """Architecture with LSTM layer followed by dense output layer; Inputs are fed to LSTM layer sequence position
        by sequence position in a for-loop; this is the most flexible design, as shown e.g. in ArchitectureLSTM3;
        
        Command-line usage:
        Change entry
        "architecture": "sample_architectures.ArchitectureLSTM"
        to
        "architecture": "sample_architectures.ArchitectureLSTMFlexible" in samples/config_lstm.json. Then run
        >>> python3 samples/main_lstm.py --config=samples/config_lstm.json
        
        Example input shapes: [n_samples, n_sequence_positions, n_features]
        Example output shapes: [n_samples, n_sequence_positions, n_features] (with return_states=True),
        [n_samples, 1, n_features] (with return_states=False)
        """
        #
        # Some convenience objects
        #
        # We will use a list to store all layers for regularization etc. (this is optional)
        layers = []
        # Prepare xavier initialization for weights
        w_init = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)

        #
        # Create placeholders for input data (shape: [n_samples, n_sequence_positions, n_features])
        #
        X = tf.placeholder(tf.float32, shape=dataset.X_shape)
        y_ = tf.placeholder(tf.float32, shape=dataset.y_shape)
        n_output_units = dataset.y_shape[-1]  # nr of output features is number of classes
        n_seq_pos = dataset.X_shape[1]  # dataset.X_shape is [sample, seq_pos, features]
        
        # ----------------------------------------------------------------------------------------------------------
        # Define network architecture
        # ----------------------------------------------------------------------------------------------------------
        
        #
        # Input Layer
        #  RNNInputLayer will hold the input to the network at each sequence position. We will initialize it with a
        #  zero tensor of shape [sample, 1, features]
        #
        input_shape = dataset.X_shape[:1] + (1,) + dataset.X_shape[2:]
        rnn_input_layer = RNNInputLayer(tf.zeros(input_shape, dtype=tf.float32))
        layers.append(rnn_input_layer)
        
        #
        # LSTM Layer
        #
        print("\tLSTM...")
        lstm_layer = LSTMLayer(incoming=rnn_input_layer, n_units=config.n_lstm, name='LSTM',
                               W_ci=w_init, W_ig=w_init, W_og=w_init, W_fg=w_init,
                               b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,
                               a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.nn.elu,
                               c_init=tf.zeros, h_init=tf.zeros, forgetgate=True, precomp_fwds=True, return_states=True)
        layers.append(lstm_layer)

        #
        # Output Layer
        #
        print("\tOutput layer...")
        output_layer = DenseLayer(incoming=lstm_layer, n_units=n_output_units, name='DenseLayerOut',
                                  W=w_init, b=tf.zeros, a=tf.sigmoid)
        layers.append(output_layer)
        
        # ----------------------------------------------------------------------------------------------------------
        # Loop through sequence positions and create graph
        # ----------------------------------------------------------------------------------------------------------
        
        #
        # Loop through sequence positions
        #
        print("\tRNN Loop...")
        for seq_pos in range(n_seq_pos):
            with tf.name_scope("Sequence_pos_{}".format(seq_pos)):
                print("\t  seq. pos. {}...".format(seq_pos))
                
                # Set rnn input layer to input at current sequence position
                rnn_input_layer.update(X[:, seq_pos:seq_pos + 1, :])

                # Calculate new network state at new frame (this updates the network's hidden activations, cell states,
                # and dependencies automatically)
                _ = lstm_layer.get_output()
        
        #
        # Loop through tickersteps
        #
        # Use zero input during ticker steps
        tickerstep_input = tf.zeros(dataset.X_shape[:1] + (1,) + dataset.X_shape[2:], dtype=tf.float32,
                                    name="tickerstep_input")
        
        for tickerstep in range(config.tickersteps):
            with tf.name_scope("Tickerstep_{}".format(tickerstep)):
                print("\t  tickerstep {}...".format(tickerstep))

                # Set rnn input layer to tickerstep input
                rnn_input_layer.update(tickerstep_input)

                # Calculate new network state at new frame (this updates the network's hidden activations, cell states,
                # and dependencies automatically)
                _ = lstm_layer.get_output(tickerstep_nodes=True)
        
        #
        # Calculate output but consider that the lstm_layer is already computed (i.e. do not modify cell states any
        # further)
        #
        output = output_layer.get_output(prev_layers=[lstm_layer])
        
        print("\tDone!")
        
        #
        # Publish
        #
        self.X = X
        self.y_ = y_
        self.output = output
        # Store layers in list for regularization in main file
        self.__layers = layers
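A detail in the RNN loop above that is easy to miss: X[:, seq_pos:seq_pos + 1, :] is used instead of X[:, seq_pos, :] because the slice keeps the sequence axis, so every per-step input still matches the [sample, 1, features] shape the RNNInputLayer was initialized with. A quick self-contained shape check:

import tensorflow as tf

X = tf.placeholder(tf.float32, shape=(None, 20, 7))  # [n_samples, n_seq_pos, n_features]
kept_axis = X[:, 3:4, :]     # slicing keeps the sequence axis
dropped_axis = X[:, 3, :]    # integer indexing removes it
print(kept_axis.get_shape().as_list())     # [None, 1, 7]
print(dropped_axis.get_shape().as_list())  # [None, 7]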
Example 8
    def __init__(self, config: Config, dataset):
        """Architecture with LSTM layer followed by dense output layer; Inputs are fed to LSTM layer sequence position
        by sequence position in tensorflow tf.while_loop();
        This is not as flexible as using a for-loop and is more difficult to use, but it can be faster and optimized
        differently; LSTM return_states is not possible here unless it is implemented manually inside the tf.while_loop
        (which is why only the prediction at the last sequence position is used in this example);
        This is an advanced example, see ArchitectureLSTM to get started;
        
        Command-line usage:
        Change entry
        "architecture": "sample_architectures.ArchitectureLSTM"
        to
        "architecture": "sample_architectures.ArchitectureLSTMOptimized" in samples/config_lstm.json. Then run
        >>> python3 samples/main_lstm.py --config=samples/config_lstm.json
        
        Example input shapes: [n_samples, n_sequence_positions, n_features]
        Example output shapes: [n_samples, 1, n_features]
        """
        #
        # Some convenience objects
        #
        # We will use a list to store all layers for regularization etc. (this is optional)
        layers = []
        # Prepare xavier initialization for weights
        w_init = tf.contrib.layers.xavier_initializer(uniform=False,
                                                      seed=None,
                                                      dtype=tf.float32)

        #
        # Create placeholders for input data (shape: [n_samples, n_sequence_positions, n_features])
        #
        X = tf.placeholder(tf.float32, shape=dataset.X_shape)
        y_ = tf.placeholder(tf.float32, shape=dataset.y_shape)
        n_output_units = dataset.y_shape[-1]  # nr of output features is number of classes
        n_seq_pos = dataset.X_shape[1]  # dataset.X_shape is [sample, seq_pos, features]

        # ----------------------------------------------------------------------------------------------------------
        # Define network architecture
        # ----------------------------------------------------------------------------------------------------------

        #
        # Input Layer
        #  RNNInputLayer will hold the input to the network at each sequence position. We will initialize it with a
        #  zero tensor of shape [sample, 1, features]
        #
        input_shape = dataset.X_shape[:1] + (1, ) + dataset.X_shape[2:]
        rnn_input_layer = RNNInputLayer(tf.zeros(input_shape,
                                                 dtype=tf.float32))
        layers.append(rnn_input_layer)

        #
        # LSTM Layer
        #
        print("\tLSTM...")
        lstm_layer = LSTMLayer(incoming=rnn_input_layer,
                               n_units=config.n_lstm,
                               name='LSTM',
                               W_ci=w_init,
                               W_ig=w_init,
                               W_og=w_init,
                               W_fg=w_init,
                               b_ci=tf.zeros,
                               b_ig=tf.zeros,
                               b_og=tf.zeros,
                               b_fg=tf.zeros,
                               a_ci=tf.tanh,
                               a_ig=tf.sigmoid,
                               a_og=tf.sigmoid,
                               a_fg=tf.sigmoid,
                               a_out=tf.nn.elu,
                               c_init=tf.zeros,
                               h_init=tf.zeros,
                               forgetgate=True,
                               precomp_fwds=True)
        layers.append(lstm_layer)

        #
        # Output Layer
        #
        print("\tOutput layer...")
        output_layer = DenseLayer(incoming=lstm_layer,
                                  n_units=n_output_units,
                                  name='DenseLayerOut',
                                  W=w_init,
                                  b=tf.zeros,
                                  a=tf.sigmoid)
        layers.append(output_layer)

        # ----------------------------------------------------------------------------------------------------------
        # Loop through sequence positions and create graph
        # ----------------------------------------------------------------------------------------------------------

        #
        # Loop through sequence positions
        #
        if n_seq_pos:

            def cond(seq_pos, *args):
                return seq_pos < n_seq_pos

            def body(seq_pos, lstm_h, lstm_c):
                # Set rnn input layer to input at current sequence position
                rnn_input_layer.update(X[:, seq_pos:seq_pos + 1, :])

                # Update lstm states
                lstm_layer.h[-1], lstm_layer.c[-1] = lstm_h, lstm_c

                # Calculate new network state at new frame (this updates the network's hidden activations, cell states,
                # and dependencies automatically)
                _ = lstm_layer.get_output()

                seq_pos = tf.add(seq_pos, 1)

                return seq_pos, lstm_layer.h[-1], lstm_layer.c[-1]

            with tf.name_scope("Sequence_pos"):
                print("\t  seq. pos. ...")
                wl_ret = tf.while_loop(cond=cond,
                                       body=body,
                                       loop_vars=(tf.constant(0),
                                                  lstm_layer.h[-1],
                                                  lstm_layer.c[-1]),
                                       parallel_iterations=10,
                                       back_prop=True,
                                       swap_memory=True)
                lstm_layer.h[-1], lstm_layer.c[-1] = wl_ret[-2], wl_ret[-1]

        #
        # Loop through tickersteps
        #
        if config.tickersteps:

            def cond(seq_pos, *args):
                return seq_pos < config.tickersteps

            def body(seq_pos, lstm_h, lstm_c):
                # Set rnn input layer to input at current sequence position
                rnn_input_layer.update(X[:, -1:, :])

                # Update lstm states
                lstm_layer.h[-1], lstm_layer.c[-1] = lstm_h, lstm_c

                # Calculate new network state at new frame (this updates the network's hidden activations, cell states,
                # and dependencies automatically)
                _ = lstm_layer.get_output(tickerstep_nodes=True)

                seq_pos = tf.add(seq_pos, 1)

                return seq_pos, lstm_layer.h[-1], lstm_layer.c[-1]

            with tf.name_scope("Tickersteps"):
                print("\t  tickersteps ...")
                wl_ret = tf.while_loop(cond=cond,
                                       body=body,
                                       loop_vars=(tf.constant(0),
                                                  lstm_layer.h[-1],
                                                  lstm_layer.c[-1]),
                                       parallel_iterations=10,
                                       back_prop=True,
                                       swap_memory=True)
                lstm_layer.h[-1], lstm_layer.c[-1] = wl_ret[-2], wl_ret[-1]

        #
        # Calculate output but consider that the lstm_layer is already computed
        #
        output = output_layer.get_output(prev_layers=[lstm_layer])

        print("\tDone!")

        #
        # Publish
        #
        self.X = X
        self.y_ = y_
        self.output = output
        # Store layers in list for regularization in main file
        self.__layers = layers
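The cond/body/loop_vars pattern used above can be tried in isolation: whatever body returns replaces loop_vars on the next iteration, which is why the LSTM's h and c are threaded through the loop variables explicitly. A self-contained toy version with the same keyword arguments:

import tensorflow as tf

n_steps = tf.constant(5)

def cond(step, acc):
    return step < n_steps

def body(step, acc):
    # Whatever is returned here becomes (step, acc) in the next iteration
    return step + 1, acc + tf.cast(step, tf.float32)

final_step, final_acc = tf.while_loop(cond=cond, body=body,
                                      loop_vars=(tf.constant(0), tf.constant(0.0)),
                                      parallel_iterations=10, back_prop=True, swap_memory=True)

with tf.Session() as sess:
    print(sess.run([final_step, final_acc]))  # [5, 10.0] -> 0 + 1 + 2 + 3 + 4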
Example 9
                           b_og=og_bias([n_lstm_cells]),
                           b_fg=fg_bias([n_lstm_cells]),
                           a_ci=tf.tanh,
                           a_ig=tf.sigmoid,
                           a_og=tf.sigmoid,
                           a_fg=tf.sigmoid,
                           a_out=tf.identity,
                           c_init=tf.zeros,
                           h_init=tf.zeros,
                           forgetgate=True,
                           store_states=True)

    n_output_units = 4
    output_layer = DenseLayer(incoming=lstm_layer,
                              n_units=n_output_units,
                              a=tf.identity,
                              W=w_init,
                              b=tf.zeros([n_output_units], dtype=tf.float32),
                              name="OutputLayer")

lstm_input_shape = reward_redistibution_input.get_output_shape()


#
# Ending condition
#
def cond(time, *args):
    """Break if game is over by looking at n_timesteps"""
    # ~ acts as a logical NOT on the boolean tensor: keep looping while time <= n_timesteps
    return ~tf.greater(time, n_timesteps)


#