def define_tensorflow_variables(net_params, trainable=True):
    """
    This function defines TF Variables from the C++ initialization.
    
    Parameters
    ----------
    trainable: boolean
        If `true` the network updates the convolutional layers as well as the
        instance norm layers of the network. If `false` only the instance norm
        layers of the network are updated.
    Returns
    -------
    out: dict
        The TF Variable dictionary.
    """
    tensorflow_variables = dict()
    for key, value in net_params.items():
        if 'weight' in key:
            if 'conv' in key:
                tensorflow_variables[key] = _tf.Variable(
                    initial_value=_utils.convert_conv2d_coreml_to_tf(value),
                    name=key,
                    trainable=trainable)
            else:
                tensorflow_variables[key] = _tf.Variable(
                    initial_value=_utils.convert_dense_coreml_to_tf(value),
                    name=key,
                    trainable=trainable)
        else:
            tensorflow_variables[key] = _tf.Variable(
                initial_value=value, name=key, trainable=trainable)
    return tensorflow_variables
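
# A minimal usage sketch for the function above. The key names and shapes are
# hypothetical, and `_utils`/`_tf` are assumed to be imported as in the
# surrounding module; Core ML conv weights arrive as
# [out_channels, in_channels, kH, kW].
import numpy as np

net_params_demo = {
    "transformer_conv0_weight": np.zeros((16, 3, 3, 3), dtype=np.float32),
    "transformer_conv0_bias": np.zeros((16,), dtype=np.float32),
}
variables = define_tensorflow_variables(net_params_demo, trainable=True)
# Each value is now a tf.Variable in TF layout, keyed by its original name.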
    def load_weights(self, net_params):
        """
        Function to load weights from the C++ implementation into TensorFlow

        Parameters
        ----------
        net_params: Dictionary
            Dict with weights from the C++ implementation and  its names

        """
        for key in net_params.keys():
            if key in self.weights.keys():
                if key.startswith('conv'):
                    net_params[key] = _utils.convert_conv1d_coreml_to_tf(
                        net_params[key])
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key + ":0"),
                            net_params[key]))
                elif key.startswith('dense'):
                    net_params[key] = _utils.convert_dense_coreml_to_tf(
                        net_params[key])
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key + ":0"),
                            net_params[key]))
            elif key in self.biases.keys():
                self.sess.run(
                    _tf.assign(
                        _tf.get_default_graph().get_tensor_by_name(key + ":0"),
                        net_params[key]))

        h2h_i_bias = net_params['lstm_h2h_i_bias']
        h2h_c_bias = net_params['lstm_h2h_c_bias']
        h2h_f_bias = net_params['lstm_h2h_f_bias']
        h2h_o_bias = net_params['lstm_h2h_o_bias']
        lstm_bias = _utils.convert_lstm_bias_coreml_to_tf(
            h2h_i_bias, h2h_c_bias, h2h_f_bias, h2h_o_bias)
        self.sess.run(
            _tf.assign(
                _tf.get_default_graph().get_tensor_by_name('rnn/lstm_cell/bias:0'),
                lstm_bias))
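
# The final block above fuses the four per-gate Core ML biases into the single
# bias vector that TF's LSTMCell expects. A plausible NumPy sketch of that
# helper; the [input, cell, forget, output] gate ordering is an assumption
# based on TF's usual fused-gate layout.
import numpy as np

def convert_lstm_bias_coreml_to_tf_sketch(i_bias, c_bias, f_bias, o_bias):
    # Assumption: concatenate the per-gate biases in [i, c, f, o] order.
    return np.concatenate([i_bias, c_bias, f_bias, o_bias], axis=0)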
def define_tensorflow_variables(net_params, trainable=True):
    """
    This function defines TF Variables from the C++ initialization.

    Parameters
    ----------
    trainable: boolean
        If `True` the transformer network updates the convolutional layers as
        well as the instance norm layers of the network. If `False` only the
        instance norm layers of the network are updated.

        Note the VGG network's parameters aren't updated
    Returns
    -------
    out: dict
        The TF Variable dictionary.
    """
    _tf = _lazy_import_tensorflow()
    tensorflow_variables = dict()
    for key in net_params.keys():
        if "weight" in key:
            # only set the parameter to train if in the transformer network
            train_param = trainable and "transformer_" in key
            if "conv" in key:
                tensorflow_variables[key] = _tf.Variable(
                    initial_value=_utils.convert_conv2d_coreml_to_tf(
                        net_params[key]),
                    name=key,
                    trainable=train_param,
                )
            else:
                # This is the path that the instance norm takes
                tensorflow_variables[key] = _tf.Variable(
                    initial_value=_utils.convert_dense_coreml_to_tf(
                        net_params[key]),
                    name=key,
                    trainable=True,
                )
        else:
            tensorflow_variables[key] = _tf.Variable(
                initial_value=net_params[key], name=key, trainable=False)
    return tensorflow_variables
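
# In this variant the `trainable` flag only reaches the transformer's conv
# weights: instance-norm parameters (the non-conv "weight" path) are always
# trainable, and everything else, VGG included, is frozen. A sketch with
# hypothetical key names and illustrative shapes:
import numpy as np

_tf = _lazy_import_tensorflow()
_tf.reset_default_graph()
params_demo = {
    "transformer_conv0_weight": np.zeros((32, 3, 9, 9), dtype=np.float32),
    "transformer_instancenorm0_weight": np.ones((1, 32), dtype=np.float32),
    "vgg_conv0_weight": np.zeros((64, 3, 3, 3), dtype=np.float32),
}
define_tensorflow_variables(params_demo, trainable=True)
# Expect only the transformer conv and instance-norm variables here; the VGG
# conv weight was created with trainable=False.
print([v.name for v in _tf.trainable_variables()])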
    def __init__(self, net_params, batch_size, num_classes):
        """
        Defines the TensorFlow model, loss, optimisation and accuracy. Then
        loads the MXNET weights into the model.

        """
        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(
                net_params[key])

        _tf.reset_default_graph()

        self.num_classes = num_classes
        self.batch_size = batch_size

        self.input = _tf.placeholder(_tf.float32, [None, 28, 28, 1])

        self.one_hot_labels = _tf.placeholder(_tf.int32,
                                              [None, self.num_classes])

        # Weights
        weights = {
            'drawing_conv0_weight':
            _tf.Variable(_tf.zeros([3, 3, 1, 16]),
                         name='drawing_conv0_weight'),
            'drawing_conv1_weight':
            _tf.Variable(_tf.zeros([3, 3, 16, 32]),
                         name='drawing_conv1_weight'),
            'drawing_conv2_weight':
            _tf.Variable(_tf.zeros([3, 3, 32, 64]),
                         name='drawing_conv2_weight'),
            'drawing_dense0_weight':
            _tf.Variable(_tf.zeros([576, 128]), name='drawing_dense0_weight'),
            'drawing_dense1_weight':
            _tf.Variable(_tf.zeros([128, self.num_classes]),
                         name='drawing_dense1_weight')
        }

        # Biases
        biases = {
            'drawing_conv0_bias':
            _tf.Variable(_tf.zeros([16]), name='drawing_conv0_bias'),
            'drawing_conv1_bias':
            _tf.Variable(_tf.zeros([32]), name='drawing_conv1_bias'),
            'drawing_conv2_bias':
            _tf.Variable(_tf.zeros([64]), name='drawing_conv2_bias'),
            'drawing_dense0_bias':
            _tf.Variable(_tf.zeros([128]), name='drawing_dense0_bias'),
            'drawing_dense1_bias':
            _tf.Variable(_tf.zeros([self.num_classes]),
                         name='drawing_dense1_bias')
        }

        conv_1 = _tf.nn.conv2d(self.input,
                               weights["drawing_conv0_weight"],
                               strides=1,
                               padding='SAME')
        conv_1 = _tf.nn.bias_add(conv_1, biases["drawing_conv0_bias"])
        relu_1 = _tf.nn.relu(conv_1)
        pool_1 = _tf.nn.max_pool2d(relu_1,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')

        conv_2 = _tf.nn.conv2d(pool_1,
                               weights["drawing_conv1_weight"],
                               strides=1,
                               padding='SAME')
        conv_2 = _tf.nn.bias_add(conv_2, biases["drawing_conv1_bias"])
        relu_2 = _tf.nn.relu(conv_2)
        pool_2 = _tf.nn.max_pool2d(relu_2,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')

        conv_3 = _tf.nn.conv2d(pool_2,
                               weights["drawing_conv2_weight"],
                               strides=1,
                               padding='SAME')
        conv_3 = _tf.nn.bias_add(conv_3, biases["drawing_conv2_bias"])
        relu_3 = _tf.nn.relu(conv_3)
        pool_3 = _tf.nn.max_pool2d(relu_3,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')

        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = _tf.reshape(pool_3, (-1, 576))

        fc1 = _tf.nn.xw_plus_b(fc1,
                               weights=weights["drawing_dense0_weight"],
                               biases=biases["drawing_dense0_bias"])

        fc1 = _tf.nn.relu(fc1)

        out = _tf.nn.xw_plus_b(fc1,
                               weights=weights["drawing_dense1_weight"],
                               biases=biases["drawing_dense1_bias"])
        softmax_out = _tf.nn.softmax(out)

        self.predictions = softmax_out

        # Loss
        self.cost = _tf.losses.softmax_cross_entropy(
            logits=out,
            onehot_labels=self.one_hot_labels,
            reduction=_tf.losses.Reduction.NONE)

        # Optimizer
        self.optimizer = _tf.train.AdamOptimizer(learning_rate=0.001).minimize(
            self.cost)

        # Accuracy
        correct_prediction = _tf.equal(_tf.argmax(self.predictions, 1),
                                       _tf.argmax(self.one_hot_labels, 1))

        self.sess = _tf.Session()
        self.sess.run(_tf.global_variables_initializer())

        # Assign the initialized weights from C++ to TensorFlow
        layers = [
            'drawing_conv0_weight', 'drawing_conv0_bias',
            'drawing_conv1_weight', 'drawing_conv1_bias',
            'drawing_conv2_weight', 'drawing_conv2_bias',
            'drawing_dense0_weight', 'drawing_dense0_bias',
            'drawing_dense1_weight', 'drawing_dense1_bias'
        ]

        for key in layers:
            if 'bias' in key:
                self.sess.run(
                    _tf.assign(
                        _tf.get_default_graph().get_tensor_by_name(key + ":0"),
                        net_params[key]))
            else:
                if 'drawing_dense0_weight' in key:
                    # To make the output of Core ML pool3 (NCHW) compatible
                    # with TF (NHWC): decompose the FC weights to NCHW,
                    # transpose to NHWC, then reshape back to FC.
                    coreml_128_576 = net_params[key]
                    coreml_128_576 = _np.reshape(coreml_128_576,
                                                 (128, 64, 3, 3))
                    coreml_128_576 = _np.transpose(coreml_128_576,
                                                   (0, 2, 3, 1))
                    coreml_128_576 = _np.reshape(coreml_128_576, (128, 576))
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            _np.transpose(coreml_128_576, (1, 0))))
                elif 'dense' in key:
                    dense_weights = _utils.convert_dense_coreml_to_tf(
                        net_params[key])
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            dense_weights))
                else:
                    # TODO: Call _utils.convert_conv2d_coreml_to_tf when #2513 is merged
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            _np.transpose(net_params[key], (2, 3, 1, 0))))
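
# The drawing_dense0_weight special case above deserves a closer look:
# Core ML flattens the pool_3 activation in NCHW order, while this TF graph
# flattens it in NHWC order, so the dense weight's input axis must be
# permuted to match. The same round trip as a standalone NumPy check:
import numpy as np

coreml_w = np.arange(128 * 576, dtype=np.float32).reshape(128, 576)
w = coreml_w.reshape(128, 64, 3, 3)  # un-flatten the 576 inputs to C,H,W
w = w.transpose(0, 2, 3, 1)          # reorder those inputs to H,W,C
w = w.reshape(128, 576)              # re-flatten in NHWC order
tf_w = w.transpose(1, 0)             # TF dense weights are [in, out]
assert tf_w.shape == (576, 128)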
    def init_drawing_classifier_graph(self, net_params):
        _tf = _lazy_import_tensorflow()

        self.input = _tf.placeholder(_tf.float32, [self.batch_size, 28, 28, 1])
        self.weights = _tf.placeholder(_tf.float32, [self.batch_size, 1])
        self.labels = _tf.placeholder(_tf.int64, [self.batch_size, 1])

        # One hot encoding target
        reshaped_labels = _tf.reshape(self.labels, [self.batch_size])
        one_hot_labels = _tf.one_hot(reshaped_labels,
                                     depth=self.num_classes,
                                     axis=-1)

        # Reshaping weights
        reshaped_weights = _tf.reshape(self.weights, [self.batch_size])

        self.one_hot_labels = _tf.placeholder(_tf.int32,
                                              [None, self.num_classes])

        # Weights
        weights = {
            name:
            _tf.Variable(_utils.convert_conv2d_coreml_to_tf(net_params[name]),
                         name=name)
            for name in (
                "drawing_conv0_weight",
                "drawing_conv1_weight",
                "drawing_conv2_weight",
            )
        }
        weights["drawing_dense1_weight"] = _tf.Variable(
            _utils.convert_dense_coreml_to_tf(
                net_params["drawing_dense1_weight"]),
            name="drawing_dense1_weight",
        )
        """
        To make output of CoreML pool3 (NCHW) compatible with TF (NHWC).
        Decompose FC weights to NCHW. Transpose to NHWC. Reshape back to FC.
        """
        coreml_128_576 = net_params["drawing_dense0_weight"]
        coreml_128_576 = _np.reshape(coreml_128_576, (128, 64, 3, 3))
        coreml_128_576 = _np.transpose(coreml_128_576, (0, 2, 3, 1))
        coreml_128_576 = _np.reshape(coreml_128_576, (128, 576))
        weights["drawing_dense0_weight"] = _tf.Variable(
            _np.transpose(coreml_128_576, (1, 0)),
            name="drawing_dense0_weight")

        # Biases
        biases = {
            name: _tf.Variable(net_params[name], name=name)
            for name in (
                "drawing_conv0_bias",
                "drawing_conv1_bias",
                "drawing_conv2_bias",
                "drawing_dense0_bias",
                "drawing_dense1_bias",
            )
        }

        conv_1 = _tf.nn.conv2d(self.input,
                               weights["drawing_conv0_weight"],
                               strides=1,
                               padding="SAME")
        conv_1 = _tf.nn.bias_add(conv_1, biases["drawing_conv0_bias"])
        relu_1 = _tf.nn.relu(conv_1)
        pool_1 = _tf.nn.max_pool2d(relu_1,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding="VALID")

        conv_2 = _tf.nn.conv2d(pool_1,
                               weights["drawing_conv1_weight"],
                               strides=1,
                               padding="SAME")
        conv_2 = _tf.nn.bias_add(conv_2, biases["drawing_conv1_bias"])
        relu_2 = _tf.nn.relu(conv_2)
        pool_2 = _tf.nn.max_pool2d(relu_2,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding="VALID")

        conv_3 = _tf.nn.conv2d(pool_2,
                               weights["drawing_conv2_weight"],
                               strides=1,
                               padding="SAME")
        conv_3 = _tf.nn.bias_add(conv_3, biases["drawing_conv2_bias"])
        relu_3 = _tf.nn.relu(conv_3)
        pool_3 = _tf.nn.max_pool2d(relu_3,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding="VALID")

        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = _tf.reshape(pool_3, (-1, 576))
        fc1 = _tf.nn.xw_plus_b(
            fc1,
            weights=weights["drawing_dense0_weight"],
            biases=biases["drawing_dense0_bias"],
        )
        fc1 = _tf.nn.relu(fc1)

        out = _tf.nn.xw_plus_b(
            fc1,
            weights=weights["drawing_dense1_weight"],
            biases=biases["drawing_dense1_bias"],
        )
        self.predictions = _tf.nn.softmax(out)

        # Loss
        self.cost = _tf.losses.softmax_cross_entropy(
            logits=out,
            onehot_labels=one_hot_labels,
            weights=reshaped_weights,
            reduction=_tf.losses.Reduction.NONE,
        )

        # Optimizer
        self.optimizer = _tf.train.AdamOptimizer(learning_rate=0.001).minimize(
            self.cost)

        self.sess = _tf.Session()
        self.sess.run(_tf.global_variables_initializer())
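
# Here the conv weights go through _utils.convert_conv2d_coreml_to_tf rather
# than a hand-written transpose. Judging from the explicit (2, 3, 1, 0)
# permutation in the __init__ above, that helper plausibly maps Core ML's
# [out_channels, in_channels, kH, kW] layout to TF's
# [kH, kW, in_channels, out_channels]; a sketch under that assumption:
import numpy as np

def convert_conv2d_coreml_to_tf_sketch(w):
    return np.transpose(w, (2, 3, 1, 0))

w_demo = np.zeros((16, 1, 3, 3), dtype=np.float32)
assert convert_conv2d_coreml_to_tf_sketch(w_demo).shape == (3, 3, 1, 16)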
    def init_drawing_classifier_graph(self, net_params):

        self.input = _tf.placeholder(_tf.float32, [self.batch_size, 28, 28, 1])
        self.weights = _tf.placeholder(_tf.float32, [self.batch_size, 1])
        self.labels = _tf.placeholder(_tf.int64, [self.batch_size, 1])

        # One hot encoding target
        reshaped_labels = _tf.reshape(self.labels, [self.batch_size])
        one_hot_labels = _tf.one_hot(reshaped_labels,
                                     depth=self.num_classes,
                                     axis=-1)

        # Reshaping weights
        reshaped_weights = _tf.reshape(self.weights, [self.batch_size])

        self.one_hot_labels = _tf.placeholder(_tf.int32,
                                              [None, self.num_classes])

        # Weights
        weights = {
            "drawing_conv0_weight":
            _tf.Variable(_tf.zeros([3, 3, 1, 16]),
                         name="drawing_conv0_weight"),
            "drawing_conv1_weight":
            _tf.Variable(_tf.zeros([3, 3, 16, 32]),
                         name="drawing_conv1_weight"),
            "drawing_conv2_weight":
            _tf.Variable(_tf.zeros([3, 3, 32, 64]),
                         name="drawing_conv2_weight"),
            "drawing_dense0_weight":
            _tf.Variable(_tf.zeros([576, 128]), name="drawing_dense0_weight"),
            "drawing_dense1_weight":
            _tf.Variable(_tf.zeros([128, self.num_classes]),
                         name="drawing_dense1_weight"),
        }

        # Biases
        biases = {
            "drawing_conv0_bias":
            _tf.Variable(_tf.zeros([16]), name="drawing_conv0_bias"),
            "drawing_conv1_bias":
            _tf.Variable(_tf.zeros([32]), name="drawing_conv1_bias"),
            "drawing_conv2_bias":
            _tf.Variable(_tf.zeros([64]), name="drawing_conv2_bias"),
            "drawing_dense0_bias":
            _tf.Variable(_tf.zeros([128]), name="drawing_dense0_bias"),
            "drawing_dense1_bias":
            _tf.Variable(_tf.zeros([self.num_classes]),
                         name="drawing_dense1_bias"),
        }

        conv_1 = _tf.nn.conv2d(self.input,
                               weights["drawing_conv0_weight"],
                               strides=1,
                               padding="SAME")
        conv_1 = _tf.nn.bias_add(conv_1, biases["drawing_conv0_bias"])
        relu_1 = _tf.nn.relu(conv_1)
        pool_1 = _tf.nn.max_pool2d(relu_1,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding="VALID")

        conv_2 = _tf.nn.conv2d(pool_1,
                               weights["drawing_conv1_weight"],
                               strides=1,
                               padding="SAME")
        conv_2 = _tf.nn.bias_add(conv_2, biases["drawing_conv1_bias"])
        relu_2 = _tf.nn.relu(conv_2)
        pool_2 = _tf.nn.max_pool2d(relu_2,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding="VALID")

        conv_3 = _tf.nn.conv2d(pool_2,
                               weights["drawing_conv2_weight"],
                               strides=1,
                               padding="SAME")
        conv_3 = _tf.nn.bias_add(conv_3, biases["drawing_conv2_bias"])
        relu_3 = _tf.nn.relu(conv_3)
        pool_3 = _tf.nn.max_pool2d(relu_3,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding="VALID")

        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = _tf.reshape(pool_3, (-1, 576))

        fc1 = _tf.nn.xw_plus_b(
            fc1,
            weights=weights["drawing_dense0_weight"],
            biases=biases["drawing_dense0_bias"],
        )

        fc1 = _tf.nn.relu(fc1)

        out = _tf.nn.xw_plus_b(
            fc1,
            weights=weights["drawing_dense1_weight"],
            biases=biases["drawing_dense1_bias"],
        )
        softmax_out = _tf.nn.softmax(out)

        self.predictions = softmax_out

        # Loss
        self.cost = _tf.losses.softmax_cross_entropy(
            logits=out,
            onehot_labels=one_hot_labels,
            weights=reshaped_weights,
            reduction=_tf.losses.Reduction.NONE,
        )

        # Optimizer
        self.optimizer = _tf.train.AdamOptimizer(learning_rate=0.001).minimize(
            self.cost)

        self.sess = _tf.Session()
        self.sess.run(_tf.global_variables_initializer())

        # Assign the initialized weights from C++ to TensorFlow
        layers = [
            "drawing_conv0_weight",
            "drawing_conv0_bias",
            "drawing_conv1_weight",
            "drawing_conv1_bias",
            "drawing_conv2_weight",
            "drawing_conv2_bias",
            "drawing_dense0_weight",
            "drawing_dense0_bias",
            "drawing_dense1_weight",
            "drawing_dense1_bias",
        ]

        for key in layers:
            if "bias" in key:
                self.sess.run(
                    _tf.assign(
                        _tf.get_default_graph().get_tensor_by_name(key + ":0"),
                        net_params[key],
                    ))
            else:
                if "drawing_dense0_weight" in key:
                    """
                    To make output of CoreML pool3 (NCHW) compatible with TF (NHWC).
                    Decompose FC weights to NCHW. Transpose to NHWC. Reshape back to FC.
                    """
                    coreml_128_576 = net_params[key]
                    coreml_128_576 = _np.reshape(coreml_128_576,
                                                 (128, 64, 3, 3))
                    coreml_128_576 = _np.transpose(coreml_128_576,
                                                   (0, 2, 3, 1))
                    coreml_128_576 = _np.reshape(coreml_128_576, (128, 576))
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            _np.transpose(coreml_128_576, (1, 0)),
                        ))
                elif "dense" in key:
                    dense_weights = _utils.convert_dense_coreml_to_tf(
                        net_params[key])
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            dense_weights,
                        ))
                else:
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            _utils.convert_conv2d_coreml_to_tf(
                                net_params[key]),
                        ))
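
# With the graph built and the C++ weights assigned, one training step feeds
# the three placeholders and runs the optimizer. A minimal sketch with
# synthetic data; `model` (an instance carrying this graph), `batch_size`,
# and `num_classes` are assumptions.
import numpy as np

batch_size, num_classes = 32, 10  # illustrative values
images = np.random.rand(batch_size, 28, 28, 1).astype(np.float32)
labels = np.random.randint(0, num_classes, size=(batch_size, 1))
sample_weights = np.ones((batch_size, 1), dtype=np.float32)

_, loss = model.sess.run(
    [model.optimizer, model.cost],
    feed_dict={
        model.input: images,
        model.labels: labels,
        model.weights: sample_weights,
    },
)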
    def init_activity_classifier_graph(
        self, net_params, num_features, prediction_window, seed
    ):
        # Vars
        self.data = _tf.placeholder(
            _tf.float32, [None, prediction_window * self.seq_len, num_features]
        )
        self.weight = _tf.placeholder(_tf.float32, [None, self.seq_len, 1])
        self.target = _tf.placeholder(_tf.int32, [None, self.seq_len, 1])
        self.is_training = _tf.placeholder(_tf.bool)

        # Reshaping weights
        reshaped_weight = _tf.reshape(self.weight, [self.batch_size, self.seq_len])

        # One hot encoding target
        reshaped_target = _tf.reshape(self.target, [self.batch_size, self.seq_len])
        one_hot_target = _tf.one_hot(reshaped_target, depth=self.num_classes, axis=-1)

        # Weights
        self.weights = {
            "conv_weight": _tf.Variable(
                _utils.convert_conv1d_coreml_to_tf(net_params["conv_weight"]),
                shape=[prediction_window, num_features, CONV_H],
                name="conv_weight"
            ),
            "dense0_weight": _tf.Variable(
                _utils.convert_dense_coreml_to_tf(net_params["dense0_weight"]),
                shape=[LSTM_H, DENSE_H],
                name="dense0_weight"
            ),
            "dense1_weight": _tf.Variable(
                _utils.convert_dense_coreml_to_tf(net_params["dense1_weight"]),
                shape=[DENSE_H, self.num_classes],
                name="dense1_weight"
            ),
        }

        # Biases
        self.biases = {
            "conv_bias": _tf.Variable(
                net_params["conv_bias"],
                shape=[CONV_H],
                name="conv_bias"),
            "dense0_bias": _tf.Variable(
                net_params["dense0_bias"],
                shape=[DENSE_H],
                name="dense0_bias"),
        }

        # Convolution
        conv = _tf.nn.conv1d(
            self.data,
            self.weights["conv_weight"],
            stride=prediction_window,
            padding="SAME",
        )
        conv = _tf.nn.bias_add(conv, self.biases["conv_bias"])
        conv = _tf.nn.relu(conv)

        dropout = _tf.layers.dropout(
            conv, rate=0.2, training=self.is_training, seed=seed
        )

        # Long Short Term Memory
        lstm = self._get_lstm_weights_params(net_params)
        cells = _tf.nn.rnn_cell.LSTMCell(
            num_units=LSTM_H,
            reuse=_tf.AUTO_REUSE,
            forget_bias=0.0,
            initializer=_tf.initializers.constant(lstm, verify_shape=True),
        )
        init_state = cells.zero_state(self.batch_size, _tf.float32)
        rnn_outputs, _ = _tf.nn.dynamic_rnn(
            cells, dropout, initial_state=init_state
        )

        # Dense
        dense = _tf.reshape(rnn_outputs, (-1, LSTM_H))
        dense = _tf.add(
            _tf.matmul(dense, self.weights["dense0_weight"]), self.biases["dense0_bias"]
        )
        dense = _tf.layers.batch_normalization(
            inputs=dense,
            beta_initializer=_tf.initializers.constant(
                net_params["bn_beta"], verify_shape=True
            ),
            gamma_initializer=_tf.initializers.constant(
                net_params["bn_gamma"], verify_shape=True
            ),
            moving_mean_initializer=_tf.initializers.constant(
                net_params["bn_running_mean"], verify_shape=True
            ),
            moving_variance_initializer=_tf.initializers.constant(
                net_params["bn_running_var"], verify_shape=True
            ),
            training=self.is_training,
        )

        dense = _tf.nn.relu(dense)
        dense = _tf.layers.dropout(
            dense, rate=0.5, training=self.is_training, seed=seed
        )

        # Output
        out = _tf.matmul(dense, self.weights["dense1_weight"])
        out = _tf.reshape(out, (-1, self.seq_len, self.num_classes))
        self.probs = _tf.nn.softmax(out)

        # Weights
        seq_sum_weights = _tf.reduce_sum(reshaped_weight, axis=1)
        binary_seq_sum_weights = _tf.reduce_sum(
            _tf.cast(seq_sum_weights > 0, dtype=_tf.float32)
        )

        # Loss
        loss = _tf.losses.softmax_cross_entropy(
            logits=out,
            onehot_labels=one_hot_target,
            weights=reshaped_weight,
            reduction=_tf.losses.Reduction.NONE,
        )
        self.loss_per_seq = _tf.reduce_sum(loss, axis=1) / (seq_sum_weights + 1e-5)
        self.loss_op = _tf.reduce_sum(self.loss_per_seq) / (
            binary_seq_sum_weights + 1e-5
        )

        # Optimizer
        update_ops = _tf.get_collection(_tf.GraphKeys.UPDATE_OPS)
        self.set_learning_rate(1e-3)
        train_op = self.optimizer.minimize(self.loss_op)
        self.train_op = _tf.group([train_op, update_ops])

        # Initialize all variables
        self.sess.run(_tf.global_variables_initializer())
        self.sess.run(_tf.local_variables_initializer())

        self._load_lstm_biases(net_params)
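
# A corresponding train-step sketch for the activity classifier; `model`,
# `batch_size`, `seq_len`, `prediction_window`, `num_features`, and
# `num_classes` are assumed to match the graph above. Shapes follow the
# placeholders defined at the top of the method.
import numpy as np

data = np.random.rand(
    batch_size, prediction_window * seq_len, num_features).astype(np.float32)
target = np.random.randint(0, num_classes, size=(batch_size, seq_len, 1))
weight = np.ones((batch_size, seq_len, 1), dtype=np.float32)

_, loss_per_seq = model.sess.run(
    [model.train_op, model.loss_per_seq],
    feed_dict={
        model.data: data,
        model.target: target,
        model.weight: weight,
        model.is_training: True,  # enables dropout; batch norm uses batch stats
    },
)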