Code Example #1
File: network.py Project: tfboyd/agents
    def summary(self, line_length=None, positions=None, print_fn=None):
        """Prints a string summary of the network.

        Args:
            line_length: Total length of printed lines
                (e.g. set this to adapt the display to different
                terminal window sizes).
            positions: Relative or absolute positions of log elements
                in each line. If not provided,
                defaults to `[.33, .55, .67, 1.]`.
            print_fn: Print function to use. Defaults to `print`.
                It will be called on each line of the summary.
                You can set it to a custom function
                in order to capture the string summary.

        Raises:
            ValueError: if `summary()` is called before the model is built.
        """
        if not self.built:
            raise ValueError(
                "This model has not yet been built. "
                "Build the model first by calling `build()` or "
                "`__call__()` with some data, or `create_variables()`.")
        layer_utils.print_summary(self,
                                  line_length=line_length,
                                  positions=positions,
                                  print_fn=print_fn)
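
The `print_fn` hook documented above makes it easy to capture the summary as a string rather than printing it. A minimal usage sketch (`my_network` is a placeholder for any network that has already been built):

    # Collect each summary line instead of printing it.
    lines = []
    my_network.summary(print_fn=lines.append)
    summary_text = "\n".join(lines)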
Code Example #2
    def build(self, inputs_shape):
        """ Build TCN architecture

        The `inputs_shape` argument is an `(N, T, D)` tuple, where `N` denotes the number of samples, `T` the
        number of time-frames, and `D` the number of channels.
        """
        x = tf.keras.layers.Input(inputs_shape[1:])

        dropout_rate = 1 - self.config.keep_prob

        net = x
        for _ in range(self.config.nb_conv_stacks):
            net = tf.keras.layers.Conv1D(
                filters=self.config.nb_conv_filters,
                kernel_size=self.config.kernel_size,
                strides=self.config.nb_conv_strides,
                padding=self.config.padding,
                kernel_initializer=self.config.kernel_initializer,
                kernel_regularizer=tf.keras.regularizers.l2(
                    self.config.kernel_regularizer))(net)
            if self.config.use_batch_norm:
                net = tf.keras.layers.BatchNormalization(axis=-1)(net)

            net = tf.keras.layers.Activation(self.config.activation)(net)

            net = tf.keras.layers.Dropout(dropout_rate)(net)

        net = tf.keras.layers.Flatten()(net)

        for _ in range(self.config.nb_fc_stacks):
            net = tf.keras.layers.Dense(
                units=self.config.nb_fc_neurons,
                kernel_initializer=self.config.kernel_initializer,
                kernel_regularizer=tf.keras.regularizers.l2(
                    self.config.kernel_regularizer))(net)
            if self.config.use_batch_norm:
                net = tf.keras.layers.BatchNormalization(axis=-1)(net)

            net = tf.keras.layers.Activation(self.config.activation)(net)

            net = tf.keras.layers.Dropout(dropout_rate)(net)

        net = tf.keras.layers.Dense(
            units=self.config.n_classes,
            kernel_initializer=self.config.kernel_initializer,
            kernel_regularizer=tf.keras.regularizers.l2(
                self.config.kernel_regularizer))(net)

        net = tf.keras.layers.Softmax()(net)

        self.net = tf.keras.Model(inputs=x, outputs=net)

        print_summary(self.net)
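
Everything in this `build` is driven by `self.config`. A hedged sketch of the fields it reads (the names are taken from the code above; the values here are illustrative only):

    from types import SimpleNamespace

    # Illustrative values; the field names are exactly those read in build().
    config = SimpleNamespace(
        keep_prob=0.5,             # dropout_rate = 1 - keep_prob
        nb_conv_stacks=3,          # number of Conv1D blocks
        nb_conv_filters=64,
        kernel_size=5,
        nb_conv_strides=1,
        padding='same',
        kernel_initializer='he_normal',
        kernel_regularizer=1e-6,   # L2 regularization factor
        use_batch_norm=True,
        activation='relu',
        nb_fc_stacks=1,            # number of Dense blocks
        nb_fc_neurons=256,
        n_classes=10,
    )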
Code Example #3
    def build(self, inputs_shape):
        """ Build Transformer encoder architecture

        The `inputs_shape` argument is an `(N, T, D)` tuple, where `N` denotes the number of samples, `T` the
        number of time-frames, and `D` the number of channels.
        """
        seq_len = inputs_shape[1]

        self.net = tf.keras.Sequential([
            self.encoder, self.dense,
            tf.keras.layers.MaxPool1D(pool_size=seq_len),
            tf.keras.layers.Lambda(
                lambda x: tf.keras.backend.squeeze(x, axis=-2),
                name='squeeze'),
            tf.keras.layers.Softmax()
        ])
        # Build the model, so we can print the summary
        self.net.build(inputs_shape)

        print_summary(self.net)
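
Because `seq_len = inputs_shape[1]`, the `MaxPool1D(pool_size=seq_len)` layer pools over every time-frame at once, leaving a singleton time axis that the `Lambda` squeeze then removes. A standalone sketch of that collapsing trick, with hypothetical shapes (T=24, D=16):

    import tensorflow as tf

    seq_len = 24
    demo = tf.keras.Sequential([
        tf.keras.layers.MaxPool1D(pool_size=seq_len),         # (N, 24, 16) -> (N, 1, 16)
        tf.keras.layers.Lambda(
            lambda x: tf.keras.backend.squeeze(x, axis=-2)),  # (N, 1, 16) -> (N, 16)
    ])
    demo.build((None, seq_len, 16))  # batch dimension stays unspecified
    demo.summary()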
Code Example #4
File: trainModel.py Project: rlarkdms/Turing-Tale
def main():
    features, labels = loadFromPickle()
    # features, labels = augmentData(features, labels)
    features, labels = shuffle(features, labels)
    labels = prepress_labels(labels)
    train_x, test_x, train_y, test_y = train_test_split(features,
                                                        labels,
                                                        random_state=0,
                                                        test_size=0.1)
    train_x = train_x.reshape(train_x.shape[0], 28, 28, 1)
    test_x = test_x.reshape(test_x.shape[0], 28, 28, 1)
    model, callbacks_list = keras_model(28, 28)
    print_summary(model)
    model.fit(train_x,
              train_y,
              validation_data=(test_x, test_y),
              epochs=50,
              batch_size=64,
              callbacks=[TensorBoard(log_dir="QuickDraw")])
    model.save('QuickDraw.h5')
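
The snippet does not show its imports; in standalone Keras 2.x the `print_summary` helper was importable from `keras.utils`, while current tf.keras only offers the equivalent `model.summary()` method. A sketch under that assumption:

    # Assumed import for the snippet above (standalone Keras 2.x):
    from keras.utils import print_summary

    print_summary(model)  # same table as model.summary() in current tf.keras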
Code Example #5
    def build(self, inputs_shape):
        """ Build TCN architecture

        The `inputs_shape` argument is an `(N, T, D)` tuple, where `N` denotes the number of samples, `T` the
        number of time-frames, and `D` the number of channels.
        """
        x = tf.keras.layers.Input(inputs_shape[1:])

        dropout_rate = 1 - self.config.keep_prob

        net = x

        net = tf.keras.layers.Conv1D(
            filters=self.config.nb_filters,
            kernel_size=1,
            padding=self.config.padding,
            kernel_initializer=self.config.kernel_initializer)(net)

        # list to hold all the member ResidualBlocks
        residual_blocks = list()
        skip_connections = list()

        total_num_blocks = self.config.nb_stacks * len(self.config.dilations)
        if not self.config.use_skip_connections:
            # Inflate the count so that `last_block` below can never be True
            # when skip connections are disabled.
            total_num_blocks += 1

        for s in range(self.config.nb_stacks):
            for d in self.config.dilations:
                net, skip_out = ResidualBlock(
                    dilation_rate=d,
                    nb_filters=self.config.nb_filters,
                    kernel_size=self.config.kernel_size,
                    padding=self.config.padding,
                    activation=self.config.activation,
                    dropout_rate=dropout_rate,
                    use_batch_norm=self.config.use_batch_norm,
                    use_layer_norm=self.config.use_layer_norm,
                    kernel_initializer=self.config.kernel_initializer,
                    last_block=len(residual_blocks) + 1 == total_num_blocks,
                    name=f'residual_block_{len(residual_blocks)}')(net)
                residual_blocks.append(net)
                skip_connections.append(skip_out)

        # Author: @karolbadowski.
        output_slice_index = int(net.shape.as_list()[1] / 2) \
            if self.config.padding.lower() == 'same' else -1
        lambda_layer = tf.keras.layers.Lambda(
            lambda tt: tt[:, output_slice_index, :])

        if self.config.use_skip_connections:
            net = tf.keras.layers.add(skip_connections)

        if not self.config.return_sequences:
            net = lambda_layer(net)

        net = tf.keras.layers.Dense(self.config.n_classes)(net)

        net = tf.keras.layers.Softmax()(net)

        self.net = tf.keras.Model(inputs=x, outputs=net)

        print_summary(self.net)
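
The slicing `Lambda` keeps a single time step: the middle frame when padding is 'same', otherwise (e.g. 'causal' padding) the last frame. A minimal sketch of that indexing on a hypothetical `(N, T, C)` tensor:

    import tensorflow as tf

    # Hypothetical batch: N=2 sequences, T=8 time steps, C=4 channels.
    tt = tf.random.normal((2, 8, 4))

    middle = tt[:, 8 // 2, :]  # 'same' padding: middle step, shape (2, 4)
    last = tt[:, -1, :]        # other padding: last step, shape (2, 4)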
Code Example #6
    def build(self, inputs_shape):
        self.net.build(inputs_shape)

        print_summary(self.net)