Example #1
    import math

    import tensorflow as tf

    def cnn2d(input, training, height):
        """
        Model function for the CNN.
        Accepts input of shape [step_size, time_shift*features] and reshapes it
        to [step_size, time_shift(height), features(width), channel].
        """
        print("shape of cnn input: {}".format(input.get_shape()))
        width = int(input.get_shape()[1]) // height
        # Reshape into the 2D-convolution layout [batch(step), height, width, channel].
        input2d = tf.reshape(input, [-1, height, width, 1])
        print("input transformed to 2D pre-conv shape: {}".format(
            input2d.get_shape()))
        # numLayers and conv2d are project helpers defined elsewhere
        # (see the sketch after this example).
        nlayer = numLayers(height, width)
        print("height:{} width:{} #conv layers: {}".format(
            height, width, nlayer))
        # Start from the smallest power of two >= max(height, width).
        filters = max(2, 2**(math.ceil(math.log(max(height, width), 2))))
        convlayer = input2d

        for i in range(nlayer):
            # Double the filter count at each conv/pool level.
            filters *= 2
            convlayer = conv2d(convlayer, filters, i)

        print("final conv2d: {}".format(convlayer.get_shape()))
        # Drop the 1x1 spatial dimensions left after the final pooling stage.
        convlayer = tf.squeeze(convlayer, [1, 2])
        print("squeezed: {}".format(convlayer.get_shape()))
        dense = tf.compat.v1.layers.dense(
            inputs=convlayer,
            units=convlayer.get_shape()[1] * 2,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(
                stddev=0.01),
            bias_initializer=tf.compat.v1.constant_initializer(0.1),
            activation=tf.nn.elu)
        print("dense: {}".format(dense.get_shape()))
        dropout = tf.compat.v1.layers.dropout(
            inputs=dense,
            rate=0.5,
            training=training)
        return dropout
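
All three examples call two helpers, numLayers and conv2d, that are defined elsewhere in the source project and are not shown on this page. The sketch below reconstructs plausible implementations from how they are used here (the final feature map must shrink to 1x1 for the tf.squeeze calls to be valid); treat the bodies as assumptions, not the project's actual code. Example #2 uses a method variant (self.conv2d) with the same shape and an extra activation argument.

    import math

    import tensorflow as tf

    def numLayers(height, width):
        # Number of stride-2 pooling steps needed to shrink both spatial
        # dimensions to 1, so tf.squeeze(x, [1, 2]) works afterwards.
        # (Assumed; the real helper may compute this differently.)
        return int(math.ceil(math.log(max(height, width), 2)))

    def conv2d(input, filters, i, activation=tf.nn.elu):
        # One conv + pool level: a 2x2 "same" convolution followed by 2x2
        # max pooling whose stride drops to 1 once a spatial dimension has
        # already reached 1, so repeated levels converge on a 1x1 map.
        conv = tf.compat.v1.layers.conv2d(
            name="conv_lv{}".format(i),
            inputs=input,
            filters=filters,
            kernel_size=2,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(
                stddev=0.01),
            bias_initializer=tf.compat.v1.constant_initializer(0.1),
            padding="same",
            activation=activation)
        h_stride = 2 if int(conv.get_shape()[1]) >= 2 else 1
        w_stride = 2 if int(conv.get_shape()[2]) >= 2 else 1
        return tf.compat.v1.layers.max_pooling2d(
            name="pool_lv{}".format(i),
            inputs=conv, pool_size=2, strides=[h_stride, w_stride],
            padding="same")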
Example #2
    # Assumes module-level `import math` and `import tensorflow as tf`, plus a
    # conv2d method and a numLayers helper defined elsewhere in the project.
    def cnn(self, input):
        with tf.compat.v1.variable_scope("cnn"):
            # Add a channel axis: [batch, step(height), feature(width), channel].
            convlayer = tf.expand_dims(input, 3)
            height = int(input.get_shape()[1])
            width = int(input.get_shape()[2])
            # Cap the network at five conv/pool levels.
            nlayer = min(5, numLayers(height, width))
            # Largest power of two <= the per-layer hidden budget, floored at 16.
            filters = max(
                16, 2**(math.floor(math.log(self._num_hidden // nlayer, 2))))
            for i in range(nlayer):
                # Double the filters per level, capped at twice the hidden size.
                filters = min(filters * 2, self._num_hidden * 2)
                convlayer = self.conv2d(convlayer, int(filters), i,
                                        tf.nn.relu6)
            # Flatten the remaining spatial extent for the downstream layers.
            convlayer = tf.compat.v1.layers.flatten(convlayer)
            return convlayer
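
To make the capacity schedule above concrete, here is a small standalone trace of the filter counts. The value of self._num_hidden is hypothetical (128 here); the depth cap of 5 comes from the snippet.

    import math

    num_hidden = 128   # hypothetical stand-in for self._num_hidden
    nlayer = 5         # the min(5, numLayers(height, width)) cap for large inputs
    filters = max(16, 2**(math.floor(math.log(num_hidden // nlayer, 2))))
    for i in range(nlayer):
        filters = min(filters * 2, num_hidden * 2)
        print("level {}: {} filters".format(i, filters))
    # prints 32, 64, 128, 256, 256: doubling per level, capped at 2*num_hidden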
Example #3
    import math

    import tensorflow as tf

    def cnn2d(input, training, height):
        """
        Model function for the CNN.
        Accepts input of shape [step_size, time_shift*features] and reshapes it
        to [step_size, time_shift(height), features(width), channel].
        """
        with tf.compat.v1.variable_scope("conv2d_parent"):
            print("shape of cnn input: {}".format(input.get_shape()))
            width = int(input.get_shape()[1]) // height
            # Reshape into the 2D-convolution layout [batch(step), height, width, channel].
            input2d = tf.reshape(input, [-1, height, width, 1])
            print("input transformed to 2D pre-conv shape: {}".format(
                input2d.get_shape()))
            nlayer = numLayers(height, width)
            print("height:{} width:{} #conv layers: {}".format(
                height, width, nlayer))
            # Smallest power of two >= max(height, width), floored at 16.
            filters = max(16, 2**(math.ceil(math.log(max(height, width), 2))))
            convlayer = input2d
            for i in range(nlayer):
                # Double the filters at each conv/pool level.
                filters *= 2
                convlayer = conv2d(convlayer, filters, i)
            # Original author's notes: tf.nn.batch_normalization can't be used
            # in a mapped function, and tf.contrib.layers.batch_norm seemed
            # ineffective, so no normalization is applied between levels.
            print("final conv2d: {}".format(convlayer.get_shape()))
            # Drop the 1x1 spatial dimensions left after the final pooling stage.
            convlayer = tf.squeeze(convlayer, [1, 2])
            print("squeezed: {}".format(convlayer.get_shape()))
            output = tf.compat.v1.layers.dense(
                name="cnn2d_dense",
                inputs=convlayer,
                units=convlayer.get_shape()[1] * 2,
                kernel_initializer=tf.compat.v1.truncated_normal_initializer(
                    stddev=0.01),
                bias_initializer=tf.compat.v1.constant_initializer(0.1),
                activation=tf.nn.relu6)
            print("dense: {}".format(output.get_shape()))
            output = tf.compat.v1.layers.dropout(inputs=output,
                                                 rate=0.5,
                                                 training=training)
            return output
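
A minimal graph-mode usage sketch for Example #3, assuming TF 2.x with the v1 compatibility layer and the numLayers/conv2d helpers in scope (see the sketch after Example #1). The time_shift and features sizes are illustrative, not from the source.

    import numpy as np
    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    time_shift, features = 16, 8  # hypothetical dimensions
    x = tf.compat.v1.placeholder(
        tf.float32, [None, time_shift * features], name="input")
    is_training = tf.compat.v1.placeholder(tf.bool, name="training")

    out = cnn2d(x, is_training, height=time_shift)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        batch = np.random.rand(32, time_shift * features).astype(np.float32)
        result = sess.run(out, {x: batch, is_training: False})
        print(result.shape)  # (32, 512) with the helper sketch above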