Example 1
def build_classifier(dsvdd):

    filter_size = 3
    n_filters_factor = 2

    dsvdd.trainable = False

    c_x = keras.Input(shape=dsvdd.input.shape[1:], name='c_x')
    y = dsvdd(c_x)

    y = layers.Lambda(lambda x: keras.backend.expand_dims(x, -1))(y)
    y = layers.Conv1D(4 * n_filters_factor, filter_size, padding='same')(y)
    y = layers.LeakyReLU(.3)(y)
    y = layers.AveragePooling1D(padding='same')(y)
    y = layers.Conv1D(8 * n_filters_factor, filter_size, padding='same')(y)
    y = layers.LeakyReLU(.3)(y)
    y = layers.AveragePooling1D(padding='same')(y)
    y = layers.Conv1D(12 * n_filters_factor, filter_size, padding='same')(y)
    y = layers.LeakyReLU(.3)(y)
    y = layers.AveragePooling1D(padding='same')(y)
    y = layers.Conv1D(24 * n_filters_factor, filter_size, padding='same')(y)
    y = layers.LeakyReLU(.3)(y)
    y = layers.AveragePooling1D(padding='same')(y)
    y = layers.Conv1D(24 * n_filters_factor, filter_size, padding='same')(y)
    y = layers.LeakyReLU(.3)(y)
    y = layers.AveragePooling1D(padding='same')(y)

    y = layers.Flatten()(y)
    y = layers.Dense(1)(y)  # relu?
    y = layers.Activation(keras.activations.sigmoid)(y)

    cls = keras.Model(inputs=c_x, outputs=y, name='cls')

    return cls
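
A minimal usage sketch, assuming a placeholder dsvdd backbone (the real pretrained Deep SVDD network is not part of this snippet, so the stand-in shapes below are illustrative only):

from tensorflow import keras
from tensorflow.keras import layers

# Hypothetical stand-in for the pretrained Deep SVDD network.
dsvdd = keras.Sequential([keras.Input(shape=(128,)), layers.Dense(32)], name='dsvdd')

cls = build_classifier(dsvdd)  # the backbone is frozen inside
cls.compile(optimizer='adam', loss='binary_crossentropy')
cls.summary()  # pooled axis shrinks 32 -> 16 -> 8 -> 4 -> 2 -> 1
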
Example 2
 def __init__(self, num_classes):
     """
     Initializes TimeCNNBlock
     :param num_classes: number of classes in input
     """
     super(TimeCNNBlock, self).__init__()
     self.conv1_valid = layers.Conv1D(filters=6,
                                      kernel_size=7,
                                      padding='valid',
                                      activation='sigmoid')
     self.conv1_same = layers.Conv1D(filters=6,
                                     kernel_size=7,
                                     padding='same',
                                     activation='sigmoid')
     self.AvePool1 = layers.AveragePooling1D(pool_size=3)
     self.conv2_valid = layers.Conv1D(filters=12,
                                      kernel_size=7,
                                      padding='valid',
                                      activation='sigmoid')
     self.conv2_same = layers.Conv1D(filters=12,
                                     kernel_size=7,
                                     padding='same',
                                     activation='sigmoid')
     self.AvePool2 = layers.AveragePooling1D(pool_size=3)
     self.flatten = layers.Flatten()
     self.out = layers.Dense(units=num_classes, activation='sigmoid')
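
The snippet above defines only __init__, keeping both 'valid' and 'same' variants of each convolution; how they are routed is not shown. A minimal runnable sketch of the forward pass, assuming the 'same' path throughout:

import tensorflow as tf
from tensorflow.keras import layers

class TimeCNNBlockSketch(tf.keras.layers.Layer):
    def __init__(self, num_classes):
        super().__init__()
        self.conv1 = layers.Conv1D(6, 7, padding='same', activation='sigmoid')
        self.pool1 = layers.AveragePooling1D(pool_size=3)
        self.conv2 = layers.Conv1D(12, 7, padding='same', activation='sigmoid')
        self.pool2 = layers.AveragePooling1D(pool_size=3)
        self.flatten = layers.Flatten()
        self.out = layers.Dense(num_classes, activation='sigmoid')

    def call(self, inputs):
        x = self.pool1(self.conv1(inputs))
        x = self.pool2(self.conv2(x))
        return self.out(self.flatten(x))

# TimeCNNBlockSketch(3)(tf.zeros((2, 60, 4))) has shape (2, 3)
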
Example 3
def define_1DCNN(nchan, L, Fs):
    model = tf.keras.Sequential()
    model.add(layers.InputLayer((L, nchan), batch_size=None))
    model.add(layers.Conv1D(filters=30, kernel_size=64, padding="causal"))
    model.add(layers.LayerNormalization())
    model.add(layers.Activation('elu'))
    model.add(layers.AveragePooling1D(pool_size=2))
    model.add(layers.Dropout(0.2))
    model.add(layers.Conv1D(filters=15, kernel_size=32, padding="causal"))
    model.add(layers.LayerNormalization())
    model.add(layers.Activation('elu'))
    model.add(layers.AveragePooling1D(pool_size=2))
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv1D(filters=10, kernel_size=16, padding="causal"))
    model.add(layers.LayerNormalization())
    model.add(layers.Activation('elu'))
    model.add(layers.AveragePooling1D(pool_size=2))
    model.add(layers.Dropout(0.4))
    model.add(layers.Flatten())
    model.add(layers.Dense(15, activation="tanh"))
    model.add(layers.LayerNormalization())
    model.add(layers.Dense(3))
    model.add(layers.Activation('softmax'))
    model.compile(loss=losses.CategoricalCrossentropy(),
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'],
                  run_eagerly=False)
    return model
Example 4
 def create_discriminator(input_shape):
     inp = keras.Input(input_shape)
     out_map1 = discriminator_block(inp)
     pool1 = layers.AveragePooling1D()(inp)
     out_map2 = discriminator_block(pool1)
     pool2 = layers.AveragePooling1D()(pool1)
     out_map3 = discriminator_block(pool2)
     return keras.Model(inp, [out_map1, out_map2, out_map3])
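
create_discriminator relies on an external discriminator_block; a hypothetical stand-in (not the original block) that makes the three-scale pattern runnable:

from tensorflow import keras
from tensorflow.keras import layers

def discriminator_block(x):
    # Hypothetical block: strided conv + LeakyReLU, then a 1-channel patch map.
    y = layers.Conv1D(32, 4, strides=2, padding='same')(x)
    y = layers.LeakyReLU(0.2)(y)
    return layers.Conv1D(1, 3, padding='same')(y)

disc = create_discriminator((64, 1))
disc.summary()  # three patch maps at full, half, and quarter time resolution
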
Example 5
    def __init__(self, channels, kernel_size, initial_activation=None, normalization=None, downsample_rate=2, regularization=None):
        super(DownResBlock, self).__init__()
        self.out_channels = channels
        self.in_act = initial_activation
        self.norm0 = None
        self.norm1 = None

        if normalization == "layer":
            self.norm0 = layers.LayerNormalization(axis=[2])
            self.norm1 = layers.LayerNormalization(axis=[2])
        elif normalization == "batch":
            self.norm0 = layers.BatchNormalization()
            self.norm1 = layers.BatchNormalization()

        self.conv1 = layers.Conv1D(filters=channels, kernel_size=kernel_size, padding="same",
                                   kernel_regularizer=regularization)
        self.conv1act = layers.LeakyReLU()
        self.conv2 = layers.Conv1D(filters=channels, kernel_size=kernel_size, padding="same", strides=downsample_rate,
                                   kernel_regularizer=regularization)

        #self.pool = layers.AveragePooling1D(pool_size=downsample_rate, strides=downsample_rate, padding="same")

        self.shortcut_conv = layers.Conv1D(filters=channels, kernel_size=1, padding="same",
                                           kernel_regularizer=regularization)
        self.shortcut_pool = layers.AveragePooling1D(pool_size=downsample_rate, strides=downsample_rate, padding="same")
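
Only __init__ is shown for DownResBlock; a plausible call() (an assumption about the intended wiring), with a pre-activation main path and the 1x1-conv-plus-pool shortcut, both of which downsample by the same rate so the addition lines up:

    def call(self, x, training=False):
        h = x
        if self.in_act is not None:
            h = self.in_act(h)
        if self.norm0 is not None:
            h = self.norm0(h, training=training)
        h = self.conv1act(self.conv1(h))
        if self.norm1 is not None:
            h = self.norm1(h, training=training)
        h = self.conv2(h)                              # strided conv downsamples
        s = self.shortcut_pool(self.shortcut_conv(x))  # shortcut downsamples to match
        return h + s
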
Example 6
    def __init__(
        self, **kwargs
    ):  #vocab_size,embed_dim,context_size,stride,drop_rate,fc_dim,num_class,embedding_path=None):
        super(swem_hier, self).__init__()
        if kwargs["embedding_path"] is None:
            embedding_initializer = "uniform"
        elif isinstance(kwargs["embedding_path"], str):
            embedding_initializer = np.load(kwargs["embedding_path"]).astype(np.float32)
            embedding_initializer = tf.keras.initializers.Constant(
                embedding_initializer)
            print("Initialize from", kwargs["embedding_path"])
        elif isinstance(kwargs["embedding_path"], np.ndarray):
            embedding_initializer = tf.keras.initializers.Constant(
                kwargs["embedding_path"])
            print("Initialize from a numpy array")

        if kwargs["is_mlp"]:
            self.embedding = keras.Sequential([
                layers.Embedding(kwargs["vocab_size"],
                                 kwargs["embed_dim"],
                                 embedding_initializer,
                                 mask_zero=True),
                layers.Dense(kwargs["embed_dim"], "relu")
            ])
        else:
            self.embedding = layers.Embedding(kwargs["vocab_size"],
                                              kwargs["embed_dim"],
                                              embedding_initializer,
                                              mask_zero=True)
        self.hier = layers.AveragePooling1D(pool_size=kwargs["context_size"],
                                            strides=kwargs["stride"])
        self.drop = layers.Dropout(kwargs["drop_rate"])
        self.fc = layers.Dense(kwargs["fc_dim"], "relu")
        self.drop2 = layers.Dropout(kwargs["drop_rate"])
        self.classifier = layers.Dense(kwargs["num_class"])
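
Only __init__ is shown for swem_hier; a plausible call() (an assumption, following the SWEM-hier recipe of averaging embeddings over local windows and then max-pooling the window features; it assumes the module's tf import):

    def call(self, x, training=False):
        e = self.embedding(x)          # (batch, seq_len, embed_dim)
        h = self.hier(e)               # average over local context windows
        h = tf.reduce_max(h, axis=1)   # max-pool the window features over time
        h = self.drop(h, training=training)
        h = self.drop2(self.fc(h), training=training)
        return self.classifier(h)
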
Example 7
def build_model_2(time_steps):
    # Activation functions on too many conv layers actually give mediocre results
    model = tf.keras.models.Sequential()
    # Each input covers one month of data
    # The 5 input channels are the four prices plus trading volume
    # The number of time steps is small: start with small kernels and enlarge them; a kernel size of 1 throughout would keep the convolution from ever seeing neighboring steps
    # model.add(layers.Conv1D(16, 2, padding='same', activation='tanh', strides=1, input_shape=(time_steps, 5)))
    # model.add(layers.Conv1D(32, 2, padding='same', activation='tanh', strides=1))

    model.add(
        layers.Conv1D(64, 2, padding='same', activation='tanh', strides=1))
    model.add(
        layers.Conv1D(128, 2, padding='same', activation='tanh', strides=1))
    model.add(layers.AveragePooling1D(2))
    # The filter axis is the one to normalize over; each filter gets its own beta and gamma
    # BatchNormalization performs poorly here, perhaps because the mean and variance of stock prices are unstable
    # model.add(layers.BatchNormalization(axis=2))
    # activation = 'relu': the activations of CuDNNGRU and CuDNNLSTM appear to be fixed internally
    # Several dates are fed in at once, so temporal memory seems unnecessary; drop it for now and deepen the network instead
    # return_sequences decides whether to return a single hidden state or the hidden states of all time steps
    # Without return_sequences on the first GRU, two GRUs cannot be stacked back to back: the output has no time-steps axis and the shapes mismatch
    model.add(CuDNNGRU(128, return_sequences=True))
    # model.add(CuDNNGRU(256, return_sequences=True))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(5, activation='tanh'))
    # tanh in the final output layer converges and predicts far better than relu, perhaps because its (-1, 1) output suits the wide input range
    model.compile(optimizer='adam', loss='mse')
    return model
Example 8
    def __init__(self, kernel: tuple, stride=2, padding=0, scope='APOOL'):
        super(AvgPool, self).__init__(scope)
        dim = len(kernel)
        is_global = kernel[-1] < 0
        if is_global:
            assert padding == 0

        if dim == 1:
            if is_global:
                self.pool = _AP(1)
            else:
                self.pool = layers.AveragePooling1D(kernel, stride)
            pad_fn = layers.ZeroPadding1D
        elif dim == 2:
            if is_global:
                self.pool = _AP(2)
            else:
                self.pool = layers.AveragePooling2D(kernel, stride)
            pad_fn = layers.ZeroPadding2D
        elif dim == 3:
            if is_global:
                self.pool = _AP(3)
            else:
                self.pool = layers.AveragePooling3D(kernel, stride)
            pad_fn = layers.ZeroPadding3D
        else:
            raise Exception('NEBULAE ERROR ⨷ %d-d pooling is not supported.' % dim)

        if isinstance(padding, int):
            padding = dim * [[padding, padding]]
        elif isinstance(padding, (list, tuple)):
            padding = [(padding[2*d], padding[2*d+1]) for d in range(dim-1, -1, -1)]

        self.pad = pad_fn(padding)
Example 9
def build_model_3(time_steps):
    model = tf.keras.models.Sequential()

    model.add(
        layers.Conv1D(64,
                      2,
                      padding='same',
                      strides=1,
                      activation='relu',
                      kernel_initializer='uniform',
                      input_shape=(time_steps, 5)))
    # model.add(layers.Conv1D(32, 2, padding='same', strides=1,activation='relu',kernel_initializer='uniform'))
    # model.add(layers.Conv1D(64, 2, padding='same', strides=1,activation='relu',kernel_initializer='uniform'))
    model.add(
        layers.Conv1D(128,
                      2,
                      padding='same',
                      strides=1,
                      activation='relu',
                      kernel_initializer='uniform'))
    model.add(layers.AveragePooling1D(2))
    model.add(CuDNNGRU(128, return_sequences=True))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(5, activation='tanh'))

    # Use the Adam optimizer; convergence is slow over short horizons
    model.compile(optimizer='adam', loss='mse')
    return model
Example 10
def fit_model(trainX, trainy, epochs=50, batch_size=32, verbose=1):
    n_timesteps, n_features = trainX.shape[1], trainX.shape[2]
    n_outputs = trainy.shape[1]

    norm_layer = layers.experimental.preprocessing.Normalization()
    norm_layer.adapt(trainX)

    model = models.Sequential([
        layers.Input(shape=(n_timesteps, n_features)),
        norm_layer,
        layers.AveragePooling1D(),
        layers.Conv1D(filters=3, kernel_size=200, activation='relu'),
        layers.MaxPooling1D(),
        layers.Conv1D(filters=6, kernel_size=200, activation='relu'),
        layers.GlobalMaxPooling1D(),
        layers.Dense(n_outputs, activation='softmax'),
    ])
    model.summary()
    opt = tf.keras.optimizers.Adam(learning_rate=0.001)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    # fit network
    callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
    model.fit(trainX,
              trainy,
              epochs=epochs,
              batch_size=batch_size,
              verbose=verbose,
              callbacks=[callback])

    return model
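
fit_model references an external scheduler; a hypothetical stand-in with the signature tf.keras.callbacks.LearningRateScheduler expects (hold the rate for ten epochs, then decay exponentially):

import math

def scheduler(epoch, lr):
    # Hypothetical schedule: constant for the first 10 epochs, then exponential decay.
    return lr if epoch < 10 else lr * math.exp(-0.1)
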
Example 11
    def __init__(self, name="discrete_10000_ec_0"):
        super(discrete_10000_ec_0, self).__init__(name=name)
        self.decider = keras.Sequential()
        self.decider.add(layers.Conv1D(6, 2, strides=1, padding='same'))
        self.decider.add(layers.LeakyReLU())
        self.decider.add(layers.Conv1D(7, 4, strides=1, padding='same'))
        self.decider.add(layers.LeakyReLU())
        self.decider.add(layers.Conv1D(8, 6, strides=1, padding='same'))

        self.counters = []
        for i in range(1, 8):
            model = keras.Sequential()
            model.add(layers.Conv1D(8, 2**i, 2**(i - 1), padding='same'))
            model.add(layers.LeakyReLU())

            pool_size = (10000 + 2**(i - 1) - 1) // 2**(i - 1)
            model.add(layers.AveragePooling1D(pool_size=pool_size))
            model.add(layers.LeakyReLU())
            self.counters.append(model)

        self.conv_down = layers.Dense(1)

        self.weighters = keras.Sequential()
        self.weighters.add(layers.BatchNormalization())
        self.weighters.add(layers.Dense(21))
        self.weighters.add(layers.LeakyReLU())
        self.weighters.add(layers.Dense(14))
        self.weighters.add(layers.LeakyReLU())
        self.weighters.add(layers.Dense(7))
        self.weighters.add(layers.Activation('softmax'))

        self.out_layer = layers.Dense(1)
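
The pool_size expression in the counters loop is a ceiling division: for the length-10000 inputs this model assumes, a 'same'-padded Conv1D with stride s = 2**(i - 1) emits ceil(10000 / s) time steps, so the average pool collapses each counter branch to a single step. A quick check:

s = 2 ** (3 - 1)                 # e.g. i = 3
out_len = (10000 + s - 1) // s   # ceiling division: 2500
assert out_len == -(-10000 // s)
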
Example 12
    def build(self, input_shape):
        assert len(input_shape) == 2
        assert len(input_shape[0]) == 3 and len(input_shape[1]) == 3
        # Assertion for high inputs
        assert input_shape[0][1] >= self.mor_kernel_size
        # Assertion for low inputs
        assert input_shape[0][1] // input_shape[1][1] == self.beta
        # channel last for Tensorflow
        assert K.image_data_format() == 'channels_last'

        self.conv_high_high = NCausalConv1D(
            filters=self.high_channels,
            kernel_size=self.mor_kernel_size,
            dilation_rate=self.mor_dilation,
            activation=tf.nn.leaky_relu,
        )
        self.conv_low_high = NCausalConv1D(
            filters=self.high_channels,
            kernel_size=self.rhy_kernel_size,
            dilation_rate=self.rhy_dilation,
            activation=tf.nn.leaky_relu,
        )
        self.conv_high_low = NCausalConv1D(
            filters=self.low_channels,
            kernel_size=self.mor_kernel_size,
            dilation_rate=self.mor_dilation,
            activation=tf.nn.leaky_relu,
        )
        self.conv_low_low = NCausalConv1D(
            filters=self.low_channels,
            kernel_size=self.rhy_kernel_size,
            dilation_rate=self.rhy_dilation,
            activation=tf.nn.leaky_relu,
        )
        # self.bi_gru = BiGRUCell(rnn_size=self.filters,
        #                         dropout=self.dropout)
        self.high_cross = layers.Conv1D(self.filters, 1, padding='same')
        self.low_cross = layers.Conv1D(self.filters, 1, padding='same')
        self.upsampling1d = layers.UpSampling1D(size=self.beta)
        self.averagepooling1d = layers.AveragePooling1D(pool_size=self.beta)

        self.batch_norm1 = layers.BatchNormalization()
        self.batch_norm2 = layers.BatchNormalization()
        self.batch_norm3 = layers.BatchNormalization()
        self.batch_norm4 = layers.BatchNormalization()

        self.dropout_high = layers.Dropout(
            self.dropout,
            [tf.constant(1),
             tf.constant(1),
             tf.constant(self.high_channels)])
        self.dropout_low = layers.Dropout(
            self.dropout,
            [tf.constant(1),
             tf.constant(1),
             tf.constant(self.low_channels)])

        super().build(input_shape)
Example 13
 def createModel(self, learningRate: float):
     # Should go over minutes, not seconds
     input_layer = layers.Input(shape=(self._MINUTES, 4))
     layer = layers.Conv1D(filters=16, kernel_size=16, activation='relu')(input_layer)
     layer = layers.AveragePooling1D(pool_size=2)(layer)
     layer = layers.Conv1D(filters=8, kernel_size=8, activation='relu')(layer)
     layer = layers.AveragePooling1D(pool_size=2)(layer)
     layer = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self._MINUTES))(layer)
     layer = layers.Dense(5, activation='relu')(layer)
     layer = layers.Dense(2, activation='relu')(layer)
     layer = tf.keras.layers.Dropout(0.1)(layer)
     layer = layers.Dense(1, activation='sigmoid')(layer)
     self.model = tf.keras.Model(input_layer, layer)
     self.model.compile(loss='binary_crossentropy',
                        optimizer=tf.keras.optimizers.RMSprop(learning_rate=learningRate),
                        metrics=self._metrics)
     tf.keras.utils.plot_model(self.model,
                               "crypto_model.png",
                               show_shapes=True)
Example 14
def create_vgg_2d(input_shape=(80, 3), num_classes=3, activation="softmax"):
    """2次元データのVGGっぽいの"""
    inputs = layers.Input(input_shape)
    x = inputs
    for ch in [64, 128, 256]:
        x = layers.Conv1D(ch, 3, padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)
        if ch != 256:
            x = layers.AveragePooling1D(2)(x)
    x = layers.GlobalAveragePooling1D()(x)
    x = layers.Dense(num_classes, activation=activation)(x)
    return tf.keras.models.Model(inputs, x)
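
A minimal usage sketch (assuming the module-level tf and layers imports used above); the two pooling stages halve the length axis from 80 to 20 before the global average:

model = create_vgg_2d()  # default input shape (80, 3), 3 classes
model.summary()          # length axis: 80 -> 40 -> 20 -> global average
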
Example 15
    def __init__(self, output_channels, downsample_factor):
        """Initilizer for the unconditional DBlock of the
        GAN-TTS discriminator.

        Paramaters:
            output_channels: The number of output channels
                from the DBlock.
            downsample_factor: The downsampling factor of the
                DBlock.
        """

        super(DBlock, self).__init__()

        self.stack = keras.Sequential([
            layers.AveragePooling1D(pool_size=downsample_factor,
                                    strides=downsample_factor),
            layers.ReLU(),
            layers.Conv1D(filters=output_channels,
                          kernel_size=3,
                          strides=1,
                          dilation_rate=1,
                          padding='same'),
            layers.ReLU(),
            layers.Conv1D(filters=output_channels,
                          kernel_size=3,
                          strides=1,
                          dilation_rate=2,
                          padding='same')
        ])

        self.residual = keras.Sequential([
            layers.Conv1D(filters=output_channels,
                          kernel_size=3,
                          strides=1,
                          dilation_rate=1,
                          padding='same'),
            layers.AveragePooling1D(pool_size=downsample_factor,
                                    strides=downsample_factor)
        ])
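
Only the two branches are defined above; a plausible call() for DBlock (an assumption consistent with the GAN-TTS architecture, where each D-block sums its main stack with its residual path):

    def call(self, x):
        return self.stack(x) + self.residual(x)
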
Example 16
    def __init__(self, params):
        name = params['model_name']
        super(discrete_10000_ec_4, self).__init__(name=name)

        # float
        dropout_rate = params['dropout_rate']

        # list with 2 element: start and end
        # [0:1] = V
        # [1:2] = W
        # [0:2] =  V and W
        self.pchoice = params['pchoice']

        self.d = layers.Conv1D(1, 2, strides=1, padding='same')
        self.d2 = layers.Conv1D(1, 3, strides=1, padding='same')
        self.semilocal = layers.Conv1D(1, 16, strides=1, padding='same')

        self.decider = keras.Sequential()
        self.decider.add(layers.Conv1D(6, 2, strides=1, padding='same'))
        self.decider.add(layers.ELU())
        self.decider.add(layers.Dropout(rate=dropout_rate))
        self.decider.add(layers.Conv1D(7, 4, strides=1, padding='same'))
        self.decider.add(layers.LeakyReLU())
        self.decider.add(layers.Dropout(rate=dropout_rate))
        self.decider.add(layers.Conv1D(8, 6, strides=1, padding='same'))

        self.counters = []
        for i in range(1, 8):
            model = keras.Sequential()
            model.add(layers.Conv1D(8, 2**i, 2**(i - 1), padding='same'))
            model.add(layers.ELU())
            model.add(layers.Dropout(rate=dropout_rate))
            pool_size = (10000 + 2**(i - 1) - 1) // 2**(i - 1)
            model.add(layers.AveragePooling1D(pool_size=pool_size))
            model.add(layers.LeakyReLU())
            self.counters.append(model)

        self.conv_down = layers.Dense(1)

        self.weighters = keras.Sequential()
        self.weighters.add(layers.BatchNormalization())
        self.weighters.add(layers.Dense(21))
        self.weighters.add(layers.ELU())
        self.weighters.add(layers.Dropout(rate=dropout_rate))
        self.weighters.add(layers.Dense(14))
        self.weighters.add(layers.LeakyReLU())
        self.weighters.add(layers.Dropout(rate=dropout_rate))
        self.weighters.add(layers.Dense(7))
        self.weighters.add(layers.Activation('softmax'))

        self.out_layer = layers.Dense(1)
Example 17
 def __call__(self, x):
     """
     x: input tensor.
     returns: output tensor for the block.
     """
     x = layers.BatchNormalization(epsilon=1.001e-5,
                                   name=self.name + "_bn")(x)
     x = layers.Activation('relu', name=self.name + "_relu")(x)
     x = layers.Conv1D(int(x.shape[-1] * self.reduction),
                       1,
                       use_bias=False,
                       name=self.name + "_conv")(x)
     x = layers.AveragePooling1D(2, strides=2, name=self.name + "_pool")(x)
     return x
Example 18
def build_model_resnet(h):
    try:
        callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                    min_delta=0.0001,
                                                    patience=10)

        inputs = layers.Input(shape=(h.get('len_slice'), 2))

        t = layers.BatchNormalization()(inputs)
        t = layers.GaussianNoise(0.1)(t)
        t = layers.Conv1D(kernel_size=h.Int('kernel_1', 2, 20, 1, default=7),
                          filters=h.Int('filters_1', 10, 200, 10, default=64),
                          activation=h.Choice(
                              'activation_1',
                              values=['relu', 'tanh', 'sigmoid'],
                              default='relu'))(t)
        t = layers.MaxPool1D(2)(t)
        f_b_1 = h.Int('filters_block_1', 10, 200, 10, default=64)
        t = proj_block(t, f_b_1)
        for i in range(h.Int('nb_id_1', 1, 5, 1, default=2)):
            t = id_block(t, f_b_1)

        f_b_2 = h.Int('filters_block_2', 10, 200, 10, default=128)
        t = proj_block(t, f_b_2)
        for j in range(h.Int('nb_id_2', 1, 5, 1, default=3)):
            t = id_block(t, f_b_2)
        t = layers.AveragePooling1D(2)(t)
        t = layers.Flatten()(t)
        outputs = layers.Dense(3, activation='softmax')(t)

        model = tf.keras.models.Model(inputs, outputs)

        model.compile(optimizer=optimizers.Adam(learning_rate=0.0001),
                      loss=tf.keras.losses.categorical_crossentropy,
                      metrics=['accuracy'])
    except Exception:
        model = models.Sequential()
        model.add(
            layers.BatchNormalization(input_shape=(h.get('len_slice'), 2)))
        model.add(layers.Flatten())
        model.add(layers.Dense(3, activation='softmax'))

        model.compile(optimizer=optimizers.Adam(learning_rate=0.0001),
                      loss=tf.keras.losses.categorical_crossentropy,
                      metrics=['accuracy'])

    return model
Example 19
  def __init__(self, encoder_blocks, encoder_layers, encoder_channels, kernel_size, encoder_pool, **kwargs):
    super().__init__(**kwargs)

    self.encoder_blocks = encoder_blocks
    self.encoder_layers = encoder_layers
    self.encoder_channels = encoder_channels
    self.kernel_size = kernel_size
    self.encoder_pool = encoder_pool

    dilated_layers = []
    for _ in range(self.encoder_blocks):
      for i in range(self.encoder_layers):
        dilation = 2 ** i
        dilated_layers.append(DilatedResConv(channels=self.encoder_channels, kernel_size=self.kernel_size, dilation=dilation))
    self.dilated_convs = Sequential(layers=dilated_layers)
    self.start = layers.Conv1D(self.encoder_channels, self.kernel_size)  # default padding is 'valid', for non-causal convolution
    self.conv_1x1 = layers.Conv1D(self.encoder_channels, kernel_size = 1)
    self.pool = layers.AveragePooling1D(self.encoder_pool, padding='same')
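
Only __init__ is shown; a plausible call() (an assumption) following the usual WaveNet-autoencoder encoder order of front convolution, dilated residual stack, 1x1 projection, then temporal pooling:

  def call(self, x):
    x = self.start(x)
    x = self.dilated_convs(x)
    x = self.conv_1x1(x)
    return self.pool(x)
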
Example 20
def create_multiview_model_old(base_model, multiviews_num, input_shape, output_size, use_lstm):

    assert base_model in ("voxnet", "pointnet")

    if base_model == "voxnet":
        base_model = create_voxnet_model_homepage(input_shape, output_size)
    elif base_model == "pointnet":
        base_model = create_point_net(input_shape, output_size)

    model = models.Sequential()
    model.add(layers.TimeDistributed(base_model, input_shape=(multiviews_num,) + input_shape))
    if use_lstm:
        model.add(layers.LSTM(8, activation="relu"))
    else:
        model.add(layers.AveragePooling1D(multiviews_num))
        model.add(layers.Flatten())
    model.add(layers.Dense(output_size))

    return model
Example 21
    def build_model(self):
        ECG = layers.Input(shape=self.input_shape, name='ECG')

        x = kanres_init(ECG, 64, 32, 8, 3, 1)
        x = layers.AveragePooling1D(pool_size=2,
                                    data_format='channels_last')(x)
        for _ in range(8):
            x = kanres_module(x, 64, 32, 50, 50, 1)
        x = layers.GlobalAveragePooling1D()(x)

        output = layers.Dense(self.output_size)(x)

        model = Model(inputs=ECG, outputs=output)

        self.model = model
Example 22
def transition_block(x, reduction, reg, name):
    """A transition block.
    # Arguments
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.
    # Returns
        output tensor for the block.
    """
    bn_axis = 2
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    x = layers.Conv1D(int(backend.int_shape(x)[bn_axis] * reduction),
                      1,
                      use_bias=False,
                      kernel_regularizer=reg,
                      name=name + '_conv')(x)
    x = layers.AveragePooling1D(2, strides=2, name=name + '_pool')(x)
    return x
Example 23
    def build_discriminator(self):
        dropout_rate = 0.0
        activation = "relu"

        D = Models.Sequential(name="Discriminator")
        D.add(Layers.Input(shape=(self.output_timesteps,)))
        D.add(Layers.Reshape(target_shape=(self.output_timesteps, 1)))

        D.add(Layers.Conv1D(filters=64, kernel_size=2, activation=activation))
        D.add(Layers.Conv1D(filters=64, kernel_size=2, activation=activation))
        D.add(Layers.AveragePooling1D(pool_size=2))
        D.add(Layers.Dropout(rate=dropout_rate))

        D.add(Layers.Flatten())
        D.add(Layers.Dense(units=200, activation="relu"))
        D.add(Layers.Dense(units=20, activation="relu"))
        D.add(Layers.Dense(1, activation="sigmoid"))

        D.summary()

        return D
Example 24
    def build_model(
        self,
        optimizer="adam",
        dropout_rate=0.005,
        neurons=50,
        kernel_size=3,
        pool_size=2,
        activation="relu",
        conv_stacks=3,
    ):
        model = M.Sequential()

        model.add(L.Input(shape=(self.input_timesteps, self.input_features)))
        for _ in range(conv_stacks):
            model.add(
                L.Conv1D(
                    filters=neurons, kernel_size=kernel_size, activation=activation
                )
            )
            model.add(
                L.Conv1D(
                    filters=neurons, kernel_size=kernel_size, activation=activation
                )
            )
            model.add(L.AveragePooling1D(pool_size=pool_size))
            model.add(L.Dropout(rate=dropout_rate))
        model.add(L.LSTM(units=20, return_sequences=True))
        model.add(L.Dropout(rate=dropout_rate))
        model.add(L.LSTM(units=5, return_sequences=True))
        model.add(L.Dropout(rate=dropout_rate))
        model.add(L.Flatten())
        model.add(L.Dense(units=neurons))
        model.add(L.Dense(units=self.output_timesteps, activation="linear"))

        mape = "mean_absolute_percentage_error"
        model.compile(loss=mape, optimizer=optimizer, metrics=[mape])

        return model
Example 25
def build_model_1(time_steps):
    model = tf.keras.models.Sequential()
    # The number of time steps is small: start with small kernels and enlarge them; a kernel size of 1 throughout would keep the convolution from ever seeing neighboring steps
    model.add(
        layers.Conv1D(16,
                      2,
                      padding='same',
                      strides=1,
                      input_shape=(time_steps, 5)))
    model.add(layers.Conv1D(32, 2, padding='same', strides=1))
    model.add(layers.Conv1D(64, 2, padding='same', strides=1))
    # Note: the second size-2 convolution already spans three days of candlesticks, so not many layers are needed
    model.add(layers.Conv1D(128, 2, padding='same', strides=1))
    model.add(layers.AveragePooling1D(2))
    model.add(CuDNNGRU(128, return_sequences=True))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(128, activation='relu'))
    # model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(5, activation='tanh'))

    # Use the Adam optimizer; convergence is slow over short horizons
    model.compile(optimizer='adam', loss='mse')
    return model
Example 26
 def doublePool(x, pool_size=2, strides=None):
     return layers.concatenate([
         layers.MaxPooling1D(pool_size, strides)(x),
         layers.AveragePooling1D(pool_size, strides)(x)
     ])
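
doublePool concatenates the max- and average-pooled views on the channel axis, so the length halves while the channel count doubles; a quick shape check (assuming doublePool and the layers import are available at module level):

import tensorflow as tf

x = tf.keras.Input(shape=(32, 8))
y = doublePool(x)
print(y.shape)  # (None, 16, 16): length halved, channels doubled
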
Example 27
def build_dense_net(blocks,
                    output_dim,
                    input_shape=None,
                    growth_rate=32,
                    lr=0.01,
                    dpt=0.5,
                    mid_dpt=0.0,
                    cvf=128,
                    cvs=7,
                    mid_cvs=3,
                    use_l2=1,
                    l2_val=0.001):
    # In 2D convolution with keras, we pass
    # the channels axis here. The equivalent
    # axis for us is the second one, which
    # is the number of features describing each
    # time step

    bn_axis = 2

    # If we want to use an l2 penalty, reg is
    # initialized to the usual keras l2 regularizer.
    # If not, we use a lambda function that always
    # returns 0 (no penalty)

    if use_l2 == 1:
        reg = regularizers.l2(l2_val)
    else:
        reg = lambda weight_matrix: 0.0

    input_tensor = layers.Input(shape=input_shape)

    x = layers.Conv1D(cvf,
                      cvs,
                      strides=1,
                      use_bias=False,
                      kernel_regularizer=reg,
                      padding="same",
                      name='conv1/conv')(input_tensor)

    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name='conv1/bn')(x)

    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.AveragePooling1D(3, name='pool1')(x)

    for i, val in enumerate(blocks):
        x = dense_block(x,
                        val,
                        growth_rate,
                        mid_dpt,
                        mid_cvs,
                        reg,
                        name=f'dense_block_{i}')
        x = transition_block(x, 0.5, reg, name=f'transition_block_{i}')

    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    x = layers.Dropout(dpt)(x)
    x = layers.Activation('relu', name='relu')(x)
    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)

    if output_dim > 1:
        x = layers.Dense(output_dim, activation='softmax', name='fc1000')(x)
    else:
        x = layers.Dense(output_dim, activation='sigmoid', name='fc1000')(x)

    model = models.Model(input_tensor, x, name="densenet")

    return model
Example 28
    'Mangrove Warbler': 261,
    'Myrtle Warbler': 262,
    'Yellow-throated Vireo': 263
}

INV_BIRD_CODE = {v: k for k, v in BIRD_CODE.items()}

SAMPLE_RATE = 32000
NUM_CLASSES_SOUND = 264
PERIOD = 5

base_model = ResNet50(include_top=False, weights=None)
x = base_model.output
x = tf.reduce_mean(x, axis=2)
x1 = L.MaxPooling1D(pool_size=3, strides=1, padding='same')(x)
x2 = L.AveragePooling1D(pool_size=3, strides=1, padding='same')(x)
x = x1 + x2
x = L.Dropout(0.5)(x)
x = L.Dense(1024, activation='relu')(x)
x = L.Dropout(0.5)(x)

norm_att = L.Conv1D(filters=NUM_CLASSES_SOUND, kernel_size=1,
                    padding='same')(x)
norm_att = tf.keras.activations.tanh(norm_att / 10) * 10
norm_att = tf.keras.activations.softmax(norm_att, axis=-2)
segmentwise_output = L.Conv1D(filters=NUM_CLASSES_SOUND,
                              kernel_size=1,
                              padding='same',
                              activation='sigmoid',
                              name='segmentwise_output')(x)
clipwise_output = tf.math.reduce_sum(norm_att * segmentwise_output, axis=1)
Example 29
train_x = tf.convert_to_tensor(
    data.get_description('..\\track1_round1_train_20210222.csv'))
train_y = tf.convert_to_tensor(
    data.get_label('..\\track1_round1_train_20210222.csv'))
print(train_x.shape)

test_x, test_y = data.get_test('..\\track1_round1_train_20210222.csv')

rnn_units = 32

input_layer = keras.Input(shape=(104, 1))
# x = layers.Embedding(input_dim=859, output_dim=10)(input_layer)
x = layers.Conv1D(filters=rnn_units, kernel_size=4,
                  padding='valid')(input_layer)
x = layers.Conv1D(filters=rnn_units, kernel_size=4, padding='valid')(x)
x = layers.AveragePooling1D()(x)

# attention
attention_pre = layers.Dense(32, name='attention_vec')(x)  # [b_size, maxlen, 32]
attention_probs = layers.Softmax()(attention_pre)  # [b_size, maxlen, 32]
attention_mul = layers.Lambda(lambda x: x[0] * x[1])([attention_probs, x])

y = layers.Dense(32, name='attention_vec1')(attention_mul)
y = layers.Softmax()(y)
attention_mul = layers.Lambda(lambda x: x[0] * x[1])([y, attention_mul])

y = layers.Dense(32, name='attention_vec2')(attention_mul)
y = layers.Softmax()(y)
attention_mul = layers.Lambda(lambda x: x[0] * x[1])([y, attention_mul])

y = layers.Dense(32, name='attention_vec3')(attention_mul)
Example 30
def InceptionV3(include_top=True, weights='hasc', input_shape=None, pooling=None, classes=6, classifier_activation='softmax'):
    if input_shape is None:
        input_shape = (256*3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    inputs = layers.Input(shape=input_shape)

    x = Conv1DBN(32, 3, strides=2, padding='valid')(inputs)
    x = Conv1DBN(32, 3, padding='valid')(x)
    x = Conv1DBN(64, 3)(x)
    x = layers.MaxPooling1D(3, strides=2)(x)

    x = Conv1DBN(80, 1, padding='valid')(x)
    x = Conv1DBN(192, 3, padding='valid')(x)
    x = layers.MaxPooling1D(3, strides=2)(x)

    # mixed 0
    branch1x1 = Conv1DBN(64, 1)(x)

    branch5x5 = Conv1DBN(48, 1)(x)
    branch5x5 = Conv1DBN(64, 5)(branch5x5)

    branch3x3dbl = Conv1DBN(64, 1)(x)
    branch3x3dbl = Conv1DBN(96, 3)(branch3x3dbl)
    branch3x3dbl = Conv1DBN(96, 3)(branch3x3dbl)

    branch_pool = layers.AveragePooling1D(3, strides=1, padding='same')(x)
    branch_pool = Conv1DBN(32, 1)(branch_pool)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], name='mixed0')

    # mixed 1
    branch1x1 = Conv1DBN(64, 1)(x)

    branch5x5 = Conv1DBN(48, 1)(x)
    branch5x5 = Conv1DBN(64, 5)(branch5x5)

    branch3x3dbl = Conv1DBN(64, 1)(x)
    branch3x3dbl = Conv1DBN(96, 3)(branch3x3dbl)
    branch3x3dbl = Conv1DBN(96, 3)(branch3x3dbl)

    branch_pool = layers.AveragePooling1D(3, strides=1, padding='same')(x)
    branch_pool = Conv1DBN(64, 1)(branch_pool)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], name='mixed1')

    # mixed 2
    branch1x1 = Conv1DBN(64, 1)(x)

    branch5x5 = Conv1DBN(48, 1)(x)
    branch5x5 = Conv1DBN(64, 5)(branch5x5)

    branch3x3dbl = Conv1DBN(64, 1)(x)
    branch3x3dbl = Conv1DBN(96, 3)(branch3x3dbl)
    branch3x3dbl = Conv1DBN(96, 3)(branch3x3dbl)

    branch_pool = layers.AveragePooling1D(3, strides=1, padding='same')(x)
    branch_pool = Conv1DBN(64, 1)(branch_pool)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], name='mixed2')

    # mixed 3
    branch3x3 = Conv1DBN(384, 3, strides=2, padding='valid')(x)

    branch3x3dbl = Conv1DBN(64, 1)(x)
    branch3x3dbl = Conv1DBN(96, 3)(branch3x3dbl)
    branch3x3dbl = Conv1DBN(96, 3, strides=2, padding='valid')(branch3x3dbl)

    branch_pool = layers.MaxPooling1D(3, strides=2)(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool], name='mixed3')

    # mixed 4
    branch1x1 = Conv1DBN(192, 1)(x)

    branch7x7 = Conv1DBN(128, 1)(x)
    branch7x7 = Conv1DBN(128, 1)(branch7x7)
    branch7x7 = Conv1DBN(128, 7)(branch7x7)

    branch7x7dbl = Conv1DBN(128, 1)(x)
    branch7x7dbl = Conv1DBN(128, 7)(branch7x7dbl)
    branch7x7dbl = Conv1DBN(128, 1)(branch7x7dbl)
    branch7x7dbl = Conv1DBN(128, 7)(branch7x7dbl)
    branch7x7dbl = Conv1DBN(128, 1)(branch7x7dbl)

    branch_pool = layers.AveragePooling1D(3, strides=1, padding='same')(x)
    branch_pool = Conv1DBN(192, 1)(branch_pool)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], name='mixed4')

    # mixed 5, 6
    for i in range(2):
        branch1x1 = Conv1DBN(192, 1)(x)

        branch7x7 = Conv1DBN(160, 1)(x)
        branch7x7 = Conv1DBN(160, 1)(branch7x7)
        branch7x7 = Conv1DBN(192, 7)(branch7x7)

        branch7x7dbl = Conv1DBN(160, 1)(x)
        branch7x7dbl = Conv1DBN(160, 7)(branch7x7dbl)
        branch7x7dbl = Conv1DBN(160, 1)(branch7x7dbl)
        branch7x7dbl = Conv1DBN(160, 7)(branch7x7dbl)
        branch7x7dbl = Conv1DBN(192, 1)(branch7x7dbl)

        branch_pool = layers.AveragePooling1D(3, strides=1, padding='same')(x)
        branch_pool = Conv1DBN(192, 1)(branch_pool)
        x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], name='mixed' + str(5 + i))

    # mixed 7
    branch1x1 = Conv1DBN(192, 1)(x)

    branch7x7 = Conv1DBN(192, 1)(x)
    branch7x7 = Conv1DBN(192, 1)(branch7x7)
    branch7x7 = Conv1DBN(192, 7)(branch7x7)

    branch7x7dbl = Conv1DBN(192, 1)(x)
    branch7x7dbl = Conv1DBN(192, 7)(branch7x7dbl)
    branch7x7dbl = Conv1DBN(192, 1)(branch7x7dbl)
    branch7x7dbl = Conv1DBN(192, 7)(branch7x7dbl)
    branch7x7dbl = Conv1DBN(192, 1)(branch7x7dbl)

    branch_pool = layers.AveragePooling1D(3, strides=1, padding='same')(x)
    branch_pool = Conv1DBN(192, 1)(branch_pool)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], name='mixed7')

    # mixed 8
    branch3x3 = Conv1DBN(192, 1)(x)
    branch3x3 = Conv1DBN(320, 3, strides=2, padding='valid')(branch3x3)

    branch7x7x3 = Conv1DBN(192, 1)(x)
    branch7x7x3 = Conv1DBN(192, 1)(branch7x7x3)
    branch7x7x3 = Conv1DBN(192, 7)(branch7x7x3)
    branch7x7x3 = Conv1DBN(192, 3, strides=2, padding='valid')(branch7x7x3)

    branch_pool = layers.MaxPooling1D(3, strides=2)(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool], name='mixed8')

    # mixed 9, 10
    for i in range(2):
        branch1x1 = Conv1DBN(320, 1)(x)

        branch3x3 = Conv1DBN(384, 1)(x)
        branch3x3_1 = Conv1DBN(384, 1)(branch3x3)
        branch3x3_2 = Conv1DBN(384, 3)(branch3x3)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2], name='mixed9_' + str(i))

        branch3x3dbl = Conv1DBN(448, 1)(x)
        branch3x3dbl = Conv1DBN(384, 3)(branch3x3dbl)
        branch3x3dbl_1 = Conv1DBN(384, 1)(branch3x3dbl)
        branch3x3dbl_2 = Conv1DBN(384, 3)(branch3x3dbl)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2])

        branch_pool = layers.AveragePooling1D(3, strides=1, padding='same')(x)
        branch_pool = Conv1DBN(192, 1)(branch_pool)
        x = layers.concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool], name='mixed' + str(9 + i))

    # Classification block
    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)
    y = layers.Dense(classes, activation=classifier_activation, name='predictions')(x)

    model = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/inceptionv3/inceptionv3_hasc_weights_{}_{}.hdf5'.format(int(input_shape[0]),
                                                                                       int(input_shape[1]))

        # Initialize from HASC weights or a weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # When the top is not included
    if not include_top:
        if pooling is None:
            # Remove the top
            model = Model(inputs=model.input, outputs=model.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model = Model(inputs=model.input, outputs=model.layers[-3].output)

    return model
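
A minimal usage sketch (it assumes the Conv1DBN helper and module imports this snippet relies on): building the headless model with average pooling yields a 2048-dimensional feature vector, matching the channel count of the final mixed10 concatenation.

model = InceptionV3(include_top=False, weights=None, pooling='avg')
print(model.output_shape)  # (None, 2048)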