Example #1
File: mm.py Project: hsha0/M-M
def create_lstm_model():
    inputs = tf.keras.Input(shape=(FLAGS.interval, 3))

    embeddings = layers.Embedding(SEQUENCE_LENGTH,
                                  FLAGS.embedding_size,
                                  input_length=FLAGS.interval)(inputs)
    print(embeddings.shape)

    if FLAGS.sum_embeddings:
        reshape = tf.math.reduce_sum(embeddings, axis=2)
    else:
        reshape = layers.Reshape(
            (FLAGS.interval, 3 * FLAGS.embedding_size))(embeddings)
    lstm = layers.LSTM(FLAGS.num_cells, return_sequences=True)(reshape)

    for i in range(FLAGS.num_lstm_layers - 2):
        dropout = layers.Dropout(0.2)(lstm)
        lstm = layers.LSTM(FLAGS.num_cells, return_sequences=True)(dropout)

    dropout = layers.Dropout(0.2)(lstm)
    lstm = layers.LSTM(FLAGS.num_cells)(dropout)
    dropout = layers.Dropout(0.2)(lstm)
    notes = layers.Softmax(name='notes')(layers.Dense(256)(dropout))
    velocity = layers.Softmax(name='velocity')(layers.Dense(
        len(VELOCITY))(dropout))
    time = layers.Softmax(name='time')(layers.Dense(101)(dropout))

    model = tf.keras.Model(inputs=inputs, outputs=[notes, velocity, time])

    model.summary()

    return model
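For reference, the snippet above relies on module-level FLAGS and constants that are not shown; a minimal sketch of how they might be declared (flag names taken from the code, default values hypothetical):

from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_integer('interval', 32, 'Timesteps per training window.')
flags.DEFINE_integer('embedding_size', 64, 'Embedding size for each of the 3 input features.')
flags.DEFINE_boolean('sum_embeddings', False, 'Sum the 3 feature embeddings instead of concatenating them.')
flags.DEFINE_integer('num_cells', 512, 'Units per LSTM layer.')
flags.DEFINE_integer('num_lstm_layers', 3, 'Total number of stacked LSTM layers.')

SEQUENCE_LENGTH = 256        # hypothetical vocabulary size for the Embedding layer
VELOCITY = list(range(128))  # hypothetical velocity vocabulary (only its length is used)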
Example #2
    def call(self, input_ids, **kwargs):
        mu_theta, logsigma_theta, kl_theta = self.encoder(input_ids)

        print(mu_theta, logsigma_theta, kl_theta)
        z = self.sampler([mu_theta, logsigma_theta])
        theta = layers.Softmax(axis=-1)(z)  # (batch, num_topic)

        beta = tf.einsum('TE,VE->TV', self.alpha,
                         self.rho)  # (num_topic, num_vocab)
        beta = layers.Softmax(axis=-1)(beta)

        lookup_matrix = self.decoder([theta, beta])  # (batch, num_vocab)
        lookup_matrix = tf.einsum('BV->VB',
                                  lookup_matrix)  # (num_vocab, batch')
        recon_loss = tf.nn.embedding_lookup(
            lookup_matrix, input_ids)  # (batch, seq_size, batch')
        recon_loss = self.encoder.mask_layer(input_ids) * recon_loss
        recon_loss = tf.einsum('BSN->BN', recon_loss)
        recon_loss = -tf.linalg.diag_part(recon_loss)

        loss = tf.reduce_mean(recon_loss) + tf.reduce_mean(kl_theta)

        self.add_loss(loss)
        self.add_metric(recon_loss, name='recon_loss', aggregation='mean')
        self.add_metric(kl_theta, name='kl_theta', aggregation='mean')

        return theta
Example #3
def build_2d_model(args):
    l2r = 1e-9

    T, X = tfkl.Input((N_TOKS,)), tfkl.Input((H, W, 3 + N_OBJS))

    ti = tfkl.Embedding(N_VOCAB, N_EMBED, input_length=N_TOKS)(T)
    print(ti.shape)
    th = tfkm.Sequential([
        tfkl.Bidirectional(tfkl.CuDNNLSTM(128, return_sequences=True)),
        tfkl.Bidirectional(tfkl.CuDNNLSTM(128, return_sequences=True)),
        tfkl.Conv1D(256, (1,), activation='elu', kernel_regularizer=tfkr.l2(l2r)),
        tfkl.Conv1D(6, (1,), activation=None, kernel_regularizer=tfkr.l2(l2r)),
        tfkl.Softmax(axis=-2, name='lstm_attn'),
    ], name='lstm_layers')(ti)

    tia = tfkb.sum(tfkl.Reshape((N_TOKS, 1, -1))(th) * tfkl.Reshape((N_TOKS, N_EMBED, 1))(ti), axis=-3)

    Xi = tfkb.sum(X[:, :, :, 3:], axis=-1, keepdims=True)

    s1 = tfkl.Dense(N_OBJS, activation='softmax')(tia[:, :, 0])
    s1b = tfkm.Sequential([tfkl.RepeatVector(W * H), tfkl.Reshape((H, W, N_OBJS))])(s1)
    Xs1 = tfkb.sum(X[:, :, :, 3:] * s1b, axis=-1, keepdims=True)

    s2 = tfkl.Dense(3)(tia[:, :, 1])
    s2b = tfkm.Sequential([tfkl.RepeatVector(W * H), tfkl.Reshape((H, W, 3))])(s2)
    s2c = tfkb.sum(s2b * X[:, :, :, 2:3] - (1 - Xi) * 20, axis=-1, keepdims=True)
    Xs2 = tfkm.Sequential([tfkl.Reshape((-1, 1)), tfkl.Softmax(axis=-2), tfkl.Reshape((H, W, 1))])(s2c)
    Xs2 = Xs2 - tfkb.max(Xs2, axis=[1, 2], keepdims=True)

    s3 = tfkl.Dense(N_OBJS, activation='softmax')(tia[:, :, 2])
    s3b = tfkm.Sequential([tfkl.RepeatVector(W * H), tfkl.Reshape((H, W, N_OBJS))])(s3)
    Xs3 = tfkb.sum(X[:, :, :, 3:] * s3b, axis=-1, keepdims=True)

    s4 = tfkl.Dense(16, activation='softmax')(tia[:, :, 3])
    s4b = tfkm.Sequential([tfkl.RepeatVector(W * H), tfkl.Reshape((H, W, 16))])(s4)
    Xs4 = s4b * Xi

    s5 = tfkl.Dense(16, activation='softmax')(tia[:, :, 4])
    s5b = tfkm.Sequential([tfkl.RepeatVector(W * H), tfkl.Reshape((H, W, 16))])(s5)
    Xs5 = s5b * Xi

    s6 = tfkl.Dense(16, activation='softmax')(tia[:, :, 5])
    s6b = tfkm.Sequential([tfkl.RepeatVector(W * H), tfkl.Reshape((H, W, 16))])(s6)
    Xs6 = s6b * Xi

    xt = tfkl.concatenate([Xi, Xs1, Xs2, Xs3, Xs4, Xs5, Xs6], axis=-1)

    attn = unet(xt)
    Y = tfkb.sum(attn * X[:, :, :, :2], axis=[1, 2])

    model = tfkm.Model(inputs=[T, X], outputs=[Y])

    def acc(y_pred, y_true):
        return tfkb.mean(tfkb.min(tfkb.cast((tfkb.abs(y_true-y_pred) < args.tol), 'float32'), axis=1))

    model.compile(tfk.optimizers.Adam(args.lr), 'mse', metrics=[acc])

    return model
Example #4
def genmodel():
    cnn = tf.keras.Sequential()
    cnn.add(
        layers.TimeDistributed(layers.Conv2D(96, (2, 2),
                                             strides=(1, 1),
                                             activation='relu'),
                               input_shape=(672, 9, 5, 2))
    )  # (5,9,2,672) is the exact shape that data.mat has when loaded with loadmat. Values should be added dynamically #TODO
    cnn.add(
        layers.TimeDistributed(
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1))))
    cnn.add(layers.TimeDistributed(layers.Conv2D(96, (2, 2), strides=(1, 1))))
    cnn.add(
        layers.TimeDistributed(
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1))))
    cnn.add(layers.TimeDistributed(layers.Flatten()))
    cnn.add(layers.LSTM(units=512, input_shape=(10, 512)))  # input_shape is ignored here; the shape is inferred from the previous layer
    cnn.add(layers.Dense(units=64))
    cnn.add(layers.Dropout(rate=0.33))
    cnn.add(layers.Dense(units=3))
    cnn.add(layers.Softmax())
    cnn.build()
    cnn.compile(optimizer='Adam',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return cnn
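The #TODO in the comment above suggests deriving the input shape from the data instead of hard-coding it; a minimal sketch (the variable key 'data' inside data.mat is a hypothetical name):

import numpy as np
from scipy.io import loadmat

mat = loadmat('data.mat')
arr = mat['data']                      # hypothetical key; shape (5, 9, 2, 672) per the comment above
arr = np.transpose(arr, (3, 1, 0, 2))  # -> (672, 9, 5, 2), matching input_shape in genmodel()
print(arr.shape)                       # arr.shape could then replace the hard-coded tuple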
Example #5
    def build(self, input_shape):
        (_, h, w, num_features) = input_shape
        self.transposer = layers.Permute((3, 1, 2))
        self.reshaper = layers.Reshape((num_features, h * w))
        self.softmaxer = layers.Softmax(axis=-1)
        self.unflattener = layers.Reshape((num_features, h, w))
        self.untransposer = layers.Permute((2, 3, 1))
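The build() above only creates sub-layers; a hypothetical standalone version (an assumption, not taken from the source class) shows how they would typically be chained to apply a softmax over the h*w spatial positions of each feature map:

import tensorflow as tf
from tensorflow.keras import layers

class SpatialSoftmax2D(layers.Layer):
    """Hypothetical layer: softmax over spatial positions, per feature map."""
    def build(self, input_shape):
        (_, h, w, num_features) = input_shape
        self.transposer = layers.Permute((3, 1, 2))
        self.reshaper = layers.Reshape((num_features, h * w))
        self.softmaxer = layers.Softmax(axis=-1)
        self.unflattener = layers.Reshape((num_features, h, w))
        self.untransposer = layers.Permute((2, 3, 1))

    def call(self, inputs):
        x = self.transposer(inputs)    # (batch, features, h, w)
        x = self.reshaper(x)           # (batch, features, h*w)
        x = self.softmaxer(x)          # softmax over the h*w positions
        x = self.unflattener(x)        # (batch, features, h, w)
        return self.untransposer(x)    # back to (batch, h, w, features)

# y = SpatialSoftmax2D()(tf.random.normal((2, 8, 8, 16)))  # y.shape == (2, 8, 8, 16)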
Example #6
def test_compute_model_performance_singletask_classifier():
    """Computes model performance on singletask dataset with one-hot label encoding."""
    n_data_points = 20
    n_features = 10

    X = np.ones(shape=(int(n_data_points / 2), n_features)) * -1
    X1 = np.ones(shape=(int(n_data_points / 2), n_features))
    X = np.concatenate((X, X1))
    class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
    class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
    y = np.concatenate((class_0, class_1))
    dataset = NumpyDataset(X, y)

    features = layers.Input(shape=(n_features, ))
    dense = layers.Dense(2)(features)
    output = layers.Softmax()(dense)
    keras_model = tf.keras.Model(inputs=features, outputs=[output])
    model = dc.models.KerasModel(keras_model,
                                 dc.models.losses.SoftmaxCrossEntropy(),
                                 learning_rate=0.1)

    model.fit(dataset, nb_epoch=1000)
    metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
                               np.mean,
                               mode="classification",
                               n_tasks=1)

    scores = model.evaluate_generator(model.default_generator(dataset),
                                      [metric],
                                      per_task_metrics=True)
    scores = list(scores[1].values())
    assert np.isclose(scores, [1.0], atol=0.05)
Example #7
    def get_final_activation_op(self, output_name):
        """
        Define a masked softmax activation function

        Parameters
        ----------
        output_name : str
            Name of the output to apply softmax activation on

        Returns
        -------
        function
            Function to compute masked softmax

        Notes
        -----
            Uses `mask` field to exclude padded records from contributing
            to the softmax activation
        """
        softmax_op = layers.Softmax(axis=-1, name=output_name)

        # Listwise Top 1 RankNet Loss
        def masked_softmax(logits, mask):
            """
            NOTE:
            Tried to manually compute softmax with tf operations,
            but tf.keras.layers.Softmax() is more stable when working with
            cross_entropy layers
            """
            logits = tf.where(tf.equal(mask, tf.constant(1.0)), logits,
                              tf.constant(tf.float32.min))

            return softmax_op(logits)

        return masked_softmax
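A toy, self-contained run of the masked-softmax idea above (values are illustrative only): positions where mask == 0 are pushed to float32.min before the softmax, so they receive essentially zero probability.

import tensorflow as tf
from tensorflow.keras import layers

logits = tf.constant([[2.0, 1.0, 0.5, -1.0]])
mask = tf.constant([[1.0, 1.0, 0.0, 0.0]])   # last two records are padding

softmax_op = layers.Softmax(axis=-1)
masked_logits = tf.where(tf.equal(mask, tf.constant(1.0)), logits,
                         tf.constant(tf.float32.min))
probs = softmax_op(masked_logits)
print(probs.numpy())  # ~[[0.73, 0.27, 0.0, 0.0]]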
Example #8
def AlexNet_pytorch(im_height=224, im_width=224, class_num=1000):
    # TensorFlow tensors use NHWC channel ordering
    input_image = layers.Input(shape=(im_height, im_width, 3),
                               dtype="float32")  # output(None, 224, 224, 3)
    x = layers.ZeroPadding2D(
        ((2, 1), (2, 1)))(input_image)  # output(None, 227, 227, 3)
    x = layers.Conv2D(64, kernel_size=11, strides=4,
                      activation="relu")(x)  # output(None, 55, 55, 64)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)  # output(None, 27, 27, 64)
    x = layers.Conv2D(192, kernel_size=5, padding="same",
                      activation="relu")(x)  # output(None, 27, 27, 192)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2)(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(384, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 384)
    x = layers.Conv2D(256, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 256)
    x = layers.Conv2D(256, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 256)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)  # output(None, 6, 6, 256)

    x = layers.Flatten()(x)  # output(None, 6*6*256)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(4096, activation="relu")(x)  # output(None, 4096)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(4096, activation="relu")(x)  # output(None, 4096)
    x = layers.Dense(class_num)(x)  # output(None, class_num)
    predict = layers.Softmax()(x)

    model = models.Model(inputs=input_image, outputs=predict)
    return model
Example #9
    def __init__(self,
                 channels,
                 reduction=8,
                 data_format="channels_last",
                 **kwargs):
        super(PosAttBlock, self).__init__(**kwargs)
        self.data_format = data_format
        mid_channels = channels // reduction

        self.query_conv = conv1x1(in_channels=channels,
                                  out_channels=mid_channels,
                                  use_bias=True,
                                  data_format=data_format,
                                  name="query_conv")
        self.key_conv = conv1x1(in_channels=channels,
                                out_channels=mid_channels,
                                use_bias=True,
                                data_format=data_format,
                                name="key_conv")
        self.value_conv = conv1x1(in_channels=channels,
                                  out_channels=channels,
                                  use_bias=True,
                                  data_format=data_format,
                                  name="value_conv")
        self.scale = ScaleBlock(data_format=data_format, name="scale")
        self.softmax = nn.Softmax(axis=-1)
Example #10
    def __init__(self, out_channels, dropout_rate, temperature=30, **kwargs):
        super(Routing, self).__init__(**kwargs)
        self.avgpool = layers.GlobalAveragePooling2D()
        self.dropout = layers.Dropout(rate=dropout_rate)
        self.fc = layers.Dense(units=out_channels)
        self.softmax = layers.Softmax()
        self.temperature = temperature
Example #11
def build_model():
    global label_name_len
    global char_set_len
    global model
    global history

    model = models.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu',
                      input_shape=(50, 200, 1)),  # convolution layer 1, 3x3 kernels
        layers.MaxPooling2D((2, 2)),  # pooling layer 1, 2x2 downsampling
        layers.Conv2D(64, (3, 3), activation='relu'),  # convolution layer 2, 3x3 kernels
        layers.MaxPooling2D((2, 2)),  # pooling layer 2, 2x2 downsampling
        layers.Flatten(),  # Flatten layer, bridges the conv layers and the dense layers
        layers.Dense(1000, activation='relu'),  # fully connected layer for further feature extraction
        layers.Dense(label_name_len * char_set_len),
        layers.Reshape([label_name_len, char_set_len]),
        layers.Softmax()  # output layer, produces the final predictions
    ])
    # print the model summary
    model.summary()

    # compile the model
    model.compile(optimizer="adam",
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # train the model
    history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)
Example #12
    def generate_topic_words(self):
        beta = tf.einsum('TE,VE->TV', self.alpha, self.rho)
        beta = layers.Softmax(axis=-1)(beta)
        represent_sort = tf.argsort(beta, direction='DESCENDING')
        represent_sort = represent_sort[:, :10].numpy()

        return represent_sort
Example #13
def make_model():
    input_layer = layers.Input(shape=images_shape)
    output_layer = layers.Conv2D(64, (2, 2), padding='same')(input_layer)
    output_layer = layers.BatchNormalization()(output_layer)
    output_layer = layers.ReLU()(output_layer)
    output_layer = layers.MaxPooling2D()(output_layer)
    output_layer = layers.Dropout(0.3)(output_layer)
    output_layer = layers.Conv2D(32, (2, 2), padding='same')(output_layer)
    output_layer = layers.MaxPooling2D()(output_layer)
    output_layer = layers.ReLU()(output_layer)
    output_layer = layers.Dropout(0.3)(output_layer)
    output_layer = layers.Flatten()(output_layer)
    output_layer = layers.ReLU()(output_layer)
    output_layer = layers.Dropout(0.3)(output_layer)
    output_layer = layers.Dense(1024)(output_layer)
    output_layer = layers.Dropout(0.3)(output_layer)
    output_layer = layers.ReLU()(output_layer)
    output_layer = layers.Dense(100)(output_layer)
    output_layer = layers.Dropout(0.3)(output_layer)
    output_layer = layers.ReLU()(output_layer)
    embedding_layer = layers.Dense(embedding_size)(output_layer)
    output_layer = layers.Dense(class_count)(embedding_layer)
    output_layer = layers.Softmax()(output_layer)
    return tf.keras.Model(inputs=input_layer,
                          outputs=[embedding_layer, output_layer])
Example #14
def AlexNet_v1(im_height=224, im_width=224, num_classes=1000):
    # TensorFlow tensors use NHWC channel ordering
    input_image = layers.Input(shape=(im_height, im_width, 3),
                               dtype="float32")  # output(None, 224, 224, 3)
    x = layers.ZeroPadding2D(
        ((1, 2), (1, 2)))(input_image)  # output(None, 227, 227, 3)
    x = layers.Conv2D(48, kernel_size=11, strides=4,
                      activation="relu")(x)  # output(None, 55, 55, 48)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)  # output(None, 27, 27, 48)
    x = layers.Conv2D(128, kernel_size=5, padding="same",
                      activation="relu")(x)  # output(None, 27, 27, 128)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2)(x)  # output(None, 13, 13, 128)
    x = layers.Conv2D(192, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(192, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(128, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 128)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)  # output(None, 6, 6, 128)

    x = layers.Flatten()(x)  # output(None, 6*6*128)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(2048, activation="relu")(x)  # output(None, 2048)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(2048, activation="relu")(x)  # output(None, 2048)
    x = layers.Dense(num_classes)(x)  # output(None, num_classes)
    predict = layers.Softmax()(x)

    model = models.Model(inputs=input_image, outputs=predict)
    return model
Example #15
def AlexNet_v1(im_height=224, im_width=224, num_classes=1000):
    # TensorFlow tensors use NHWC channel ordering
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")  # output(None, 224, 224, 3)
    x = layers.ZeroPadding2D(((1, 2), (1, 2)))(input_image)                      # output(None, 227, 227, 3)
        # * neither 'same' nor 'valid' padding can take 224*224 to 55*55 directly, so ZeroPadding2D first pads to 227*227, then a valid-padded conv yields 55*55
        # * interpreted as ((top_pad, bottom_pad), (left_pad, right_pad))
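        # * worked size check (valid padding): floor((227 - 11) / 4) + 1 = 55,
        # * while floor((224 - 11) / 4) + 1 = 54, hence the ZeroPadding2D to 227 first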

    x = layers.Conv2D(48, kernel_size=11, strides=4, activation="relu")(x)       # output(None, 55, 55, 48), valid padding by default
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)                              # output(None, 27, 27, 48)
    x = layers.Conv2D(128, kernel_size=5, padding="same", activation="relu")(x)  # output(None, 27, 27, 128)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)                              # output(None, 13, 13, 128)
    x = layers.Conv2D(192, kernel_size=3, padding="same", activation="relu")(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(192, kernel_size=3, padding="same", activation="relu")(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(128, kernel_size=3, padding="same", activation="relu")(x)  # output(None, 13, 13, 128)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)                              # output(None, 6, 6, 128)

    x = layers.Flatten()(x)                         # output(None, 6*6*128)      # ^ flatten the feature maps into a 1-D vector before the fully connected layers
    x = layers.Dropout(0.2)(x)                      # 20% dropout rate           # ^ Dropout is applied before the fully connected layer
    x = layers.Dense(2048, activation="relu")(x)    # output(None, 2048)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(2048, activation="relu")(x)    # output(None, 2048)
    x = layers.Dense(num_classes)(x)                # output(None, num_classes)  # ^ no activation on the last Dense, since a Softmax layer follows
    predict = layers.Softmax()(x)                                                # Softmax turns the logits into a probability distribution

    model = models.Model(inputs=input_image, outputs=predict)                    # ^ models.Model ties the network's inputs and outputs together
    return model
Example #16
    def call(self, inputs, **kwargs):
        x = Conv_block(filters=32,kernel_size=(3,3),stride=(2,2))(inputs)

        # the first block does not use a residual connection; the commented-out form below requires a numpy input
        # input_shannel_size = (tf.shape(x))[-1]
        input_shannel_size = x.shape[-1]
        # x = Depthwise_res_block(filters=16,kernel=(3,3),stride=(1,1),t=1,input_shannel_size=input_shannel_size,resdiual = False)(x)
        x = self.Depthwise_res_block1(x)

        for i in range(2):
            input_shannel_size = x.shape[-1]

            if i==0:
                x = Depthwise_res_block(filters=24,kernel=(3,3),stride=(2,2),t=6,input_shannel_size=input_shannel_size,resdiual=False)(x)
            else:
                x = Depthwise_res_block(filters=24,kernel=(3,3),stride=(1,1),t=6,input_shannel_size=input_shannel_size,resdiual=True)(x)
        for i in range(3):
            input_shannel_size = x.shape[-1]

            if i==0:
                x = Depthwise_res_block(filters=32,kernel=(3,3),stride=(2,2),t=6,input_shannel_size=input_shannel_size,resdiual=False)(x)
            else:
                x = Depthwise_res_block(filters=32,kernel=(3,3),stride=(1,1),t=6,input_shannel_size=input_shannel_size,resdiual=True)(x)

        for i in range(4):
            input_shannel_size = x.shape[-1]

            if i==0:
                x = Depthwise_res_block(filters=64,kernel=(3,3),stride=(2,2),t=6,input_shannel_size=input_shannel_size,resdiual=False)(x)
            else:
                x = Depthwise_res_block(filters=64,kernel=(3,3),stride=(1,1),t=6,input_shannel_size=input_shannel_size,resdiual=True)(x)
        for i in range(3):
            input_shannel_size = x.shape[-1]

            if i==0:
                x = Depthwise_res_block(filters=96,kernel=(3,3),stride=(1,1),t=6,input_shannel_size=input_shannel_size,resdiual=False)(x)
            else:
                x = Depthwise_res_block(filters=96,kernel=(3,3),stride=(1,1),t=6,input_shannel_size=input_shannel_size,resdiual=True)(x)
        for i in range(3):
            input_shannel_size = x.shape[-1]

            if i==0:
                x = Depthwise_res_block(filters=160,kernel=(3,3),stride=(2,2),t=6,input_shannel_size=input_shannel_size,resdiual=False)(x)
            else:
                x = Depthwise_res_block(filters=160,kernel=(3,3),stride=(1,1),t=6,input_shannel_size=input_shannel_size,resdiual=True)(x)
        for i in range(1):
            input_shannel_size = x.shape[-1]

            if i==0:
                x = Depthwise_res_block(filters=320,kernel=(3,3),stride=(1,1),t=6,input_shannel_size=input_shannel_size,resdiual=False)(x)
            else:
                x = Depthwise_res_block(filters=320,kernel=(3,3),stride=(1,1),t=6,input_shannel_size=input_shannel_size,resdiual=True)(x)
        x = Conv_block(filters=1280,kernel_size=(1,1),stride=(1,1))(x)

        x = layers.GlobalAveragePooling2D()(x)
        x = layers.Reshape((1,1,1280))(x)
        x = layers.Conv2D(filters=self.label_size,kernel_size=(1,1),strides=(1,1),padding='same')(x)
        x = layers.Reshape((self.label_size,))(x)
        x = layers.Softmax()(x)
        return x
Example #17
def build_model(h, w, c, b, range_init, w_init, out, activation='linear'):
    input = l.Input((h, w, c))
    hist = DiffHist(b, range_init, w_init)

    x = hist(input[..., 1:])
    x = x * tf.expand_dims(tf.expand_dims(input[..., 0], axis=-1), axis=-1)
    x = tf.sqrt(x)

    y = tf.reduce_mean(x, axis=[1, 2])
    y = tf.math.divide_no_nan(y, tf.reduce_sum(y, axis=-1, keepdims=True))
    y1 = y[..., 0:(c - 1) // 2, :]
    y2 = y[..., (c - 1) // 2:c - 1, :]

    def dense_encode(y1):
        y1 = tf.reshape(y1, (-1, np.prod(y1.shape[1:])))
        y1 = l.Dense(b // 2, use_bias=False)(
            y1
        )  # TODO: add support for a double histogram and a double dense layer
        y1 = l.BatchNormalization()(y1)
        y1 = l.Dropout(0.2)(y1)
        y1 = l.ReLU()(y1)
        return y1

    y1 = dense_encode(y1)
    y2 = dense_encode(y2)
    y = l.concatenate((y1, y2))

    y = l.Dense(out, activation=activation)(y)
    y = l.Softmax()(y)

    return tf.keras.Model(input, y), hist
Example #18
def create_q_model():
    # Network defined by the DeepMind paper
    inputs = layers.Input(shape=(WIDTH, HEIGHT, 1))

    # Convolutions on the frames on the screen
    if WIDTH < 40:
        layer1 = layers.Conv2D(32,
                               2,
                               strides=2,
                               padding='same',
                               activation="relu")(inputs)
        layer1a = layers.Conv2D(32,
                                2,
                                strides=1,
                                padding='same',
                                activation="relu")(layer1)
        layer2 = layers.Flatten()(layer1a)
        layer3 = layers.Dense(128, activation="relu")(layer2)
    else:
        # Convolutions on the frames on the screen
        layer1 = layers.Conv2D(32, 4, strides=4, activation="relu")(inputs)
        layer1a = layers.Conv2D(64, 3, strides=2, activation="relu")(layer1)
        layer2 = layers.Flatten()(layer1a)
        layer3 = layers.Dense(256, activation="relu")(layer2)

    layer4 = layers.Dense(N * 4, activation="relu")(layer3)
    shaped = layers.Reshape((4, N))(layer4)
    probabilities = layers.Softmax(axis=1)(shaped)
    return keras.Model(inputs=inputs, outputs=probabilities)
Example #19
def _resnet(block, blocks_num, im_width=224, im_height=224, num_classes=1000, include_top=True):
    # tensorflow中的tensor通道排序是NHWC
    # (None, 224, 224, 3)
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")
    x = layers.Conv2D(filters=64, kernel_size=7, strides=2,
                      padding="SAME", use_bias=False, name="conv1")(input_image)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name="conv1/BatchNorm")(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME")(x)

    x = _make_layer(block, x.shape[-1], 64, blocks_num[0], name="block1")(x)
    x = _make_layer(block, x.shape[-1], 128, blocks_num[1], strides=2, name="block2")(x)
    x = _make_layer(block, x.shape[-1], 256, blocks_num[2], strides=2, name="block3")(x)
    x = _make_layer(block, x.shape[-1], 512, blocks_num[3], strides=2, name="block4")(x)

    if include_top:
        x = layers.GlobalAvgPool2D()(x)  # pool + flatten
        x = layers.Dense(num_classes, name="logits")(x)
        predict = layers.Softmax()(x)
    else:
        predict = x

    model = Model(inputs=input_image, outputs=predict)

    return model
Example #20
def init(vocabularySize=1, punctuationSize=1, timesteps=50, word_vector_size=100, hidden=100, gpu=False,
         optimizer='adam', use_features=False):
    # input
    if not use_features:
        word_ids = keras.Input(shape=(timesteps,), dtype='int32', name='word_ids')
        word_vec = layers.Embedding(output_dim=word_vector_size, input_dim=vocabularySize, input_length=1,
                                    name='word_vec')(word_ids)
    else:
        word_ids = keras.Input(shape=(timesteps, vocabularySize,), dtype='float32', name='feat_ids')
        word_vec = layers.Dense(word_vector_size, input_dim=vocabularySize, name='feat_matrix', use_bias=True)(word_ids)
    # encoder layer
    gru_input, f_state, b_state = layers.Bidirectional(
        __createGRULayer(hidden, gpu, True, 'gru_input'))(word_vec)

    # decoder
    gru_output = __createGRULayer(
        hidden, gpu, False, 'gru')([gru_input, b_state])

    attention_output = la.Attention(name='attention_layer')(
        [gru_input, gru_output, b_state])
    lf_output = llf.LateFusion(name='late_fusion')(
        [gru_output, attention_output])

    # output
    out = layers.Dense(punctuationSize, name='out')(lf_output)
    out = layers.TimeDistributed(layers.Softmax())(out)
    # ignore first value as we do not predict for the first word
    out = layers.Lambda(lambda x: x[:, 1:])(out)

    # model
    model = keras.Model(inputs=word_ids, outputs=out, name='punctuation')
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    return model
Example #21
def rnn_model(params, training_dr_lstm=True, training_dr_ll=True):
    """RNN model for text."""
    input_shape = (params['fix_len'],)  # trailing comma so this is a tuple
    seq_input = layers.Input(shape=input_shape)
    # vocab+1 because of padding
    seq_emb = layers.Embedding(params['vocab_size'] + 1,
                               params['emb_size'],
                               input_length=params['fix_len'])(seq_input)
    lstm_out = layers.LSTM(params['hidden_lstm_size'],
                           dropout=params['dropout_rate_lstm'])(
                               seq_emb, training=training_dr_lstm)
    out = layers.Dropout(rate=params['dropout_rate'],
                         seed=params['random_seed'])(lstm_out,
                                                     training=training_dr_ll)
    if params['variational']:
        # scale kl loss by number of training examples.
        # larger training dataset depends less on prior
        def scaled_kl_fn(p, q, _):
            return tfp.distributions.kl_divergence(q, p) / params['n_train']

        logits = tfpl.DenseReparameterization(
            params['n_class_in'],
            activation=None,
            kernel_divergence_fn=scaled_kl_fn,
            bias_posterior_fn=tfpl.util.default_mean_field_normal_fn(),
            name='last_layer')(out)
    else:
        logits = layers.Dense(
            params['n_class_in'],
            activation=None,
            kernel_regularizer=regularizers.l2(params['reg_weight']),
            bias_regularizer=regularizers.l2(params['reg_weight']),
            name='last_layer')(out)
    probs = layers.Softmax(axis=1)(logits)
    return models.Model(seq_input, probs, name='rnn')
Example #22
    def __init__(self):
        super(EventDetector, self).__init__()
        self.conv1 = layers.Conv1D(
            64,
            3,
            activation='relu',
            kernel_regularizer=keras.regularizers.L2(0.1))
        self.bn1 = layers.BatchNormalization()
        self.mp1 = layers.MaxPool1D(3)

        self.conv2 = layers.Conv1D(
            128,
            3,
            activation='relu',
            kernel_regularizer=keras.regularizers.L2(0.1))
        self.bn2 = layers.BatchNormalization()
        self.mp2 = layers.MaxPool1D(3)

        self.flatten = layers.Flatten()
        self.dropout = layers.Dropout(0.15)

        self.d1 = layers.Dense(64,
                               activation='relu',
                               kernel_regularizer=keras.regularizers.L2(0.1))
        self.d2 = layers.Dense(32,
                               activation='relu',
                               kernel_regularizer=keras.regularizers.L2(0.1))
        self.d3 = layers.Dense(3,
                               activation='relu',
                               kernel_regularizer=keras.regularizers.L2(0.1))

        self.softmax = layers.Softmax()
Example #23
def AttentionModel(sr=16000, iLen=25000):
    
    inputs = L.Input(x_train.shape[1:], name='Input')

    x = L.Conv2D(10, (5, 1), activation='relu', padding='same', name='Conv1')(inputs)
    x = L.BatchNormalization(name='BN1')(x)
    x = L.Conv2D(1, (5, 1), activation='relu', padding='same', name='Conv2')(x)
    x = L.BatchNormalization(name='BN2')(x)

    x = L.Reshape(x.shape[1:-1],name='Squeeze')(x)

    n_units = 64
    x = L.LSTM(n_units, return_sequences=True, name='LSTM_Sequences')(x)  

    # Calculate Unit Importance
    xLast = L.Lambda(lambda q: q[:, -1], name='FinalSequence')(x)  # [b_s, vec_dim]
    xLast = L.Dense(xLast.shape[-1], name='UnitImportance')(xLast)

    # Calculate attention
    attScores = L.Dot(axes=[1, 2],name='AttentionScores')([xLast, x])
    attScores = L.Softmax(name='AttentionSoftmax')(attScores)  

    x = L.Dot(axes=[1, 1], name='AttentionVector')([attScores, x])  
    x = L.Dense(32, activation='relu', name='FC')(x)
    outputs = L.Dense(5, activation='softmax', name='Output')(x)
    model = Model(inputs=[inputs], outputs=[outputs], name='Attention')

    return model
Example #24
def external_attention(x,
                       dim,
                       num_heads,
                       dim_coefficient=4,
                       attention_dropout=0,
                       projection_dropout=0):
    _, num_patch, channel = x.shape
    assert dim % num_heads == 0
    num_heads = num_heads * dim_coefficient

    x = layers.Dense(dim * dim_coefficient)(x)
    # create tensor [batch_size, num_patches, num_heads, dim*dim_coefficient//num_heads]
    x = tf.reshape(x,
                   shape=(-1, num_patch, num_heads,
                          dim * dim_coefficient // num_heads))
    x = tf.transpose(x, perm=[0, 2, 1, 3])
    # a linear layer M_k
    attn = layers.Dense(dim // dim_coefficient)(x)
    # normalize attention map
    attn = layers.Softmax(axis=2)(attn)
    # double normalization
    attn = attn / (1e-9 + tf.reduce_sum(attn, axis=-1, keepdims=True))
    attn = layers.Dropout(attention_dropout)(attn)
    # a linear layer M_v
    x = layers.Dense(dim * dim_coefficient // num_heads)(attn)
    x = tf.transpose(x, perm=[0, 2, 1, 3])
    x = tf.reshape(x, [-1, num_patch, dim * dim_coefficient])
    # a linear layer to project original dim
    x = layers.Dense(dim)(x)
    x = layers.Dropout(projection_dropout)(x)
    return x
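A brief usage sketch of the external_attention block above (shapes and hyper-parameters are illustrative assumptions): 4 patches of width 64 with 8 heads, wired into a functional model.

import tensorflow as tf
from tensorflow.keras import layers

inputs = layers.Input(shape=(4, 64))                      # (batch, num_patch, channel)
outputs = external_attention(inputs, dim=64, num_heads=8)
model = tf.keras.Model(inputs, outputs)
model.summary()                                           # output shape: (None, 4, 64)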
Example #25
def AlexNet_v1(im_height=224, im_width=224, class_num=1000):
    # TensorFlow tensors use NHWC channel ordering
    # when building the model with the functional API, an explicit Input layer is required
    input_image = layers.Input(shape=(im_height, im_width, 3),
                               dtype="float32")  # output(None, 224, 224, 3)
    x = layers.ZeroPadding2D(
        ((1, 2), (1, 2)))(input_image)  # output(None, 227, 227, 3)
    x = layers.Conv2D(48, kernel_size=11, strides=4,
                      activation="relu")(x)  # output(None, 55, 55, 48)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)  # output(None, 27, 27, 48)
    x = layers.Conv2D(128, kernel_size=5, padding="same",
                      activation="relu")(x)  # output(None, 27, 27, 128)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2)(x)  # output(None, 13, 13, 128)
    x = layers.Conv2D(192, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(192, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(128, kernel_size=3, padding="same",
                      activation="relu")(x)  # output(None, 13, 13, 128)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)  # output(None, 6, 6, 128)

    x = layers.Flatten()(x)  # output(None, 6*6*128)
    x = layers.Dropout(0.2)(x)  # apply dropout before the following Dense layer
    x = layers.Dense(2048, activation="relu")(x)  # output(None, 2048)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(2048, activation="relu")(x)  # output(None, 2048)
    x = layers.Dense(class_num)(
        x
    )  # output(None, class_num); alternatively this Dense could use activation="softmax" directly, making the separate Softmax layer below unnecessary
    predict = layers.Softmax()(x)

    model = models.Model(inputs=input_image, outputs=predict)
    return model
Example #26
    def build_model(self):
        # build the network
        input_state = keras.Input(shape=self.input_shape)
        h1 = layers.Dense(
            32,
            activation='relu',
            kernel_initializer=keras.initializers.GlorotUniform())(input_state)
        h2 = layers.Dense(
            64,
            activation='relu',
            kernel_initializer=keras.initializers.GlorotUniform())(h1)
        h3 = layers.Dense(
            128,
            activation='relu',
            kernel_initializer=keras.initializers.GlorotUniform())(h2)
        h4 = layers.Dropout(0.5)(h3)
        h5 = layers.Dense(
            self.action_space,
            kernel_initializer=keras.initializers.GlorotUniform())(h4)
        out = layers.Softmax()(h5)

        # define the model
        model = keras.Model(inputs=input_state, outputs=out)

        keras.utils.plot_model(model,
                               './output/policy_netwrok.jpg',
                               show_shapes=True)
        return model
Example #27
def refine_network_header(roi_feature, bn_train, num_classes):
    """classifier and location head of fpn"""

    # TODO: add name to layers
    # roi_feature: [batch, num_rois, 7, 7, channels]
    x = layers.TimeDistributed(layers.Conv2D(1024, 7, padding='valid'),
                               name='mrcnn_class_conv1')(roi_feature)
    x = layers.TimeDistributed(layers.BatchNormalization(),
                               name='mrcnn_class_bn1')(x, bn_train)
    x = layers.ReLU()(x)  # [batch, num_rois, 1, 1, 1024]

    x = layers.TimeDistributed(layers.Conv2D(1024, 1, padding='valid'),
                               name='mrcnn_class_conv2')(x)
    x = layers.TimeDistributed(layers.BatchNormalization(),
                               name='mrcnn_class_bn2')(x, bn_train)
    x = layers.ReLU()(x)  # [batch, num_rois, 1, 1, 1024]

    shared = layers.Lambda(
        lambda t: tf.squeeze(tf.squeeze(t, axis=3), axis=2))(x)

    # classification
    classi_logits = layers.TimeDistributed(layers.Dense(num_classes),
                                           name='mrcnn_class_logits')(shared)
    classification = layers.TimeDistributed(layers.Softmax(),
                                            name='mrcnn_class')(classi_logits)

    # bounding box
    bbox = layers.TimeDistributed(layers.Dense(4 * num_classes),
                                  name='mrcnn_bbox_fc')(shared)
    bbox = layers.Reshape(target_shape=(-1, num_classes, 4),
                          name='mrcnn_bbox')(bbox)

    return classi_logits, classification, bbox
Example #28
    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # Reduce the tensor to a vector.
        if len(output_node.shape) > 2:
            output_node = reduction.SpatialReduction().build(hp, output_node)

        if self.dropout is not None:
            dropout = self.dropout
        else:
            dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0)

        if dropout > 0:
            output_node = layers.Dropout(dropout)(output_node)
        output_node = layers.Dense(self.shape[-1])(output_node)
        if isinstance(self.loss, keras.losses.BinaryCrossentropy):
            output_node = layers.Activation(activations.sigmoid, name=self.name)(
                output_node
            )
        else:
            output_node = layers.Softmax(name=self.name)(output_node)
        return output_node
Example #29
    def build(self, hp, inputs=None):
        if self.num_classes:
            expected = self.num_classes if self.num_classes > 2 else 1
            if self.output_shape[-1] != expected:
                raise ValueError('The data doesn\'t match the expected shape. '
                                 'Expecting {} but got {}'.format(
                                     expected, self.output_shape[-1]))
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # Reduce the tensor to a vector.
        if len(output_node.shape) > 2:
            output_node = reduction.SpatialReduction().build(hp, output_node)

        if self.dropout_rate is not None:
            dropout_rate = self.dropout_rate
        else:
            dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5],
                                     default=0)

        if dropout_rate > 0:
            output_node = layers.Dropout(dropout_rate)(output_node)
        output_node = layers.Dense(self.output_shape[-1])(output_node)
        if self.loss == 'binary_crossentropy':
            output_node = keras_layers.Sigmoid(name=self.name)(output_node)
        else:
            output_node = layers.Softmax(name=self.name)(output_node)
        return output_node
Example #30
    def __init__(self, units, name='attention'):
        super(Attention, self).__init__()

        self.projection = layers.Dense(units,
                                       activation='tanh',
                                       use_bias=False,
                                       name=name + "_projection")

        self.wt = layers.Dense(units,
                               activation=None,
                               use_bias=False,
                               name=name + "_wt")
        self.wx = layers.Dense(units,
                               activation=None,
                               use_bias=False,
                               name=name + "_wx")

        self.add = layers.Add()
        self.add_act = layers.Activation('tanh')

        self.wa = layers.Dense(1,
                               activation=None,
                               use_bias=False,
                               name=name + "_wa")
        self.softmax = layers.Softmax(axis=1)
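The constructor above assembles the pieces of an additive (Bahdanau-style) attention; how they are combined lives in a call() that is not shown. A minimal functional sketch of that pattern, written as a standalone helper under that assumption (not the source's actual call()):

import tensorflow as tf
from tensorflow.keras import layers

def additive_attention_scores(query, values, units=32):
    # query: (batch, units_q), values: (batch, timesteps, features)
    wt = layers.Dense(units, use_bias=False)
    wx = layers.Dense(units, use_bias=False)
    wa = layers.Dense(1, use_bias=False)
    score = wa(tf.tanh(wt(tf.expand_dims(query, 1)) + wx(values)))  # (batch, timesteps, 1)
    weights = layers.Softmax(axis=1)(score)                         # normalize over timesteps
    context = tf.reduce_sum(weights * values, axis=1)               # (batch, features)
    return context, weights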