Example #1
0
class Extractor_Model(tf.keras.Model):
    """Multi-width CNN feature extractor over embedded token sequences.

    Embeds integer token ids with a frozen pre-trained embedding matrix,
    applies five parallel Conv1D branches (kernel sizes 2-6), global-max-pools
    each branch, and returns the concatenated pooled features
    (shape: (batch, 5 * num_filters)).
    """

    def __init__(self, embedding_matrix):
        """Build the layers.

        Args:
            embedding_matrix: pre-trained embedding weights; assumed shape
                (num_words, embedding_dimension) = (4860, 100) — TODO confirm
                against the vocabulary used to produce it.
        """
        super(Extractor_Model, self).__init__()

        # hyperparameters
        num_filters = 100
        sequence_length = 540
        embedding_dimension = 100
        num_words = 4860

        # Frozen embedding lookup initialised from the pre-trained matrix
        # (trainable=False keeps the pre-trained vectors fixed).
        self.model = Sequential()
        self.model.add(layers.Embedding(input_dim=num_words, output_dim=embedding_dimension,
                                        embeddings_initializer=initializers.Constant(embedding_matrix),
                                        input_length=sequence_length, trainable=False))

        # Parallel convolution branches with increasing receptive fields
        # (n-gram widths 2..6); 'same' padding preserves sequence length.
        self.conv_2 = layers.Conv1D(filters=num_filters, kernel_size=2, padding='same', activation='relu')
        self.conv_3 = layers.Conv1D(filters=num_filters, kernel_size=3, padding='same', activation='relu')
        self.conv_4 = layers.Conv1D(filters=num_filters, kernel_size=4, padding='same', activation='relu')
        self.conv_5 = layers.Conv1D(filters=num_filters, kernel_size=5, padding='same', activation='relu')
        self.conv_6 = layers.Conv1D(filters=num_filters, kernel_size=6, padding='same', activation='relu')

        self.global_max_pool = layers.GlobalMaxPooling1D()

        # optimizer
        learning_rate = 0.001
        # NOTE(review): np.random.randint(0, 2) yields 0 or 1 *randomly at
        # construction time*, so the decay rate is nondeterministic. The
        # formula looks like a progress/epoch-based schedule copied with the
        # progress term replaced by a random draw — confirm intent.
        decay_rate = learning_rate / ((1 + 10 * np.random.randint(0, 2)) ** 0.75)
        # Fix: 'lr' is a deprecated alias for 'learning_rate' in tf.keras
        # optimizers; use the canonical keyword. (The 'decay' argument was
        # removed from non-legacy optimizers in newer Keras — verify the
        # installed TF version still accepts it.)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, decay=decay_rate)

    @tf.function
    def call(self, inputs):
        """Forward pass: embed, convolve at 5 widths, pool, concatenate.

        Args:
            inputs: integer token-id batch; presumably shape
                (batch, sequence_length) — TODO confirm with caller.

        Returns:
            Tensor of shape (batch, 5 * num_filters).
        """
        # Fix: invoke the submodel via __call__ rather than .call() directly,
        # so Keras performs build/mask bookkeeping.
        embedded_sequences = self.model(inputs)

        conv_2 = self.conv_2(embedded_sequences)
        conv_3 = self.conv_3(embedded_sequences)
        conv_4 = self.conv_4(embedded_sequences)
        conv_5 = self.conv_5(embedded_sequences)
        conv_6 = self.conv_6(embedded_sequences)
        conv_2 = self.global_max_pool(conv_2)
        conv_3 = self.global_max_pool(conv_3)
        conv_4 = self.global_max_pool(conv_4)
        conv_5 = self.global_max_pool(conv_5)
        conv_6 = self.global_max_pool(conv_6)

        concat = layers.concatenate([conv_2, conv_3, conv_4, conv_5, conv_6])

        return concat

    @tf.function
    def loss_function(self, detector, discriminator):
        """Total loss: sum of the detector and discriminator losses."""
        return detector + discriminator
Example #2
0
# Build sliding-window training data: each sample X[i] is a window of
# FRAME_SIZE consecutive timesteps of `data`; Y[i] is a single later timestep.
# Assumes `data` is a 2-D array of shape (timesteps, DATA_WIDTH) whose columns
# correspond to dataSource.tags_list — TODO confirm upstream.
DATA_WIDTH = len(dataSource.tags_list)

NUM_OF_FRAMES = data.shape[0] - FRAME_SIZE - 1

X = np.zeros((NUM_OF_FRAMES, FRAME_SIZE, DATA_WIDTH), dtype=np.float64)
Y = np.zeros((NUM_OF_FRAMES, DATA_WIDTH), dtype=np.float64)

for frame_ in range(NUM_OF_FRAMES):
    X[frame_] = data[frame_:frame_ + FRAME_SIZE, ...]
    # NOTE(review): the target is FRAME_SIZE + 1 steps after the window
    # start, i.e. it skips one timestep past the window's end. Indexing stays
    # in bounds given NUM_OF_FRAMES above, but if the intent was "predict the
    # next step", the target should be data[frame_ + FRAME_SIZE] (and
    # NUM_OF_FRAMES = data.shape[0] - FRAME_SIZE) — confirm intent.
    Y[frame_] = data[frame_ + FRAME_SIZE + 1]


# Dense front-end followed by a two-layer LSTM; sigmoid head predicts the
# per-tag values of the target timestep.
model = Sequential()
model.add(Dense(100, input_shape=(FRAME_SIZE, DATA_WIDTH)))
model.add(Activation('relu'))
model.add(LSTM(57, return_sequences=True))
model.add(LSTM(DATA_WIDTH * 2))
model.add(Dropout(0.5))
model.add(Dense(DATA_WIDTH))
model.add(Activation('sigmoid'))

model.compile(optimizer='rmsprop',
              loss='mse')

model.fit(X, Y)

# Fix: use predict() instead of calling .call() directly — .call() bypasses
# Keras __call__ bookkeeping and expects tensors, not raw numpy input.
A = model.predict(X)
# NOTE(review): sigmoid outputs can be arbitrarily close to 0, so 1/A may
# overflow or emit divide-by-zero warnings — guard if this is more than a
# debug print.
print(1 / A)