Example #1
import tensorflow as tf
from tensorflow import nn
from tensorflow.keras import Model, Sequential, layers, regularizers

# `hps` is assumed to be defined elsewhere; it supplies the L2 weight-decay
# coefficient `hps.lamda`.

class MyAlexNet(Model):
    def __init__(self):
        super(MyAlexNet, self).__init__()

        self.Layers = [
            layers.Conv2D(filters=48, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)), # 64
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),  # 192
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Conv2D(filters=192, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)), # 384
            layers.Conv2D(filters=192, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)), # 256
            layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)), # 256
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Flatten(),

            layers.Dense(2048, activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),     # 2048
            layers.Dense(2048, activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),     # 2048
            layers.Dense(10, activation=nn.softmax, kernel_regularizer=regularizers.l2(hps.lamda)),
            # layers.Dense(10, activation=None),
        ]
        self.net = Sequential(self.Layers)
        self.net.build(input_shape=[None, 32, 32, 3])

    def call(self, inputs, training=None, mask=None):
        inputs = tf.reshape(inputs, [-1, 32, 32, 3])
        out = self.net(inputs)
        return out
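
A hypothetical usage sketch (not part of the original): assuming `hps.lamda` has been set, the model can be trained directly on CIFAR-10, which matches the [None, 32, 32, 3] build shape and the 10-way softmax head.

model = MyAlexNet()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
              loss='sparse_categorical_crossentropy',  # integer class labels
              metrics=['accuracy'])
(x_train, y_train), _ = tf.keras.datasets.cifar10.load_data()
model.fit(x_train.astype('float32') / 255.0, y_train, batch_size=128, epochs=1)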
Example #2
import tensorflow as tf
from tensorflow.keras import Sequential, layers

def create_model():
    model = Sequential()
    model.add(layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Dropout(rate=0.5))
    model.add(layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Dropout(rate=0.5))
    model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(15, activation='softmax'))
    model.build(input_shape=(None, 64, 64, 3))

    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss=tf.keras.losses.categorical_crossentropy,
                  metrics=['accuracy'])
    return model
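
A quick smoke test for create_model (a sketch, not from the original source): the dummy batch matches the (None, 64, 64, 3) input shape, and the labels are one-hot encoded because the model compiles with categorical_crossentropy.

import numpy as np

model = create_model()
x = np.random.rand(8, 64, 64, 3).astype('float32')
y = tf.keras.utils.to_categorical(np.random.randint(0, 15, size=8), num_classes=15)
model.fit(x, y, epochs=1, batch_size=4)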
Example #3
import time

import numpy as np
from tensorflow.keras import Sequential, layers

# Setup is not shown in this snippet: `runners` holds the Vitis AI DPU
# runners, `count` and `runner_idx` start at 0, `t1` is the start timestamp,
# and `quantized_input` holds the int16 model inputs. The enclosing batch
# loop is reconstructed below.
while count < quantized_input.shape[0]:
    batch_size = batches[runner_idx]
    input_data = quantized_input[count:count+batch_size]
    batch_size = input_data.shape[0]  # the final slice may be shorter

    # Reshape to the runner's expected layout and allocate an int16 output
    # buffer, then launch the job asynchronously and block until it finishes.
    input_data = input_data.reshape(batch_size, num_sequences, runner_in_seq_len)
    output_data = np.empty((batch_size, num_sequences, runner_out_seq_len), dtype=np.int16)
    job_id = runners[runner_idx].execute_async([input_data], [output_data], True)
    runners[runner_idx].wait(job_id)
    # Keep only the valid part of each output sequence.
    out_np[count:count+batch_size, ...] = output_data[..., :output_seq_dim].reshape(
            batch_size, num_sequences*output_seq_dim)

    count += batch_size
    runner_idx = (runner_idx + 1) % num_cores  # round-robin over the DPU cores

# Release the DPU runners.
while runners:
    del runners[0]

# `lstm_output` is assumed to be bound to the output buffer filled above.
# Convert the fixed-point int16 results back to float and keep the last
# timestep of each sequence.
lstm_output = quanti_convert_int16_to_float(lstm_output, out_pos)
lstm_output = lstm_output.reshape((num_records, 25, 100))[:, -1, :]

# Rebuild the final dense layer on the CPU and copy in the trained weights
# from the original Keras model so the DPU output can be evaluated end to end.
lstm_downstream = Sequential()
lstm_downstream.add(layers.Dense(1, activation='sigmoid'))
lstm_downstream.compile(loss='binary_crossentropy',
                        optimizer='adam', metrics=['accuracy'])
lstm_downstream.build((1, 100))
lstm_downstream.layers[0].set_weights(model.get_layer('dense').get_weights())
score = lstm_downstream.evaluate(lstm_output, Y_test, verbose=0)
t2 = time.time()
print('Accuracy:', score[1])
print('E2E Time:', t2-t1)
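
quanti_convert_int16_to_float is not defined in the snippet; here is a minimal sketch of what such a fixed-point dequantization helper typically does, assuming out_pos is the number of fractional bits used by the quantizer:

def quanti_convert_int16_to_float(data, fix_pos):
    # Interpret int16 values as fixed-point with `fix_pos` fractional bits:
    # float = int / 2**fix_pos.
    return data.astype(np.float32) / (1 << fix_pos)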
Example #4
    # The start of the enclosing function is not shown; `model` is assumed to
    # be a fresh tf.keras Sequential, and the kernel sizes and
    # `peek_interval_days` are assumed to be defined earlier in the function.
    # (Assumed imports from tensorflow.keras: Sequential; the layers
    # SeparableConv1D, AveragePooling1D, GlobalAveragePooling1D, Dense; the
    # optimizer Adam; and the callback EarlyStopping.)
    model.add(
        SeparableConv1D(30,
                        kernel_size_first_layer,
                        input_shape=(peek_interval_days, 1),
                        activation="relu"))
    model.add(AveragePooling1D())
    model.add(SeparableConv1D(10, kernel_size_second_layer, activation="relu"))
    model.add(AveragePooling1D())
    model.add(SeparableConv1D(3, kernel_size_third_layer, activation="relu"))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer=Adam(),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])

    model.build()
    model.summary()
    # Model.fit accepts generators directly; fit_generator is deprecated.
    history = model.fit(
        train_generator,
        epochs=1,
        validation_data=validation_generator,
        class_weight={
            0: 0.5,  # false
            1: 0.5  # true
        },
        callbacks=[EarlyStopping(monitor="val_loss", patience=5)])

    def plot_history(history):
        loss = history.history["loss"]
        val_loss = history.history["val_loss"]
        epochs = range(1, len(loss) + 1)
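        # (The original example is truncated above. A minimal completion
        # sketch, assuming matplotlib: plot the two loss curves per epoch.)
        import matplotlib.pyplot as plt

        plt.plot(epochs, loss, "bo", label="Training loss")
        plt.plot(epochs, val_loss, "b-", label="Validation loss")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.legend()
        plt.show()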