Example #1
    def build_pilotnet_model(self):
        def resize(images):
            import tensorflow as tf
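            # NOTE: tf.image.resize_area is the TF1 API; on TF2 the rough
            # equivalent would be tf.image.resize(images, (66, 200),
            # method='area').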
            return tf.image.resize_area(images, size=(66, 200))

        def preprocessing(images):
            import tensorflow as tf
            standardization = lambda x: tf.image.per_image_standardization(x)
            normalized_rgb = tf.math.divide(images, 255.0)
            augmented_img_01 = tf.image.random_brightness(normalized_rgb,
                                                          max_delta=0.4)
            augmented_img_02 = tf.image.random_contrast(augmented_img_01,
                                                        lower=0.5,
                                                        upper=1.5)
            std_img = tf.map_fn(standardization, augmented_img_02)
            return std_img

        cropping_top, cropping_down = self.cropping_top, self.cropping_down

        # Build PilotNet model
        inputs = keras.Input(shape=(160, 320, 3), name='inputs')
        cropping = keras.layers.Cropping2D(cropping=((cropping_top,
                                                      cropping_down), (0, 0)),
                                           name='cropping')(inputs)
        resize = keras.layers.Lambda(resize, name='resize')(cropping)
        norm = keras.layers.Lambda(preprocessing, name='normalization')(resize)
        # input = (66, 200, 3), output = (31, 98, 24)
        conv_1 = keras.layers.Conv2D(filters=24,
                                     kernel_size=5,
                                     strides=2,
                                     padding='valid',
                                     name='conv_1')(norm)
        bn_1 = keras.layers.BatchNormalization(name='bn_1')(conv_1)
        relu_1 = keras.layers.ReLU(name='relu_1')(bn_1)

        # input = (31, 98, 24), output = (14, 47, 36)
        conv_2 = keras.layers.Conv2D(filters=36,
                                     kernel_size=5,
                                     strides=2,
                                     padding='valid',
                                     name='conv_2')(relu_1)
        bn_2 = keras.layers.BatchNormalization(name='bn_2')(conv_2)
        relu_2 = keras.layers.ReLU(name='relu_2')(bn_2)

        # input = (14, 47, 36), output = (5, 22, 48)
        conv_3 = keras.layers.Conv2D(filters=48,
                                     kernel_size=5,
                                     strides=2,
                                     padding='valid',
                                     name='conv_3')(relu_2)
        bn_3 = keras.layers.BatchNormalization(name='bn_3')(conv_3)
        relu_3 = keras.layers.ReLU(name='relu_3')(bn_3)

        # input = (5, 22, 48), output = (3, 20, 64)
        conv_4 = keras.layers.Conv2D(filters=64,
                                     kernel_size=3,
                                     strides=1,
                                     padding='valid',
                                     name='conv_4')(relu_3)
        bn_4 = keras.layers.BatchNormalization(name='bn_4')(conv_4)
        relu_4 = keras.layers.ReLU(name='relu_4')(bn_4)

        # input = (3, 20, 64), output = (1, 18, 64)
        conv_5 = keras.layers.Conv2D(filters=64,
                                     kernel_size=3,
                                     strides=1,
                                     padding='valid',
                                     name='conv_5')(relu_4)
        bn_5 = keras.layers.BatchNormalization(name='bn_5')(conv_5)
        relu_5 = keras.layers.ReLU(name='relu_5')(bn_5)

        # input = (1, 18, 64), output = (1152,)
        flatten = keras.layers.Flatten(name='flatten')(relu_5)
        dropout_5 = keras.layers.Dropout(rate=0.5, name='dropout_5')(flatten)

        # input = (1152,), output = (100,)
        dense_6 = keras.layers.Dense(units=100, name='dense_6')(dropout_5)
        bn_6 = keras.layers.BatchNormalization(name='bn_6')(dense_6)
        relu_6 = keras.layers.ReLU(name='relu_6')(bn_6)
        dropout_6 = keras.layers.Dropout(rate=0.5, name='dropout_6')(relu_6)

        # input = (100,), output = (50,)
        dense_7 = keras.layers.Dense(units=50, name='dense_7')(dropout_6)
        bn_7 = keras.layers.BatchNormalization(name='bn_7')(dense_7)
        relu_7 = keras.layers.ReLU(name='relu_7')(bn_7)
        dropout_7 = keras.layers.Dropout(rate=0.5, name='dropout_7')(relu_7)

        # input = (50,), output = (10,)
        dense_8 = keras.layers.Dense(units=10, name='dense_8')(dropout_7)
        bn_8 = keras.layers.BatchNormalization(name='bn_8')(dense_8)
        relu_8 = keras.layers.ReLU(name='relu_8')(bn_8)
        dropout_8 = keras.layers.Dropout(rate=0.5, name='dropout_8')(relu_8)

        # input = (10,), output = (1,)
        outputs = keras.layers.Dense(units=1, name='outputs')(dropout_8)

        # Assemble and compile the full model
        self.model = keras.Model(inputs=inputs, outputs=outputs)
        self.model.compile(
            optimizer=keras.optimizers.Adam(0.01),
            loss=keras.losses.logcosh,
            # loss='mse',
            metrics=['mse', 'mae'])
Example #2
def iter_conv(data_count, enc_dim, x_train, y_train, train_deltas, epochs=1):

    #resets backend variables
    K.clear_session()

    ###################    define model    #####################
    input_dim = (1, 25, 25)
    input_frame = keras.Input(shape=input_dim)

    #x = keras.layers.Dense(enc_dim)(input_frame)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3),
                      padding='same')(input_frame)  #(x)
    #x = keras.layers.BatchNormalization()(x)

    x = layers.Conv2D(filters=128,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding='same')(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv2D(filters=32,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding='same')(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv2D(filters=64,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding='same')(x)
    out_layer = layers.Dense(25, activation='relu')(x)

    model = keras.models.Model(input_frame, out_layer)

    model.summary()

    #####################    begin training    #####################
    #   good resource for creating a custom training routine (what this is based on)
    #   https://gist.github.com/JVGD/2add7789ab83588a397bbae6ff614dbf

    optimizer = keras.optimizers.Adam(lr=0.001)
    #loss = custom_loss(5)#5 assumes a constant delta of 5 across all data

    #build the symbolic loss from the model's own input/output tensors plus a
    #placeholder for the target, so the backend functions below can be fed
    #numpy arrays directly
    target_ph = keras.Input(shape=input_dim)
    loss = K.mean(
        keras.losses.categorical_crossentropy(target_ph, model.output))
    update_op = optimizer.get_updates(params=model.trainable_weights,
                                      loss=loss)

    train = K.function(
        inputs=[model.input, target_ph],
        outputs=[loss],  #outputs=[loss, model.layers[-1].output],
        updates=update_op)

    test = K.function(inputs=[model.input, target_ph], outputs=[loss])

    for epoch in range(epochs):

        training_losses = []

        for cur_sample in range(data_count):
            #TODO apply train_deltas to loop rather than constant 5
            sample_delta = 5  #train_deltas[cur_sample]

            #loop to feedback output for delta time steps of prediction
            sample = x_train[cur_sample]
            target = y_train[cur_sample]

            #add batch size as dimension
            sample = np.expand_dims(sample, axis=0)
            target = np.expand_dims(target, axis=0)

            #K.function and model.predict both take numpy arrays, so no
            #explicit tensor conversion is needed here
            cur_input = sample
            for i in range(sample_delta):
                #calculate loss, running a training iteration
                loss_train = train([cur_input, target])
                training_losses.append(loss_train[0])

                #set next input to current output (feed the prediction back in)
                cur_input = model.predict(cur_input)

        train_loss_mean = np.mean(training_losses)

        print("Epoch ", epoch, ";  Current mean training loss: ",
              train_loss_mean)
        '''
        #Now compute test values (no training)
        losses_test = []
        for cur_sample in range(data_count)'''
    '''
    model = keras.Model(inputs=input_frame,outputs=out_layer)
    model.compile(loss=Loss,
                  optimizer='adam',
                  metrics=['accuracy'])
    '''
    return model
Example #3
import tensorflow as tf
print(tf.__version__)
tf.test.gpu_device_name()
#If you see something like '/device:GPU:0', GPU-accelerated training is enabled. If not, the CPU will work, just slower.
"""##Our Neural Network

Here's what our network will look like. Pretty straight foward, 4 layers of convolutions. 64 filters per layer. A 3x3 kernel used in each. With relu activation in each. 

We will give it all the images planes stacked into one image. We will ask it to activate one of four neurons to represent which corner it thinks the circle is hidding at the end.
"""

#Let's define a pretty straightforward convolutional neural network. It will have about 200K params, given the input image dimension of 100 x 100.
import keras
from keras.layers import Flatten, Conv2D, Dense
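
# Assumed context (not shown in this snippet): img_shape and num_corners come
# from the data-generation step described above, e.g. something like
#   img_shape = (100, 100, 4)   # hypothetical number of stacked planes
#   num_corners = 4             # one output neuron per corner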

img = keras.Input(shape=img_shape)

x = img
x = Conv2D(64, (3, 3), strides=(2, 2), activation="relu")(x)
x = Conv2D(64, (3, 3), strides=(2, 2), activation="relu")(x)
x = Conv2D(64, (3, 3), strides=(2, 2), activation="relu")(x)
x = Conv2D(64, (3, 3), strides=(2, 2), activation="relu")(x)
x = Flatten()(x)
x = Dense(100, activation='relu')(x)
x = Dense(num_corners, activation='softmax')(x)

model = keras.Model(img, x)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["acc"])
print(model.summary())
Example #4
# using: CMSSW_10_2_15_patch2/external/slc7_amd64_gcc700/bin/root
# doc from: https://keras.io/guides/functional_api/
# non-linear connectivity topologies, i.e. layers that are not connected sequentially,
# something the Sequential API cannot handle.

import keras as kr
inputs = kr.Input(shape=(32, 32, 3), name="img")
x = kr.layers.Conv2D(32, 3, activation='relu')(inputs)
x = kr.layers.Conv2D(64, 3, activation='relu')(x)
block_1_output = kr.layers.MaxPooling2D(3)(x)

x = kr.layers.Conv2D(64, 3, activation='relu', padding="same")(block_1_output)
x = kr.layers.Conv2D(64, 3, activation='relu', padding="same")(x)
block_2_output = kr.layers.add([x, block_1_output])

x = kr.layers.Conv2D(64, 3, activation='relu', padding="same")(block_2_output)
x = kr.layers.Conv2D(64, 3, activation='relu', padding="same")(x)
block_3_output = kr.layers.add([x, block_2_output])

x = kr.layers.Conv2D(64, 3, activation='relu')(block_3_output)
x = kr.layers.GlobalAveragePooling2D()(x)
x = kr.layers.Dense(254, activation='relu')(x)
x = kr.layers.Dropout(0.5)(x)
outputs = kr.layers.Dense(10)(x)

model = kr.Model(inputs, outputs, name="toy_resnet")
model.summary()
# keras.utils.plot_model(model, "mini_resnet.png", show_shapes=True)

(x_train, y_train), (x_test, y_test) = kr.datasets.cifar10.load_data()
x_train = x_train.astype("float32") / 255.
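
# The cited guide continues roughly as follows; the lines below are a sketch
# of that continuation (an addition, not part of the original snippet). The
# final Dense(10) has no softmax, hence the from_logits loss.
x_test = x_test.astype("float32") / 255.
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)

model.compile(optimizer=kr.optimizers.RMSprop(1e-3),
              loss=kr.losses.CategoricalCrossentropy(from_logits=True),
              metrics=["acc"])
model.fit(x_train[:1000], y_train[:1000], batch_size=64, epochs=1,
          validation_split=0.2)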
Example #5
    def test_output_shape(self):
        inputs = keras.Input(batch_size=16, shape=(4, ), dtype=tf.int64)
        layer = discretization.Discretization(bin_boundaries=[-0.5, 0.5, 1.5])
        outputs = layer(inputs)
        self.assertAllEqual(outputs.shape.as_list(), [16, 4])
def lstm_cross_att_focal_multitask_model():

    # DATA PIPELINING:

    ds_series_train = tf.data.Dataset.from_generator(
        data_gen_train_gender, 
        output_types=((tf.float64, {'accent': tf.float64, 'gender': tf.float32})),
        output_shapes=(((1000, 83), {'accent': (8), 'gender': (1)})) )
        
    ds_series_test = tf.data.Dataset.from_generator(
        data_gen_test_gender, 
        output_types=((tf.float64, {'accent': tf.float64, 'gender': tf.float32})),
        output_shapes=(((1000, 83), {'accent': (8), 'gender': (1)})) )
        
    ds_series_val = tf.data.Dataset.from_generator(
        data_gen_val_gender, 
        output_types=((tf.float64, {'accent': tf.float64, 'gender': tf.float32})),
        output_shapes=(((1000, 83), {'accent': (8), 'gender': (1)})) )



    print('Train Data: {}'.format(ds_series_train))
    print('Test Data: {}'.format(ds_series_test))
    print('Val Data: {}'.format(ds_series_val))

    ds_series_train_batch = ds_series_train.shuffle(125555).padded_batch(64)
    ds_series_test_batch = ds_series_test.shuffle(15000).padded_batch(64)
    ds_series_val_batch = ds_series_val.shuffle(11988).padded_batch(64)


    # MODEL ARCHITECTURE

    inputs = keras.Input(shape=(1000, 83,))

    lstm = LSTM(512, kernel_regularizer=regularizers.l1_l2(l1=0.00001, l2=0.00001), input_shape=(1000, 83), name = 'LSTM',
                                recurrent_regularizer=None, bias_regularizer=regularizers.l2(1e-4),
                                activity_regularizer=regularizers.l2(1e-5), kernel_constraint=None,
                                recurrent_constraint=None, bias_constraint=None, dropout=0.2, return_sequences=True)(inputs) 

    att, att_weight = attention()(lstm)
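    # attention() above is assumed to be a custom layer defined elsewhere; it
    # returns the attended sequence together with its attention weights.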
    #reshaped = Reshape((512,))(K.sum(att,axis=1))
    reshaped = Reshape((1512,))(tf.concat([K.sum(att,axis=1), K.sum(att,axis=2)], axis=1))

    drop1 = keras.layers.Dropout(0.2)(reshaped)
    dense = Dense(128, activation = 'sigmoid', kernel_regularizer=regularizers.l2(0.001), activity_regularizer=tf.keras.regularizers.l2(0.01))(drop1)

    drop2 = keras.layers.Dropout(0.2)(dense)
    output_1 = Dense(8, activation = 'softmax', name='accent', kernel_regularizer=regularizers.l2(0.001), activity_regularizer=tf.keras.regularizers.l2(0.01))(drop2)
    output_2 = Dense(1, activation='relu', name = 'gender', kernel_regularizer=regularizers.l2(0.001), activity_regularizer=tf.keras.regularizers.l2(0.01))(drop2)

    model = keras.Model(inputs=inputs, outputs=[output_1, output_2])
    model.summary()
    #plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)

    model.compile(optimizer='adam',
                  loss={'accent': focal_loss(gamma=5., alpha=0.5), 'gender': 'binary_crossentropy'},
                  metrics={'accent': 'accuracy'},
                  loss_weights={'accent': 10, 'gender': 0.2})

    es = EarlyStopping(monitor='val_loss', mode='min', verbose=5, patience=20)
    mc = ModelCheckpoint('../../lstm_cross_att_multitask_model_wts.h5', monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True, verbose=1)
    #tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq = 1)
        

    hist = model.fit(ds_series_train_batch, validation_data = ds_series_val_batch, epochs=100, callbacks=[es, mc])

    ############################ TESTING THE MODEL ######################################

    print()
    print()
    print('TESTING for LSTM ATTENTION with CROSSENTROPY LOSS.....................................')
    print()
    model.load_weights('../../lstm_cross_att_multitask_model_wts.h5')
    model.evaluate(ds_series_test_batch)

    return model
Example #7
def build_model(hidden, features, predict_n, look_back=10, batch_size=1):
    """
    Builds and returns the LSTM model with the parameters given
    :param hidden: number of hidden nodes
    :param features: number of variables in the example table
    :param look_back: Number of time-steps to look back before predicting
    :param batch_size: batch size for batch training
    :return:
    """
    #batch_input_shape=(batch_size, look_back, features)
    # stateful LSTMs need a fixed batch size, so specify batch_shape only
    inp = keras.Input(batch_shape=(batch_size, look_back, features))
    x = LSTM(
        hidden,
        input_shape=(look_back, features),
        stateful=True,
        batch_input_shape=(batch_size, look_back, features),
        return_sequences=True,
        # activation='relu',
        dropout=0.1,
        recurrent_dropout=0.1,
        implementation=2,
        unit_forget_bias=True,
    )(inp, training=True)
    x = Dropout(0.2)(x, training=True)
    x = LSTM(
        hidden,
        input_shape=(look_back, features),
        stateful=True,
        batch_input_shape=(batch_size, look_back, features),
        return_sequences=True,
        # activation='relu',
        dropout=0.1,
        recurrent_dropout=0.1,
        implementation=2,
        unit_forget_bias=True,
    )(x, training=True)
    x = Dropout(0.2)(x, training=True)
    x = LSTM(
        hidden,
        input_shape=(look_back, features),
        stateful=True,
        batch_input_shape=(batch_size, look_back, features),
        # activation='relu',
        dropout=0.1,
        recurrent_dropout=0.1,
        implementation=2,
        unit_forget_bias=True,
    )(x, training=True)
    x = Dropout(0.2)(x, training=True)
    out = Dense(
        predict_n,
        activation="relu",
        kernel_initializer="random_uniform",
        bias_initializer="zeros",
    )(x)
    model = keras.Model(inp, out)
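    # NOTE: every LSTM/Dropout call above passes training=True, so dropout
    # stays active at prediction time; repeated predict() calls can then be
    # averaged for Monte Carlo style uncertainty estimates.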

    # model = Sequential()
    #
    # model.add(
    #     LSTM(
    #         hidden,
    #         input_shape=(look_back, features),
    #         stateful=True,
    #         batch_input_shape=(batch_size, look_back, features),
    #         return_sequences=True,
    #         # activation='relu',
    #         dropout=0,
    #         recurrent_dropout=0,
    #         implementation=2,
    #         unit_forget_bias=True,
    #     )
    # )
    # model.add(Dropout(0.2))
    # model.add(
    #     LSTM(
    #         hidden,
    #         input_shape=(look_back, features),
    #         stateful=True,
    #         batch_input_shape=(batch_size, look_back, features),
    #         return_sequences=True,
    #         # activation='relu',
    #         dropout=0,
    #         recurrent_dropout=0,
    #         implementation=2,
    #         unit_forget_bias=True,
    #     )
    # )
    # model.add(Dropout(0.2))
    # model.add(
    #     LSTM(
    #         hidden,
    #         input_shape=(look_back, features),
    #         stateful=True,
    #         batch_input_shape=(batch_size, look_back, features),
    #         # activation='relu',
    #         dropout=0,
    #         recurrent_dropout=0,
    #         implementation=2,
    #         unit_forget_bias=True,
    #     )
    # )
    # model.add(Dropout(0.2))
    # model.add(
    #     Dense(
    #         predict_n,
    #         activation="relu",
    #         kernel_initializer="random_uniform",
    #         bias_initializer="zeros",
    #     )
    # )

    start = time()
    model.compile(loss="msle",
                  optimizer="nadam",
                  metrics=["accuracy", "mape", "mse"])
    print("Compilation Time : ", time() - start)
    plot_model(model, to_file="../figures/LSTM_model.png")
    print(model.summary())
    return model
Example #8
def _MTLSTMMixed_MatchSpace(X,
                            Y,
                            fit_model_wrapper,
                            T0=None,
                            K_fixed=0,
                            M_sizes=None,
                            dropout_rate=0.2,
                            epochs=2,
                            verbose=0,
                            hidden_length=100,
                            **kwargs):
    # could have just used the LSTM state units directly, but making this large and then using TimeDistributed to narrow it down is more expressive/powerful
    with capture_all():  # doesn't have quiet option
        import keras
    if M_sizes is None:
        M_sizes = range(1, 2 * int(np.log(Y.shape[0])))

    if T0 is None:
        T0 = X.shape[1]

    if verbose == 0:
        import os

        if ("TF_CPP_MIN_LOG_LEVEL" in os.environ
                and os.environ["TF_CPP_MIN_LOG_LEVEL"] != "2"
                and os.environ["TF_CPP_MIN_LOG_LEVEL"] != "3"):
            os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
            # Otherwise prints random info about CPU instruction sets

    Cov_F, Cov_TV, Out_pre = _split_LSTM_x_data(X, T0, K_fixed=K_fixed)
    LSTM_x = _shape_LSTM_x_data(Cov_F, Cov_TV, Out_pre)
    LSTM_y = _shape_LSTM_y_data(Out_pre, Y, T0)
    LSTM_K = LSTM_x.shape[2]
    T1 = Y.shape[1]

    # fits_single = {}
    int_layer_single = {}
    Vs_single = {}
    scores = np.zeros((len(M_sizes)))
    for i, M_size in enumerate(M_sizes):
        inp = keras.Input(
            batch_shape=(1, T0, LSTM_K), name="input"
        )  # batch_shape=(1, ...) ensures the model is trained on one case at a time
        with capture_all():  # doesn't have quiet option
            x1 = keras.layers.LSTM(units=hidden_length,
                                   return_sequences=True)(inp)  ##
            x2 = keras.layers.Dropout(rate=dropout_rate)(x1)  ##
        core = keras.layers.TimeDistributed(keras.layers.Dense(
            units=M_size, activation="elu"),
                                            name="embedding")(x2)
        output_vec = []
        for t in range(T1):
            new_output = keras.layers.Dense(units=1,
                                            activation="linear",
                                            name="yp%s" % (t))(core)
            output_vec.append(new_output)
        model = keras.models.Model(inputs=inp, outputs=output_vec)

        model.compile(loss="mse",
                      optimizer="Adam",
                      metrics=["mean_squared_error"])
        model.fit(x=LSTM_x,
                  y=LSTM_y,
                  batch_size=1,
                  epochs=epochs,
                  verbose=verbose)

        outputs_fit = model.get_layer(
            name="embedding").output  # .get_output_at(node_index = 1)
        intermediate_layer_model = keras.models.Model(inputs=model.input,
                                                      outputs=outputs_fit)

        final_weights = np.empty((T1, M_size))
        n_layers_w = len(model.get_weights())
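        # get_weights() ends with one (kernel, bias) pair per per-time-step
        # output head yp0..yp{T1-1}; the index below walks backwards over those
        # kernels, collecting each head's weights on the embedding units.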
        for t in range(T1):
            l_i = n_layers_w - 2 - 2 * t
            final_weights[t, :] = model.get_weights()[l_i][:, 0]
        V_i = np.mean(np.abs(final_weights), axis=0)

        transformer_i = LSTMTransformer(T0, K_fixed, intermediate_layer_model)

        sc_fit_i = fit_model_wrapper(transformer_i, V_i)
        # fits_single[i] = sc_fit_i
        int_layer_single[i] = intermediate_layer_model
        Vs_single[i] = V_i
        scores[i] = sc_fit_i.score  # = sc_fit_i.score_R2

    i_best = np.argmin(scores)
    best_M_size = M_sizes[i_best]
    V_best = Vs_single[i_best]
    intermediate_layer_model = int_layer_single[i_best]
    transformer = LSTMTransformer(T0, K_fixed, intermediate_layer_model)
    return transformer, V_best, best_M_size, V_best
def test_minimal_rnn_cell_layer():
    class MinimalRNNCell(keras.layers.Layer):
        def __init__(self, units, **kwargs):
            self.units = units
            self.state_size = units
            super(MinimalRNNCell, self).__init__(**kwargs)

        def build(self, input_shape):
            self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                          initializer='uniform',
                                          name='kernel')
            self.recurrent_kernel = self.add_weight(shape=(self.units,
                                                           self.units),
                                                    initializer='uniform',
                                                    name='recurrent_kernel')
            self.built = True

        def call(self, inputs, states):
            prev_output = states[0]
            h = keras.backend.dot(inputs, self.kernel)
            output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
            return output, [output]

        def get_config(self):
            config = {'units': self.units}
            base_config = super(MinimalRNNCell, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

    # Test basic case.
    x = keras.Input((None, 5))
    cell = MinimalRNNCell(32)
    layer = recurrent.RNN(cell)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test basic case serialization.
    x_np = np.random.random((6, 5, 5))
    y_np = model.predict(x_np)
    weights = model.get_weights()
    config = layer.get_config()
    with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
        layer = recurrent.RNN.from_config(config)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.set_weights(weights)
    y_np_2 = model.predict(x_np)
    assert_allclose(y_np, y_np_2, atol=1e-4)

    # Test stacking.
    cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)]
    layer = recurrent.RNN(cells)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test stacked RNN serialization.
    x_np = np.random.random((6, 5, 5))
    y_np = model.predict(x_np)
    weights = model.get_weights()
    config = layer.get_config()
    with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
        layer = recurrent.RNN.from_config(config)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.set_weights(weights)
    y_np_2 = model.predict(x_np)
    assert_allclose(y_np, y_np_2, atol=1e-4)
Example #10
    def __init__(self, batch_size):
        #self.khaiii = KhaiiiApi()
        # LSTM Input : (795 + 792)*1
        # LSTM Output : (1000)
        # LSTM Output with time series : (1000 (Feature) * 10 (times))
        # discriminator Input : 1000 * 10
        # discriminator Output : 1 (0 or 1)
        self.batch_size = batch_size
        self.pv_size = 792
        self.stock_size = 795
        self.gen_output = 636
        self.gen_timestep = 10
        #self.scaler = MinMaxScaler()
        self.article = {
            "host": '127.0.0.1',
            "port": 3306,
            "user": '******',
            "password": '******',
            "db": 'mydb',
            'charset': 'utf8'
        }
        self.index = {
            "host": '127.0.0.1',
            "port": 3306,
            "user": '******',
            "password": '******',
            "db": 'mydb',
            'charset': 'utf8'
        }

        self.gen_feature = self.pv_size + self.stock_size
        self.dis_input = self.gen_output * self.gen_timestep
        self.dis_output = 1

        print("Start Building Data")
        self.GAN_trainY = pickle.load(open('v4_trainY.sav', 'rb'))
        GAN_trainX_STOCK = pickle.load(open('v4_trainX_STOCK.sav', 'rb'))
        GAN_trainX_PV = pickle.load(open('trainX_PV.sav', 'rb'))
        self.GAN_testY = pickle.load(open('v4_testY.sav', 'rb'))
        GAN_testX_STOCK = pickle.load(open('v4_testX_STOCK.sav', 'rb'))
        GAN_testX_PV = pickle.load(open('testX_PV.sav', 'rb'))
        GAN_trainX_PV = np.delete(GAN_trainX_PV, (0), axis=0)
        GAN_testX_PV = np.delete(GAN_testX_PV, (0), axis=0)
        self.GAN_trainX = np.concatenate((GAN_trainX_STOCK, GAN_trainX_PV),
                                         axis=2)
        self.GAN_testX = np.concatenate((GAN_testX_STOCK, GAN_testX_PV),
                                        axis=2)
        self.GAN_trainSTOCK = pickle.load(open('v4_trainSTOCK.sav', 'rb'))
        self.GAN_testSTOCK = pickle.load(open('v4_testSTOCK.sav', 'rb'))
        print("Training Data Processing Finished")
        self.discriminator = self.build_discriminator()
        self.generator = self.build_generator(LayerName='LSTM')

        #self.generator.compile(loss=root_mean_squared_error,
        #optimizer=keras.optimizers.Adam(0.00002, 0.5),metrics=['accuracy'])
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=keras.optimizers.Adam(
                                       0.000001, 0.5),
                                   metrics=['accuracy'])
        self.discriminator.trainable = False

        combined_input = keras.Input(shape=((self.gen_timestep),
                                            self.gen_feature),
                                     name='stock_news_input')
        #combined = articles + stock data
        gen_stock = self.generator(inputs=combined_input)
        #gen_stock : the 795-dimensional result 10 days later

        past_stock = keras.Input(shape=((self.gen_timestep), self.gen_output),
                                 name='past_stock')
        #past_stock : data from the past 9 days
        combined_stock = keras.layers.concatenate(
            inputs=[past_stock, gen_stock], axis=1, name='combined_stock')
        #combined_stock : 10 days of data in total, past + gen_stock

        valid = self.discriminator(inputs=combined_stock)
        self.combined = keras.Model(inputs=[combined_input, past_stock],
                                    outputs=valid)
        self.combined.compile(loss='binary_crossentropy',
                              optimizer=keras.optimizers.Adam(0.000001, 0.5),
                              metrics=['accuracy'])
        self.combined.summary()
Example #11
    def test_add_dynamic_shape(self):
        i1 = keras.Input(batch_shape=(4, None), dtype='float32')
        i2 = keras.Input(batch_shape=(4, 5), dtype='float32')
        layer = keras.layers.Add()
        o = layer([i1, i2])
        self.assertListEqual(o.shape.as_list(), [4, 5])
Example #12
    def test_unspecified_weight_dim_fails(self):
        input_tensor = keras.Input(shape=(32, ))
        layer = einsum_dense.EinsumDense(equation="ab,zd->ad", output_shape=64)
        with self.assertRaisesRegex(
                ValueError, ".*Weight dimension 'z' did not have a match "):
            _ = layer(input_tensor)
Example #13
def input_nub_text_handler(variable, input_dataframe):
    """
    Create an input nub for text data, by:

     - Finding all derived variables. With a variable `name` and a sequence length of 4, there would be 4 derived
     variables, `name_0` through `name_3`
     - Creating an appropriately shaped input layer, embedding layer, and the subsequent layers
     - Returning both the input layer and the last layer (both are necessary for creating a model)

    :param variable: Name of the variable
    :type variable: str
    :param input_dataframe: A dataframe, containing either the specified variable, or derived variables
    :type input_dataframe: pandas.DataFrame
    :return: A tuple containing the input layer, and the last layer of the nub
    """
    logging.info('Creating text input nub for variable: {}'.format(variable))

    # Get transformed data for shaping
    if variable in input_dataframe.columns:
        variable_list = [variable]
    else:
        variable_name_prefix = variable + '_'
        variable_list = list(
            filter(lambda x: x.startswith(variable_name_prefix),
                   input_dataframe.columns))

    logging.info('Text var has variable / derived variable list: {}'.format(
        variable_list))
    transformed = input_dataframe[variable_list].values

    # Set up sequence length for input_layer
    if len(transformed.shape) >= 2:
        input_sequence_length = int(transformed.shape[1])
    else:
        input_sequence_length = 1

    # Get the vocab size (number of rows in the embedding). The additional offset accounts for 1 for length vs
    # zero-based indexing, 1 for the unknown token, and the remainder as a safety margin
    vocab_size = int(numpy.max(transformed)) + 4

    # Determine the embedding output size (number of columns in the embedding)
    # TODO There must be a better heuristic
    embedding_output_dim = 200

    logging.info(
        'Creating embedding for text_var: {}, with input_sequence_length: {}, vocab size: {}, '
        'and embedding_output_dim: {}'.format(variable, input_sequence_length,
                                              vocab_size,
                                              embedding_output_dim))

    # Create & stack layers
    input_layer = keras.Input(shape=(input_sequence_length, ),
                              name='input_{}'.format(variable))

    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_output_dim,
                                input_length=input_sequence_length,
                                name='embedding_{}'.format(variable))

    x = embedding_layer(input_layer)
    x = Bidirectional(LSTM(128, name='lstm_{}'.format(variable)),
                      name='bidirectional_lstm_{}'.format(variable))(x)

    return input_layer, x
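
# Hypothetical usage sketch (the names below are illustrative, not from the
# original code):
#   input_layer, nub_out = input_nub_text_handler('title', input_dataframe)
#   preds = Dense(1, activation='sigmoid')(nub_out)
#   model = keras.Model(inputs=input_layer, outputs=preds)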
Example #14
    new_df['font_size'] = df.font_size / df.font_size.max()
    new_df['chars'] = df.chars / df.chars.max()
    new_df['lines'] = df.lines / df.lines.max()
    return new_df


# In[107]:


keras_ready = convert_to_padded_seqs(scale_df(df))


# In[115]:


inp = keras.Input((None, 6))
rnn = LSTM(16, activation='tanh', dropout=0.5, return_sequences=True)(inp)
dense1 = TimeDistributed(Dense(3, activation='linear'))(rnn)
dense2 = TimeDistributed(Dense(3, activation='relu'))(rnn)
output = Concatenate()([dense1, dense2])
model = keras.models.Model(inputs=inp, outputs=output)
model.compile(loss='mean_squared_error',
              optimizer='adam')


# In[129]:


X = keras_ready[:,:-1,:]
Y = keras_ready[:,1:,:]
h = model.fit(x=X, y=Y, batch_size=1, epochs=10)
numpy.random.seed(seed)
# load dataset
dataframe = read_csv(
    "https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data ",
    header=None)
dataset = dataframe.values
# split into input (X) and output (Y) variables
X = dataset[:, 0:60].astype(float)
Y = dataset[:, 60]

# encode class values as integers
dataframe[60] = [0 if x == 'R' else 1 for x in dataframe[60]]
encoded_Y = dataframe[60]
Y = to_categorical(encoded_Y)

inputs = keras.Input(shape=(60, ))
x = Dense(60, activation='relu')(inputs)
x = Dense(30, activation='relu')(x)
outputs = Dense(2, activation='sigmoid')(x)

#creating model

model = keras.Model(inputs, outputs)

# Compile model
sgd = SGD(lr=0.01, momentum=0.8, decay=0.0, nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

model.fit(X, Y, epochs=300)

def test_rnn_cell_with_constants_layer_passing_initial_state():
    class RNNCellWithConstants(keras.layers.Layer):
        def __init__(self, units, **kwargs):
            self.units = units
            self.state_size = units
            super(RNNCellWithConstants, self).__init__(**kwargs)

        def build(self, input_shape):
            if not isinstance(input_shape, list):
                raise TypeError('expects constants shape')
            [input_shape, constant_shape] = input_shape
            # will (and should) raise if more than one constant passed

            self.input_kernel = self.add_weight(shape=(input_shape[-1],
                                                       self.units),
                                                initializer='uniform',
                                                name='kernel')
            self.recurrent_kernel = self.add_weight(shape=(self.units,
                                                           self.units),
                                                    initializer='uniform',
                                                    name='recurrent_kernel')
            self.constant_kernel = self.add_weight(shape=(constant_shape[-1],
                                                          self.units),
                                                   initializer='uniform',
                                                   name='constant_kernel')
            self.built = True

        def call(self, inputs, states, constants):
            [prev_output] = states
            [constant] = constants
            h_input = keras.backend.dot(inputs, self.input_kernel)
            h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
            h_const = keras.backend.dot(constant, self.constant_kernel)
            output = h_input + h_state + h_const
            return output, [output]

        def get_config(self):
            config = {'units': self.units}
            base_config = super(RNNCellWithConstants, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

    # Test basic case.
    x = keras.Input((None, 5))
    c = keras.Input((3, ))
    s = keras.Input((32, ))
    cell = RNNCellWithConstants(32)
    layer = recurrent.RNN(cell)
    y = layer(x, initial_state=s, constants=c)
    model = keras.models.Model([x, s, c], y)
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(
        [np.zeros((6, 5, 5)),
         np.zeros((6, 32)),
         np.zeros((6, 3))], np.zeros((6, 32)))

    # Test basic case serialization.
    x_np = np.random.random((6, 5, 5))
    s_np = np.random.random((6, 32))
    c_np = np.random.random((6, 3))
    y_np = model.predict([x_np, s_np, c_np])
    weights = model.get_weights()
    config = layer.get_config()
    custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
    with keras.utils.CustomObjectScope(custom_objects):
        layer = recurrent.RNN.from_config(config.copy())
    y = layer(x, initial_state=s, constants=c)
    model = keras.models.Model([x, s, c], y)
    model.set_weights(weights)
    y_np_2 = model.predict([x_np, s_np, c_np])
    assert_allclose(y_np, y_np_2, atol=1e-4)

    # verify that state is used
    y_np_2_different_s = model.predict([x_np, s_np + 10., c_np])
    with pytest.raises(AssertionError):
        assert_allclose(y_np, y_np_2_different_s, atol=1e-4)

    # test flat list inputs
    with keras.utils.CustomObjectScope(custom_objects):
        layer = recurrent.RNN.from_config(config.copy())
    y = layer([x, s, c])
    model = keras.models.Model([x, s, c], y)
    model.set_weights(weights)
    y_np_3 = model.predict([x_np, s_np, c_np])
    assert_allclose(y_np, y_np_3, atol=1e-4)
Example #17
def createModel(train_data, val_data, train_labels, val_labels, test_data):
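    # Assumes helpers/constants defined elsewhere in the script: submodel(),
    # encodeData(), decodeData(), computeRegression(), number_of_features,
    # encoding_dim, number_of_neurons and epochs.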
    mean = np.mean(train_data, axis=0)
    std = np.std(train_data, axis=0)
    print(mean)

    std_train_data = (train_data - mean) / std
    std_val_data = (val_data - mean) / std
    std_test_data = (test_data - mean) / std

    input_layer = keras.Input(shape=(number_of_features, ))
    hidden_layer_1 = submodel(input_layer, name="1")

    encoded = layers.Dense(encoding_dim, activation='relu',
                           name="2_1")(hidden_layer_1)
    hidden_layer_2 = layers.BatchNormalization(name="2_2")(encoded)
    hidden_layer_2 = layers.Dropout(0.5, name="2_3")(hidden_layer_2)

    hidden_layer_3_1 = submodel(hidden_layer_2, name="3")

    hidden_layer_3_2 = submodel(hidden_layer_2, name="4")

    reg = layers.Dense(1, name="reg_out")(hidden_layer_3_1)
    decoded = layers.Dense(number_of_features,
                           name="dec_out")(hidden_layer_3_2)

    model = keras.Model(inputs=input_layer, outputs=[reg, decoded])

    model.compile(loss={
        'reg_out': 'mean_squared_error',
        'dec_out': 'mean_squared_error'
    },
                  optimizer=Adam(lr=0.001, decay=0.001 / epochs))

    history = model.fit(std_train_data, {
        'reg_out': train_labels,
        'dec_out': std_train_data
    },
                        epochs=epochs,
                        batch_size=16,
                        shuffle=True,
                        validation_data=(std_val_data, {
                            'reg_out': val_labels,
                            'dec_out': std_val_data
                        }))

    encoder = keras.Model(input_layer, encoded)

    regress_input = keras.Input(shape=(encoding_dim, ))
    hidden_layer = layers.BatchNormalization()(regress_input)
    hidden_layer = layers.Dropout(0.5)(hidden_layer)
    hidden_layer = layers.Dense(number_of_neurons,
                                activation='relu')(hidden_layer)
    hidden_layer = layers.BatchNormalization()(hidden_layer)
    hidden_layer = layers.Dropout(0.5)(hidden_layer)
    regress_output = layers.Dense(1)(hidden_layer)
    regression = keras.Model(regress_input, regress_output)

    regression.layers[1].set_weights(model.get_layer("2_2").get_weights())
    regression.layers[2].set_weights(model.get_layer("2_3").get_weights())
    regression.layers[3].set_weights(model.get_layer("3_1").get_weights())
    regression.layers[4].set_weights(model.get_layer("3_2").get_weights())
    regression.layers[5].set_weights(model.get_layer("3_3").get_weights())
    regression.layers[6].set_weights(model.get_layer("reg_out").get_weights())

    decoder_input = keras.Input(shape=(encoding_dim, ))
    hidden_layer = layers.BatchNormalization()(decoder_input)
    hidden_layer = layers.Dropout(0.5)(hidden_layer)
    hidden_layer = layers.Dense(number_of_neurons,
                                activation='relu')(hidden_layer)
    hidden_layer = layers.BatchNormalization()(hidden_layer)
    hidden_layer = layers.Dropout(0.5)(hidden_layer)
    decoder_output = layers.Dense(number_of_features)(hidden_layer)
    decoder = keras.Model(decoder_input, decoder_output)

    decoder.layers[1].set_weights(model.get_layer("2_2").get_weights())
    decoder.layers[2].set_weights(model.get_layer("2_3").get_weights())
    decoder.layers[3].set_weights(model.get_layer("4_1").get_weights())
    decoder.layers[4].set_weights(model.get_layer("4_2").get_weights())
    decoder.layers[5].set_weights(model.get_layer("4_3").get_weights())
    decoder.layers[6].set_weights(model.get_layer("dec_out").get_weights())

    encoder.save("encoder.h5")
    regression.save("regression.h5")
    decoder.save("decoder.h5")

    encods = encodeData(std_test_data, encoder)
    decodeData(encods, decoder, mean, std)
    computeRegression(encods, regression)
Example #18
    def test_shared_objects(self):
        class OuterLayer(keras.layers.Layer):
            def __init__(self, inner_layer):
                super(OuterLayer, self).__init__()
                self.inner_layer = inner_layer

            def call(self, inputs):
                return self.inner_layer(inputs)

            def get_config(self):
                return {
                    'inner_layer':
                    generic_utils.serialize_keras_object(self.inner_layer)
                }

            @classmethod
            def from_config(cls, config):
                return cls(
                    generic_utils.deserialize_keras_object(
                        config['inner_layer']))

        class InnerLayer(keras.layers.Layer):
            def __init__(self):
                super(InnerLayer, self).__init__()
                self.v = self.add_weight(name='v', shape=[], dtype=tf.float32)

            def call(self, inputs):
                return self.v + inputs

            @classmethod
            def from_config(cls, config):
                return cls()

        # Create a model with 2 output layers that share the same inner layer.
        inner_layer = InnerLayer()
        outer_layer_1 = OuterLayer(inner_layer)
        outer_layer_2 = OuterLayer(inner_layer)
        input_ = keras.Input(shape=(1, ))
        model = keras.Model(
            inputs=input_,
            outputs=[outer_layer_1(input_),
                     outer_layer_2(input_)])

        # Changes to the shared layer should affect both outputs.
        model.layers[1].inner_layer.v.assign(5)
        self.assertAllEqual(model(1), [6.0, 6.0])
        model.layers[1].inner_layer.v.assign(3)
        self.assertAllEqual(model(1), [4.0, 4.0])

        # After loading, changes to the shared layer should still affect both
        # outputs.
        def _do_assertions(loaded):
            loaded.layers[1].inner_layer.v.assign(5)
            self.assertAllEqual(loaded(1), [6.0, 6.0])
            loaded.layers[1].inner_layer.v.assign(3)
            self.assertAllEqual(loaded(1), [4.0, 4.0])
            loaded.layers[2].inner_layer.v.assign(5)
            self.assertAllEqual(loaded(1), [6.0, 6.0])
            loaded.layers[2].inner_layer.v.assign(3)
            self.assertAllEqual(loaded(1), [4.0, 4.0])

        # We'd like to make sure we only attach shared object IDs when strictly
        # necessary, so we'll recursively traverse the generated config to count
        # whether we have the exact number we expect.
        def _get_all_keys_recursive(dict_or_iterable):
            if isinstance(dict_or_iterable, dict):
                for key in dict_or_iterable.keys():
                    yield key
                for key in _get_all_keys_recursive(dict_or_iterable.values()):
                    yield key
            elif isinstance(dict_or_iterable, string_types):
                return
            else:
                try:
                    for item in dict_or_iterable:
                        for key in _get_all_keys_recursive(item):
                            yield key
                # Not an iterable or dictionary
                except TypeError:
                    return

        with generic_utils.CustomObjectScope({
                'OuterLayer': OuterLayer,
                'InnerLayer': InnerLayer
        }):

            # Test saving and loading to disk
            save_format = testing_utils.get_save_format()
            saved_model_dir = self._save_model_dir()
            keras.models.save_model(model,
                                    saved_model_dir,
                                    save_format=save_format)
            loaded = keras.models.load_model(saved_model_dir)
            _do_assertions(loaded)

            # Test recreating directly from config
            config = model.get_config()
            key_count = collections.Counter(_get_all_keys_recursive(config))
            self.assertEqual(key_count[generic_utils.SHARED_OBJECT_KEY], 2)
            loaded = keras.Model.from_config(config)
            _do_assertions(loaded)
Example #19
    )  # 6 variables in total, 6 columns per variable: t-5, t-4, t-3, t-2, t-1, t; the shape should be (original length - step_size) * ((step_size + 1) * fea_num)

    # split into train and test sets
    values = reframed.values

    n_train_days = 61 - 1
    train = values[:n_train_days, :]
    test = values[n_train_days:, :]

    return train, test, scaler


step_size = 5
feature_num = 6

generator_input = keras.Input(shape=(step_size, feature_num))
x = layers.LSTM(75, return_sequences=True)(generator_input)
#x = layers.Dropout(0.2)(x)
x = layers.LSTM(25)(x)
x = layers.Dense(1)(x)
x = layers.LeakyReLU()(x)
generator = keras.models.Model(generator_input, x)
generator.summary()

discriminator_input = layers.Input(shape=(step_size + 1, 1))
y = layers.Dense(72)(discriminator_input)
y = layers.LeakyReLU(alpha=0.05)(y)
y = layers.Dense(100)(y)
y = layers.LeakyReLU(alpha=0.05)(y)
y = layers.Dense(10)(y)
y = layers.LeakyReLU(alpha=0.05)(y)
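
# The snippet is cut off here; a plausible completion (an assumption, not part
# of the original) flattens the per-step features and ends the discriminator
# with a single sigmoid unit:
y = layers.Flatten()(y)
y = layers.Dense(1, activation='sigmoid')(y)
discriminator = keras.models.Model(discriminator_input, y)
discriminator.compile(optimizer='adam', loss='binary_crossentropy')
discriminator.summary()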
Example #20
def _make_graph_network(input_size, output_size):
    inputs = keras.Input(input_size)
    x = keras.layers.Dense(8, activation='relu')(inputs)
    y = keras.layers.Dense(output_size)(x)
    return keras.Model(inputs=inputs, outputs=y)
Example #21
from skimage.metrics import structural_similarity as ssim
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import mnist
import keras
from keras import layers
from tensorflow.keras import layers, losses
from keras.callbacks import TensorBoard

#tensorboard --logdir=/tmp/autoencoder

input_img = keras.Input(shape=(28, 28, 1))

x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = layers.MaxPool2D((2, 2), padding='same')(x)

x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = layers.MaxPool2D((2, 2), padding='same')(x)

x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = layers.MaxPooling2D((2, 2), padding='same')(x)

x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = layers.UpSampling2D((2, 2))(x)

x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = layers.UpSampling2D((2, 2))(x)
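
# The decoder is cut off here; the usual completion (a sketch following the
# standard Keras convolutional-autoencoder tutorial, so an assumption rather
# than part of the original snippet):
x = layers.Conv2D(16, (3, 3), activation='relu')(x)
x = layers.UpSampling2D((2, 2))(x)
decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')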
Example #22
    def testRecompileWithChangingProgramCacheSize(self):
        # This test is thanks to iperov,
        # who reported https://github.com/plaidml/plaidml/issues/274,
        # demonstrating a case where exceeding certain number of ops
        # causes recompiling of kernels (the encoder is slightly modified from
        # his example for reproducibility)

        shape = (64, 64, 3)
        LeakyReLU = keras.layers.LeakyReLU

        def encflow(x):
            x = LeakyReLU()(keras.layers.Conv2D(128,
                                                5,
                                                strides=2,
                                                padding="same")(x))
            x = keras.layers.Conv2D(128, 5, strides=2, padding="same")(x)
            x = keras.layers.Conv2D(256, 5, strides=2, padding="same")(x)
            x = keras.layers.Conv2D(256, 5, strides=2, padding="same")(x)
            x = keras.layers.Conv2D(256, 5, strides=2, padding="same")(x)
            x = keras.layers.Conv2D(512, 5, strides=2, padding="same")(x)
            x = keras.layers.Conv2D(512, 5, strides=2, padding="same")(x)
            x = keras.layers.Conv2D(1024, 5, strides=2, padding="same")(x)
            x = keras.layers.Conv2D(1024, 5, strides=2, padding="same")(x)
            x = keras.layers.Conv2D(1024, 5, strides=2, padding="same")(x)
            x = keras.layers.Dense(64)(keras.layers.Flatten()(x))
            x = keras.layers.Dense(4 * 4 * 1024)(x)
            x = keras.layers.Reshape((4, 4, 1024))(x)
            x = keras.layers.Conv2DTranspose(512, 3, strides=2,
                                             padding="same")(x)
            return x

        def decflow(x):
            x = x[0]
            x = LeakyReLU()(keras.layers.Conv2DTranspose(512,
                                                         3,
                                                         strides=2,
                                                         padding="same")(x))
            x = keras.layers.Conv2DTranspose(256, 3, strides=2,
                                             padding="same")(x)
            x = keras.layers.Conv2DTranspose(128, 3, strides=2,
                                             padding="same")(x)
            x = keras.layers.Conv2D(3, 5, strides=1, padding="same")(x)
            return x

        def modelify(model_functor):
            def func(tensor):
                return keras.models.Model(tensor, model_functor(tensor))

            return func

        encoder = modelify(encflow)(keras.Input(shape))
        decoder1 = modelify(decflow)(
            [keras.Input(pkb.int_shape(x)[1:]) for x in encoder.outputs])
        decoder2 = modelify(decflow)(
            [keras.Input(pkb.int_shape(x)[1:]) for x in encoder.outputs])

        inp = x = keras.Input(shape)
        code = encoder(x)
        x1 = decoder1(code)
        x2 = decoder2(code)

        loss = pkb.mean(pkb.square(inp - x1)) + pkb.mean(pkb.square(inp - x2))
        train_func = pkb.function(
            [inp], [loss],
            keras.optimizers.Adam().get_updates(
                loss, encoder.trainable_weights + decoder1.trainable_weights +
                decoder2.trainable_weights))
        view_func1 = pkb.function([inp], [x1])
        view_func2 = pkb.function([inp], [x2])

        for i in range(5):
            print("Loop %i" % i, flush=True)
            data = np.zeros((1, 64, 64, 3))
            train_func([data])
            view_func1([data])
            view_func2([data])
            print("Saving weights", flush=True)
            encoder.save_weights(r"testweights.h5")
            decoder1.save_weights(r"testweights1.h5")
            decoder2.save_weights(r"testweights2.h5")
def fer_vgg(input_shape=(48, 48, 1), input_classes=7):
    inputs = keras.Input(shape=input_shape)
    # Conv Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = Dropout(0.25)(x, training=True)

    # Conv Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.001))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = Dropout(0.25)(x, training=True)

    # Conv Block 3
    x = Conv2D(256, (3, 3), padding='same', activation='relu')(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.001))(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.001))(x)
    x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = Dropout(0.25)(x, training=True)

    # Conv Block 4
    x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.001))(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.001))(x)
    x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = Dropout(0.25)(x, training=True)

    # Conv Block 5
    x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.001))(x)
    x = Dropout(0.25)(x, training=True)
    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(0.001))(x)
    x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = Dropout(0.25)(x, training=True)

    # FC Layers
    x = Flatten()(x)
    #x = Dense(4096, activation='relu')(x)
    #x = Dropout(0.25)(x, training = True)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.25)(x, training=True)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.25)(x)
    output = Dense(input_classes, activation='softmax')(x)

    model = keras.Model(inputs, output)
    return model
Example #24
def fluoro_model(X_talos, y_talos, X_val, y_val, params):
    def root_mean_squared_error(y_true, y_pred):
        return keras.backend.sqrt(
            keras.backend.mean(keras.backend.square(y_pred - y_true)))

    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)

    # Hyperparameters
    # regularizer = keras.regularizers.l1_l2(l1 = params['regularizer_l1'], l2 = params['regularizer_l2'])
    regularizer = keras.regularizers.l1_l2(l1=0.05, l2=0.2)
    # activation_fn = params['activation_fn']
    # kern_init = params['kern_init']

    activation_fn = 'elu'
    kern_init = 'glorot_uniform'

    # conv_1_filters = params['conv_1_filters']
    # conv_1_kernel = (params['conv_1_kernel'],params['conv_1_kernel'])
    # conv_1_strides = (params['conv_1_strides'],params['conv_1_strides'])
    conv_1_padding = 'valid'

    conv_1_filters = 50
    conv_1_kernel = (10, 10)
    conv_1_strides = (2, 2)

    # spatial_drop_rate_1 = params['spatial_drop_rate_1']

    spatial_drop_rate_1 = 0.3

    pool_1_size = (params['pool_1_size'], params['pool_1_size'])
    pool_1_padding = 'same'

    # pool_1_size = (2,2)

    conv_2_filters = params['conv_2_filters']
    conv_2_kernel = (params['conv_2_kernel'], params['conv_2_kernel'])
    conv_2_strides = (params['conv_2_strides'], params['conv_2_strides'])
    conv_2_padding = 'same'

    # conv_2_filters = 80
    # conv_2_kernel = (3,3)
    # conv_2_strides = (1,1)

    pool_2_size = (params['pool_2_size'], params['pool_2_size'])
    pool_2_padding = 'same'

    # pool_2_size = (2,2)

    # conv_3_filters = params['conv_3_filters']
    # conv_3_kernel = (params['conv_3_kernel'],params['conv_3_kernel'])
    # conv_3_strides = (params['conv_3_strides'],params['conv_3_strides'])
    conv_3_padding = 'valid'

    conv_3_filters = 80
    conv_3_kernel = (2, 2)
    conv_3_strides = (1, 1)

    pool_3_size = (2, 2)
    pool_3_padding = 'valid'

    # dense_1_f_units = params['dense_1_f_units']
    dense_1_f_bias = True

    dense_1_f_units = 80

    # dense_2_f_units = params['dense_2_f_units']
    dense_2_f_units = 120
    dense_2_f_bias = True

    # dense_3_f_units = params['dense_3_f_units']
    dense_3_f_units = 120
    dense_3_f_bias = True

    # dense_1_ca_units = params['dense_1_ca_units']
    dense_1_ca_units = 60
    dense_1_ca_bias = True

    # dense_2_co_units = params['dense_2_co_units']
    dense_2_co_units = 80
    dense_2_co_bias = True

    # drop_1_comb_rate = params['drop_1_comb_rate']
    drop_1_comb_rate = 0.1

    # dense_3_co_units = params['dense_3_co_units']
    dense_3_co_units = 80
    dense_3_co_bias = True

    main_output_units = 6
    main_output_act = 'linear'

    # model_opt = params['model_opt'](lr=params['learning_rate'])
    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error

    # model_epochs = params['model_epochs']
    # model_batchsize = params['model_batchsize']
    model_epochs = 30
    model_batchsize = 10

    input_fluoro_1 = keras.Input(shape=img_input_shape,
                                 dtype='float32',
                                 name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape,
                                 dtype='float32',
                                 name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6, ), dtype='float32', name='cali_inpt')

    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters,
                                   kernel_size=conv_1_kernel,
                                   strides=conv_1_strides,
                                   padding=conv_1_padding,
                                   activation=activation_fn,
                                   input_shape=img_input_shape,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(
        rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size,
                                         padding=pool_1_padding,
                                         data_format=channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters,
                                   kernel_size=conv_2_kernel,
                                   strides=conv_2_strides,
                                   padding=conv_2_padding,
                                   activation=activation_fn,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size,
                                         padding=pool_2_padding,
                                         data_format=channel_order)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters,
                                   kernel_size=conv_3_kernel,
                                   strides=conv_3_strides,
                                   padding=conv_3_padding,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size,
                                         padding=pool_3_padding,
                                         data_format=channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_1_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_2_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_3_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_3_f_1')(dense_2_f_1)

    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters,
                                   kernel_size=conv_1_kernel,
                                   strides=conv_1_strides,
                                   padding=conv_1_padding,
                                   activation=activation_fn,
                                   input_shape=img_input_shape,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(
        rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size,
                                         padding=pool_1_padding,
                                         data_format=channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters,
                                   kernel_size=conv_2_kernel,
                                   strides=conv_2_strides,
                                   padding=conv_2_padding,
                                   activation=activation_fn,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size,
                                         padding=pool_2_padding,
                                         data_format=channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters,
                                   kernel_size=conv_3_kernel,
                                   strides=conv_3_strides,
                                   padding=conv_3_padding,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size,
                                         padding=pool_3_padding,
                                         data_format=channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_1_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_2_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_3_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_3_f_2')(dense_2_f_2)

    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units,
                                      activation=activation_fn,
                                      use_bias=dense_1_ca_bias,
                                      kernel_initializer=kern_init,
                                      name='dense_1_cali')(input_cali)

    dense_1_comb = keras.layers.concatenate(
        [dense_3_f_1, dense_3_f_2, dense_1_cali], name='dense_1_comb')

    dense_2_comb = keras.layers.Dense(units=dense_2_co_units,
                                      activation=activation_fn,
                                      use_bias=dense_2_co_bias,
                                      kernel_initializer=kern_init,
                                      name='dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units,
                                      activation=activation_fn,
                                      use_bias=dense_3_co_bias,
                                      kernel_initializer=kern_init,
                                      name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units,
                                     activation=main_output_act,
                                     name='main_output')(dense_3_comb)

    model = keras.Model(inputs=[input_fluoro_1, input_fluoro_2, input_cali],
                        outputs=main_output)

    keras.utils.plot_model(model,
                           os.path.abspath(
                               os.path.join(save_dir, expr_name + '_' +
                                            expr_no + '.png')),
                           show_shapes=True)

    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])

    result = model.fit(x=[
        np.expand_dims(X_talos[0][:, 0, :, :], axis=3),
        np.expand_dims(X_talos[0][:, 1, :, :], axis=3), X_talos[1]
    ],
                       y=y_talos,
                       epochs=model_epochs,
                       batch_size=model_batchsize,
                       validation_data=([
                           np.expand_dims(X_val[0][:, 0, :, :], axis=3),
                           np.expand_dims(X_val[0][:, 1, :, :], axis=3),
                           X_val[1]
                       ], y_val),
                       shuffle=True,
                       verbose=1)

    return result, model
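# A rough usage sketch for fluoro_model above (assumptions, not from the
# original code): the function reads the module-level globals save_dir,
# expr_name and expr_no for keras.utils.plot_model (which also needs pydot /
# graphviz installed), and only the pool/conv-2 entries of `params` are still
# read; the remaining hyperparameters are hard-coded inside the function.
# Shapes follow the np.expand_dims calls in model.fit: X[0] holds image pairs
# of shape (N, 2, 128, 128), X[1] holds (N, 6) calibration vectors, y is (N, 6).
import numpy as np

save_dir, expr_name, expr_no = '.', 'fluoro', '0'   # hypothetical values

params = {
    'pool_1_size': 2,
    'conv_2_filters': 80,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'pool_2_size': 2,
}

# Tiny random arrays, just enough to exercise the graph end to end.
X_train = [np.random.rand(8, 2, 128, 128).astype('float32'),
           np.random.rand(8, 6).astype('float32')]
y_train = np.random.rand(8, 6).astype('float32')
X_valid = [np.random.rand(4, 2, 128, 128).astype('float32'),
           np.random.rand(4, 6).astype('float32')]
y_valid = np.random.rand(4, 6).astype('float32')

history, trained_model = fluoro_model(X_train, y_train, X_valid, y_valid, params)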
Example #25
 def test_output_dtype(self, dtype):
     inputs = keras.Input(batch_size=16, shape=(4, ), dtype="float32")
     layer = discretization.Discretization(bin_boundaries=[-0.5, 0.5, 1.5],
                                           dtype=dtype)
     outputs = layer(inputs)
     self.assertAllEqual(outputs.dtype, dtype)
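# A small illustration (an assumption, not taken from the test above) of what
# the Discretization layer under test does: bin_boundaries=[-0.5, 0.5, 1.5]
# bucketizes each value into one of 4 bins indexed 0..3. The public
# tf.keras.layers.Discretization path is used here instead of the test's
# internal `discretization` module.
import numpy as np
import tensorflow as tf

demo_layer = tf.keras.layers.Discretization(bin_boundaries=[-0.5, 0.5, 1.5])
print(demo_layer(np.array([[-1.0, 0.0, 1.0, 2.0]])))
# Expected bin indices: [[0, 1, 2, 3]]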
Example #26
print(modelt.predict(X_train))
print(modelt.evaluate(X_test, y_test, verbose=2))
#%%
print(modelt.predict(X_test))


#%%
class TemporalSoftmax(keras.layers.Layer):
    def call(self, inputs, mask=None):
        broadcast_float_mask = tf.expand_dims(tf.cast(mask, "float32"), -1)
        inputs_exp = tf.exp(inputs) * broadcast_float_mask
        # Normalize by the sum of the masked exponentials (softmax denominator).
        inputs_sum = tf.reduce_sum(inputs_exp * broadcast_float_mask,
                                   axis=1,
                                   keepdims=True)
        return inputs_exp / inputs_sum


inputs1 = keras.Input(shape=(3852, ))
x1 = keras.layers.Embedding(input_dim=3852, output_dim=1283,
                            mask_zero=True)(inputs1)
x1 = keras.layers.Dense(1)(x1)
#lstm = tf.keras.layers.LSTM(3)
outs = TemporalSoftmax()(x1)

#outputs1 = keras.layers.LSTM(1283)(x1)

modelj = keras.Model(inputs1, outs)
modelj.compile(optimizer='Adam', loss='mse', metrics=['mae', 'mse', 'accuracy'])
#%%
modelj.fit(X_train, y_train)
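# A short demonstration (an assumption, not part of the original cell) of how
# the mask reaches TemporalSoftmax: Embedding(..., mask_zero=True) attaches a
# boolean mask to its output, and Keras forwards it to any downstream layer
# whose call() accepts a `mask` argument, so padded positions (token id 0)
# contribute nothing to the softmax. The shapes below are illustrative only.
import numpy as np
from tensorflow import keras

demo_in = keras.Input(shape=(4, ), dtype='int32')
demo_emb = keras.layers.Embedding(input_dim=10, output_dim=3,
                                  mask_zero=True)(demo_in)
demo_out = TemporalSoftmax()(demo_emb)
demo_model = keras.Model(demo_in, demo_out)

# The trailing zeros are padding; their rows in the output are all zeros.
print(demo_model.predict(np.array([[5, 2, 0, 0]])))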
Example #27


class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon


latent_dim = 2

encoder_inputs = keras.Input(shape=(32, 32, 3))
x = layers.Conv2D(32, 3, activation="relu", strides=2,
                  padding="same")(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()

latent_inputs = keras.Input(shape=(latent_dim, ))
x = layers.Dense(8 * 8 * 64, activation="relu")(latent_inputs)
x = layers.Reshape((8, 8, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2,
                           padding="same")(x)  # the source excerpt is truncated here; this call closing is assumed
Example #28
    discriminator.load_weights(discriminator_save_path)

if os.path.isfile(generator_save_path):
    generator.load_weights(generator_save_path)

# Use gradient clipping (clipvalue) in the optimizer
# Use learning-rate decay for stable training
discriminator_optimizer = keras.optimizers.Adam(lr=discriminator_lr,
                                                beta_1=beta_1)
discriminator.compile(optimizer=discriminator_optimizer,
                      loss='binary_crossentropy')

# Freeze the discriminator's weights (this only takes effect in the gan model compiled below)
discriminator.trainable = False

gan_input = keras.Input(shape=(latent_dim, ))
gan_output = discriminator(generator(gan_input))
gan = keras.models.Model(gan_input, gan_output)

gan_optimizer = keras.optimizers.Adam(lr=gan_lr, beta_1=beta_1)
gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')

if not os.path.exists(save_dir):
    os.mkdir(save_dir)

# Start training repetition
epoch = 0

for index, batch in enumerate(batches):

    batch_size = batch.shape[0]

note = sys.argv[1]
latent_dim = 100
height = 32
width = 32
channels = 3

# Define possible optimizers
adam_optimizer = keras.optimizers.Adam(0.0002, 0.5)
discriminator_optimizer = keras.optimizers.RMSprop(lr=0.0006, clipvalue=1.0, decay=1e-8)
gan_optimizer = keras.optimizers.RMSprop(lr=0.0006, clipvalue=1.0, decay=1e-8)

######################### MODEL BEGIN #####################################

generator_input = keras.Input(shape=(latent_dim,))

# First, transform the input into a 8x8 128-channels feature map
x = layers.Dense(128 * 8 * 8, name='g_top_layer')(generator_input)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.Activation("relu")(x)
x = layers.Reshape((8, 8, 128))(x)
# Upsampling
x = layers.UpSampling2D()(x)
x = layers.Conv2D(128, 3, padding='same', use_bias=True)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.Activation("relu")(x)
x = layers.UpSampling2D()(x)

x = layers.Conv2D(64, 3,  padding='same', use_bias=True)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
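# The GAN training loop above is cut off in this excerpt. A generic sketch of
# the alternating step it appears to be building towards (an assumption, not
# the original code): train the compiled discriminator on real vs. generated
# images, then train the frozen-discriminator `gan` model to label generated
# images as real.
import numpy as np

def gan_train_step(real_batch, generator, discriminator, gan, latent_dim):
    batch_size = real_batch.shape[0]
    noise = np.random.normal(size=(batch_size, latent_dim))
    fake_images = generator.predict(noise, verbose=0)

    # 1) Discriminator step: real images labelled 1, generated images 0.
    d_x = np.concatenate([real_batch, fake_images])
    d_y = np.concatenate([np.ones((batch_size, 1)), np.zeros((batch_size, 1))])
    d_loss = discriminator.train_on_batch(d_x, d_y)

    # 2) Generator step (discriminator weights frozen inside `gan`): ask the
    #    discriminator to call generated images "real".
    noise = np.random.normal(size=(batch_size, latent_dim))
    g_loss = gan.train_on_batch(noise, np.ones((batch_size, 1)))
    return d_loss, g_loss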
Example #30
 def test_output_dtype(self, dtype):
     inputs = keras.Input(shape=(1, ), dtype=tf.int32)
     layer = category_encoding.CategoryEncoding(
         num_tokens=4, output_mode=category_encoding.ONE_HOT, dtype=dtype)
     outputs = layer(inputs)
     self.assertAllEqual(outputs.dtype, dtype)
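# A small illustration (an assumption, not taken from the test above) of what
# the CategoryEncoding layer under test produces: with num_tokens=4 and
# one-hot output mode, each integer id in [0, 4) maps to a 4-dimensional
# one-hot row. The public tf.keras.layers.CategoryEncoding path is used here
# instead of the test's internal `category_encoding` module.
import numpy as np
import tensorflow as tf

demo_layer = tf.keras.layers.CategoryEncoding(num_tokens=4,
                                              output_mode='one_hot')
print(demo_layer(np.array([[0], [2], [3]])))
# Expected rows: [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]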