Example #1
def MNIST_CNY19(classes, input_shape, weights=None):
    model = Sequential()

    model.add(
        Convolution2D(40, (5, 5),
                      strides=(1, 1),
                      input_shape=input_shape,
                      activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(20, (5, 5), strides=(1, 1), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(320, activation='relu'))
    model.add(Dense(160, activation='relu'))
    model.add(Dense(80, activation='relu'))
    model.add(Dense(40, activation='relu'))
    model.add(Dense(classes, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #2
def CIFAR_CNY19(classes, input_shape, weights=None):
    model = Sequential()

    model.add(
        Convolution2D(40, (5, 5), strides=(1, 1), input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Convolution2D(20, (5, 5), strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(240, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(84, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(classes, activation='softmax'))

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    return model
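Both CNY19 builders above assume the usual Keras imports; a minimal sketch of what would make them runnable (assumed, not shown in the source snippets):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Activation, BatchNormalization,
                                     Convolution2D, Dense, Flatten,
                                     MaxPooling2D)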
Example #3
File: train.py Project: Slownite/Prequel
def create_model():
    units = 512
    middle_units = 256
    dropout_value = 0.3
    activation_function = 'softmax'
    loss_function = 'categorical_crossentropy'
    optimizer = 'rmsprop'
    model = Sequential()
    model.add(
        LSTM(units,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=dropout_value,
             return_sequences=True))
    model.add(
        LSTM(
            units,
            return_sequences=True,
            recurrent_dropout=dropout_value,
        ))
    model.add(LSTM(units))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_value))
    model.add(Dense(middle_units))
    model.add(Activation('relu'))
    model.add(Dropout(dropout_value))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_value))
    model.add(Dense(vocab_size))
    model.add(Activation(activation_function))
    model.compile(loss=loss_function, optimizer=optimizer)
    return model
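network_input and vocab_size are module-level globals in train.py that are not shown here. Hypothetical stand-ins, with shapes assumed purely for illustration:

import numpy as np

# Hypothetical placeholders; the real values come from the project's preprocessing.
vocab_size = 128                                       # assumed: number of distinct tokens
network_input = np.zeros((1000, 100, 1), np.float32)   # assumed: (samples, timesteps, features)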
Example #4
def build_mnist_model(layer_data: List[int],
                      num_classes: int,
                      input_shape: Any,
                      learning_rate: float,
                      regularized: bool = False) -> Model:
    model: Model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    if regularized:
        for nodes in layer_data:
            model.add(
                Dense(nodes,
                      activation="relu",
                      kernel_regularizer=keras.regularizers.l1(0.001)))
        model.add(
            Dense(num_classes,
                  activation="softmax",
                  kernel_regularizer=keras.regularizers.l1(0.001)))
    else:
        for nodes in layer_data:
            model.add(Dense(nodes, activation="relu"))
        model.add(Dense(num_classes, activation="softmax"))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(learning_rate),
                  metrics=["accuracy"])
    return model
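A hypothetical call, with layer sizes and learning rate chosen only for illustration:

model = build_mnist_model(layer_data=[128, 64],
                          num_classes=10,
                          input_shape=(28, 28),
                          learning_rate=1e-3,
                          regularized=True)
model.summary()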
Example #5
def train():

    # Learn to sum 20 nums
    train_samples = tf.random.normal(shape=(10000, 20))
    train_targets = tf.reduce_sum(train_samples, axis=-1)
    test_samples = tf.random.normal(shape=(100, 20))
    test_targets = tf.reduce_sum(test_samples, axis=-1)

    # Model Functional API
    x = Input(shape=[20])
    h = Dense(units=20, activation='relu')(x)
    h = Dense(units=10, activation='relu')(h)
    y = Dense(units=1)(h)
    model = Model(x, y)

    # Training loop
    epochs = 10
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

    for epoch in range(epochs):

        # Fancy progress bar
        pbar = tqdm(range(len(train_samples)))

        # Metrics
        loss_metric = keras.metrics.Mean()

        # Batches iteration, batch_size = 1
        for batch_id in pbar:

            # Getting sample target pair
            sample = train_samples[batch_id]
            target = train_targets[batch_id]

            # Adding batch dim since batch=1
            sample = tf.expand_dims(sample, axis=0)
            target = tf.expand_dims(target, axis=0)

            # Forward pass: needs to be recorded by gradient tape
            with tf.GradientTape() as tape:
                target_pred = model(sample)
                loss = loss_compute(target, target_pred)

            # Backward pass:
            # compute gradients w.r.t. the loss
            # update trainable weights of the model
            gradients = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(gradients, model.trainable_weights))

            # Tracking progress
            loss_metric(loss)
            pbar.set_description('Training Loss: %.3f' %
                                 loss_metric.result().numpy())

        # At the end of the epoch test the model
        test_targets_pred = model(test_samples)
        test_loss = loss_compute(test_targets, test_targets_pred)
        test_loss_avg = tf.reduce_mean(test_loss)
        print('Validation Loss: %.3f' % test_loss_avg)
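loss_compute is defined elsewhere in the source project. Since the task is scalar regression, a per-sample squared error is a plausible stand-in (an assumption, consistent with the reduce_mean applied to its result above):

def loss_compute(target, target_pred):
    # Per-sample squared error; callers reduce it themselves.
    return tf.square(target - tf.squeeze(target_pred, axis=-1))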
Example #6
    def build(self):
        inputs = Input(shape=self.input_shape, name='encoder_input')
        x = Dense(self.intermediate_dim,
                  activation=self.activation_fct)(inputs)
        z_mean = Dense(self.latent_dim, name='z_mean')(x)
        z_log_var = Dense(self.latent_dim, name='z_log_var')(x)

        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = Lambda(sampling, output_shape=(self.latent_dim, ),
                   name='z')([z_mean, z_log_var])

        # instantiate encoder model
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')

        # build decoder model
        latent_inputs = Input(shape=(self.latent_dim, ), name='z_sampling')
        x = Dense(self.intermediate_dim,
                  activation=self.activation_fct)(latent_inputs)
        outputs = Dense(self.original_dim, activation='sigmoid')(x)

        # instantiate decoder model
        decoder = Model(latent_inputs, outputs, name='decoder')

        # instantiate VAE model
        outputs = decoder(encoder(inputs)[2])
        vae = Model(inputs, outputs, name='vae_mlp')

        # VAE Loss = mse_loss or xent_loss + kl_loss
        reconstruction_loss = mse(inputs, outputs)

        reconstruction_loss *= self.original_dim
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        vae.add_loss(vae_loss)

        vae.compile(optimizer=self.optimizer,
                    loss=self.loss,
                    metrics=['accuracy'])

        x_train_split, x_valid_split = train_test_split(
            self.x_train,
            test_size=self.train_test_split,
            random_state=self.seed)

        vae.fit(x_train_split,
                x_train_split,
                batch_size=self.batch_size,
                epochs=self.epochs,
                verbose=self.verbosity,
                shuffle=True,
                validation_data=(x_valid_split, x_valid_split))

        x_train_pred = vae.predict(self.x_train)
        train_mse = np.mean(np.power(self.x_train - x_train_pred, 2), axis=1)
        self.threshold = np.quantile(train_mse, 0.9)
        self.vae = vae
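sampling is the usual Keras VAE reparameterization helper; a sketch of the standard implementation (assumed, not copied from this project):

def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))      # eps ~ N(0, I)
    return z_mean + K.exp(0.5 * z_log_var) * epsilon   # z = mu + sigma * eps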
Example #7
def build_value_network(shape, value_support_size):
    value_input = Input(shape)
    c1 = Conv2D(filters=1, kernel_size=1, padding='same', activation='linear')(value_input)
    b1 = BatchNormalization(axis=-1)(c1)
    l1 = LeakyReLU()(b1)
    f1 = Flatten()(l1)
    d2 = Dense(20, use_bias=False, activation='linear')(f1)
    l2 = LeakyReLU()(d2)
    d3 = Dense(value_support_size, use_bias=False, activation='tanh')(l2)
    value_model = Model(inputs=value_input, outputs=d3)
    return value_model
Example #8
def train():

    # Learn to sum 20 nums
    train_samples = tf.random.normal(shape=(10000, 20))
    train_targets = tf.reduce_sum(train_samples, axis=-1)
    test_samples = tf.random.normal(shape=(100, 20))
    test_targets = tf.reduce_sum(test_samples, axis=-1)

    # Model Functional API
    x = Input(shape=[20])
    h = Dense(units=20, activation='relu')(x)
    h = Dense(units=10, activation='relu')(h)
    y = Dense(units=1)(h)
    model = Model(x, y)

    # Training loop
    epochs = 10
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

    for epoch in range(epochs):

        # Fancy progress bar
        pbar = tqdm(range(len(train_samples)))

        # Metrics
        loss_metric = keras.metrics.Mean()

        # Batches iteration, batch_size = 1
        for batch_id in pbar:

            # Getting sample target pair
            sample = train_samples[batch_id]
            target = train_targets[batch_id]

            # Adding batch dim since batch=1
            sample = tf.expand_dims(sample, axis=0)
            target = tf.expand_dims(target, axis=0)

            # This is computed in graph mode
            # Computing loss and gradients w.r.t the loss
            loss, gradients = forward_pass(model, sample, target)
            # Updaing model weights
            backward_pass(model, gradients, optimizer)

            # Tracking progress
            loss_metric(loss)
            pbar.set_description('Training Loss: %.3f' %
                                 loss_metric.result().numpy())

        # At the end of the epoch test the model
        test_targets_pred = model(test_samples)
        test_loss = loss_compute(test_targets, test_targets_pred)
        test_loss_avg = tf.reduce_mean(test_loss)
        print('Validation Loss: %.3f' % test_loss_avg)
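forward_pass and backward_pass come from the source project; sketches consistent with how they are called above (assumptions, reusing the loss_compute stand-in from Example #5):

@tf.function  # traced to graph mode, as the comment above suggests
def forward_pass(model, sample, target):
    with tf.GradientTape() as tape:
        target_pred = model(sample)
        loss = loss_compute(target, target_pred)
    gradients = tape.gradient(loss, model.trainable_weights)
    return loss, gradients

def backward_pass(model, gradients, optimizer):
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))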
Example #9
    def createNetwork(self):
        input1 = Input(shape=(self.obs_space_length, ))
        d1 = Dense(units=32, batch_size=self.batch_size,
                   activation='relu')(input1)
        d2 = Dense(units=32, batch_size=self.batch_size, activation='relu')(d1)
        output1 = Dense(units=self.act_space_length,
                        batch_size=self.batch_size,
                        activation='softmax')(d2)

        model = Model(inputs=input1, outputs=output1)
        return model
Example #10
def SingleOutputCNN(
    input_shape,
    output_shape,
    cnns_per_maxpool=1,
    maxpool_layers=1,
    dense_layers=1,
    dense_units=64,
    dropout=0.25,
    regularization=False,
    global_maxpool=False,
    name='',
) -> Model:
    function_name = cast(types.FrameType,
                         inspect.currentframe()).f_code.co_name
    model_name = f"{function_name}-{name}" if name else function_name
    # model_name  = seq([ function_name, name ]).filter(lambda x: x).make_string("-")  # remove dependency on pyfunctional - not in Kaggle repo without internet

    inputs = Input(shape=input_shape)
    x = inputs

    for cnn1 in range(0, maxpool_layers):
        for cnn2 in range(1, cnns_per_maxpool + 1):
            x = Conv2D(32 * cnn2,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)

    if global_maxpool:
        x = GlobalMaxPooling2D()(x)

    x = Flatten()(x)

    for nn1 in range(0, dense_layers):
        if regularization:
            x = Dense(dense_units,
                      activation='relu',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.01))(x)
        else:
            x = Dense(dense_units, activation='relu')(x)

        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)

    x = Dense(output_shape, activation='softmax')(x)

    model = Model(inputs, x, name=model_name)
    # plot_model(model, to_file=os.path.join(os.path.dirname(__file__), f"{name}.png"))
    return model
Example #11
    def modelMasking(self, code_layer_type, input_dim, code_dim):

        self.code_layer_type = code_layer_type
        assert len(code_dim) > 0

        if self.code_layer_type == 'lstm':
            assert len(input_dim) == 2
            input_data = Input(shape=(input_dim[0], input_dim[1]))
            mask = Masking(mask_value=0.)(input_data)
            if len(code_dim) == 1:
                encoded = LSTM(code_dim[0])(mask)
                decoded = RepeatVector(input_dim[0])(encoded)
            elif len(code_dim) > 1:
                encoded = mask
                for i, units in enumerate(code_dim):
                    if i == len(code_dim) - 1:
                        encoded = LSTM(units)(encoded)
                        continue
                    encoded = LSTM(units, return_sequences=True)(encoded)

                for i, units in enumerate(reversed(code_dim)):
                    if i == 1:
                        decoded = LSTM(units, return_sequences=True)(
                            RepeatVector(input_dim[0])(encoded))
                    elif i > 1:
                        decoded = LSTM(units, return_sequences=True)(decoded)
            else:
                raise ValueError("The codDim must be over 0.")

            decoded = LSTM(input_dim[-1], return_sequences=True)(decoded)
            self.model = Model(input_data, decoded)

        elif self.code_layer_type == 'cov':
            pass
        elif self.code_layer_type == 'dense':
            assert len(input_dim) == 1
            input_data = Input(shape=(input_dim[0], ))
            # encoded = input_data
            # for i, units in enumerate(codeDim):
            # 	encoded = Dense(units, activation='relu')(encoded)
            # decoded = Dense(inputDim[-1], activation='sigmoid')(encoded)
            # self.model = Model(input_data, decoded)
            encoder = Dense(
                code_dim[0],
                activation="tanh",
                activity_regularizer=regularizers.l1(10e-5))(input_data)
            encoder = Dense(int(code_dim[0] / 2), activation="relu")(encoder)
            decoder = Dense(int(code_dim[0] / 2), activation='tanh')(encoder)
            decoder = Dense(input_dim[0], activation='relu')(decoder)
            self.model = Model(input_data, decoder)
Example #12
    def __init__(self, latent_dim=49):
        config = ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)

        # ENCODER
        inp = Input((896, 896, 1))
        e = Conv2D(32, (10, 10), activation='relu')(inp)
        e = MaxPooling2D((10, 10))(e)
        e = Conv2D(64, (6, 6), activation='relu')(e)
        e = MaxPooling2D((10, 10))(e)
        e = Conv2D(64, (3, 3), activation='relu')(e)
        l = Flatten()(e)
        l = Dense(49, activation='softmax')(l)
        # DECODER
        d = Reshape((7, 7, 1))(l)
        d = Conv2DTranspose(64, (3, 3),
                            strides=8,
                            activation='relu',
                            padding='same')(d)
        d = BatchNormalization()(d)
        d = Conv2DTranspose(64, (3, 3),
                            strides=8,
                            activation='relu',
                            padding='same')(d)
        d = BatchNormalization()(d)
        d = Conv2DTranspose(64, (3, 3),
                            strides=2,
                            activation='relu',
                            padding='same')(d)
        d = BatchNormalization()(d)
        d = Conv2DTranspose(32, (3, 3), activation='relu', padding='same')(d)
        decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(d)

        self.CAD = tf.keras.Model(inp, decoded)
        opt = tf.keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

        self.CAD.compile(loss="binary_crossentropy",
                         optimizer=opt,
                         metrics=["accuracy"])

        self.Flow = tf.keras.Sequential([
            tf.keras.layers.LSTM(32, input_shape=(3, 2),
                                 return_sequences=True),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.Bidirectional(
                tf.keras.layers.LSTM(32, return_sequences=True)),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.TimeDistributed(
                tf.keras.layers.Dense(10, activation='relu')),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(2, activation='relu')
        ])
        opt = tf.keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)
        self.Flow.compile(loss="binary_crossentropy",
                          optimizer=opt,
                          metrics=["accuracy"])

        self.Flow.summary()  # summary() prints directly and returns None
        self.CAD.summary()
Example #13
def build_model(X_train, units):
    """
    builds a TensorFlow regression model
    :param X_train: Training data (to determine the input size)
    :param units: number of neurons in the hidden layer
    :return: compiled TensorFlow model
    """
    model = tensorflow_core.python.keras.Sequential([
        Dense(units, activation='relu', input_shape=[len(X_train[0])]),
        Dense(1)
    ])

    model.compile(loss='mse',
                  optimizer='rmsprop',
                  metrics=['mae', 'mse'])
    return model
Example #14
File: Z2Model.py Project: guy120494/gcnn
    def __init__(self):
        super(Z2Model, self).__init__()
        # self.gcnn1 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn2 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn3 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn4 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn5 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn6 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn7 = tf.keras.layers.Conv2D(filters=20, kernel_size=(4, 4), activation='relu')

        self.gcnn1 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn2 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn3 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn4 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn5 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn6 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn7 = ConvBatchLayer(conv=Conv2D(filters=9, kernel_size=(3, 3)))
        self.flatten = Flatten()
        self.dense = Dense(9)
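ConvBatchLayer is defined elsewhere in guy120494/gcnn. Judging by its name and usage, it plausibly wraps a convolution with batch normalization; a hypothetical sketch:

import tensorflow as tf

class ConvBatchLayer(tf.keras.layers.Layer):
    def __init__(self, conv):
        super().__init__()
        self.conv = conv
        self.bn = tf.keras.layers.BatchNormalization()

    def call(self, inputs, training=False):
        return self.bn(self.conv(inputs), training=training)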
Example #15
def modelC(row, col):
    # define LSTM
    model = Sequential()
    model.add(
        TimeDistributed(Conv2D(16, (2, 2), activation='relu'),
                        input_shape=(None, row, col, 1)))
    model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(Dropout(0.25))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(75))
    # model.add(Dropout(0.25))
    model.add(BatchNormalization())

    model.add(RepeatVector(4))
    model.add(LSTM(50, return_sequences=True))
    # model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(TimeDistributed(Dense(4, activation='softmax')))

    # Replicates `model` on 8 GPUs.
    # This assumes that your machine has 8 available GPUs.
    # parallel_model = multi_gpu_model(model, gpus=[2])
    # parallel_model.compile(loss='categorical_crossentropy',
    #                       optimizer='adam', metrics=['accuracy'])

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #16
def modelStandard(input_shape, parameter=None):
    # define LSTM
    model = Sequential()
    model.add(
        TimeDistributed(Conv2D(16, (2, 2), activation='relu'),
                        input_shape=input_shape))
    model.add(Dropout(parameter['dropout']))
    model.add(BatchNormalization())
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=2)))
    model.add(Dropout(parameter['dropout']))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(parameter['cell1']))
    # model.add(Dropout(0.25))
    model.add(BatchNormalization())

    model.add(RepeatVector(8))
    model.add(LSTM(parameter['cell2'], return_sequences=True))
    # model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(TimeDistributed(Dense(5, activation='softmax')))

    # Replicates `model` on 8 GPUs.
    # This assumes that your machine has 8 available GPUs.
    #parallel_model = multi_gpu_model(model, gpus=2)
    #parallel_model.compile(loss='categorical_crossentropy',
    #                       optimizer='adam', metrics=['accuracy'])

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #17
def ModelShare():
    tweet_a = Input(shape=(280, 256))
    tweet_b = Input(shape=(280, 256))

    # This layer takes a sequence of 256-dim vectors as input and, because
    # return_sequences=True, returns the full sequence of 64-dim outputs
    shared_lstm = LSTM(64, return_sequences=True, name='lstm')

    # When we reuse the same layer instance
    # multiple times, the weights of the layer
    # are also being reused
    # (it is effectively *the same* layer)
    encoded_a = shared_lstm(tweet_a)
    encoded_b = shared_lstm(tweet_b)

    # We can then concatenate the two output sequences:
    merged_vector = concatenate([encoded_a, encoded_b], axis=-1)

    # And add a logistic regression on top
    predictions = Dense(1, activation='sigmoid')(merged_vector)

    # We define a trainable model linking the
    # tweet inputs to the predictions
    model = Model(inputs=[tweet_a, tweet_b], outputs=predictions)

    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #18
def _create_decoder(step: Tensorflow2ModelStep,
                    last_encoder_outputs: tf.Tensor,
                    last_encoders_states: List[tf.Tensor]) -> tf.Tensor:
    """
    Create a decoder RNN using GRU cells.

    :param step: The base Neuraxle step for TensorFlow v2 (Tensorflow2ModelStep)
    :param last_encoders_states: last encoder states tensor
    :param last_encoder_outputs: last encoder output tensor
    :return: decoder output
    """
    decoder_lstm = RNN(cell=_create_stacked_rnn_cells(step),
                       return_sequences=True,
                       return_state=False)

    last_encoder_output = tf.expand_dims(last_encoder_outputs, axis=1)
    # last encoder output shape: (batch_size, 1, hidden_dim)

    replicated_last_encoder_output = tf.repeat(
        input=last_encoder_output,
        repeats=step.hyperparams['window_size_future'],
        axis=1)
    # replicated last encoder output shape: (batch_size, window_size_future, hidden_dim)

    decoder_outputs = decoder_lstm(replicated_last_encoder_output,
                                   initial_state=last_encoders_states)
    # decoder outputs shape: (batch_size, window_size_future, hidden_dim)

    decoder_dense = Dense(step.hyperparams['output_dim'])
    # decoder outputs shape: (batch_size, window_size_future, output_dim)

    return decoder_dense(decoder_outputs)
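_create_stacked_rnn_cells is defined elsewhere in the same Neuraxle example. A plausible sketch, assuming tensorflow.keras.layers.GRUCell and a hypothetical 'layers_stacked_count' hyperparameter:

from typing import List
from tensorflow.keras.layers import GRUCell

def _create_stacked_rnn_cells(step: Tensorflow2ModelStep) -> List[GRUCell]:
    # One GRU cell per stacked layer, all with the same hidden size.
    return [GRUCell(step.hyperparams['hidden_dim'])
            for _ in range(step.hyperparams['layers_stacked_count'])]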
Example #19
    def Train(self, input, target):
        X_train, X_test, Y_train, Y_test = train_test_split(input, target, train_size=0.75)
        Y_train = np.asarray(Y_train)
        Y_test = np.array(Y_test)
        X_train = np.reshape(X_train, [-1, X_train[0].shape[0], X_train[0].shape[1]])
        X_test = np.reshape(X_test, [-1, X_train[0].shape[0], X_train[0].shape[1]])

        model = Sequential()
        model.add(Conv1D(16, 3, padding='same', input_shape=input[0].shape))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization())
        model.add(GRU(16, return_sequences=True))
        # model.add(Activation("sigmoid"))
        # model.add(LSTM(lstm_out))

        model.add(Flatten())
        model.add(Dense(8, activity_regularizer=l2(0.001)))
        # model.add(GRU(lstm_out, return_sequences=True))
        # model.add(LSTM(lstm_out))
        # model.add(Dense(20, activity_regularizer=l2(0.001)))
        model.add(Activation("relu"))
        model.add(Dense(2))

        model.compile(loss=mean_absolute_error, optimizer='nadam',
                      metrics=[RootMeanSquaredError(), MAE])
        model.summary()  # summary() prints directly and returns None

        batch_size = 12
        epochs = 100
        reduce_lr_acc = ReduceLROnPlateau(monitor='val_loss', factor=0.9,
                                          patience=epochs / 10, verbose=1,
                                          min_delta=1e-4, mode='min')  # 'min': lower val_loss is better
        model.fit(X_train, Y_train,
                  epochs=epochs,
                  batch_size=batch_size,
                  validation_data=(X_test, Y_test),
                  callbacks=[reduce_lr_acc])
        model.save("PositionEstimation.h5", overwrite=True)
        # acc = model.evaluate(X_test,
        #                      Y_test,
        #                      batch_size=batch_size,
        #                      verbose=0)

        predicted = model.predict(X_test, batch_size=batch_size)
        # predicted = out.ravel()

        res = pd.DataFrame({"predicted_x": predicted[:, 0],
                            "predicted_y": predicted[:, 1],
                            "original_x": Y_test[:, 0],
                            "original_y": Y_test[:, 1]})
        res.to_excel("res.xlsx")
Example #20
def create_model():
    model = Sequential()

    model.add(Dense(10, activation="softmax", input_dim=3072))
    model.compile(loss=sparse_categorical_crossentropy,
                  metrics=[sparse_categorical_accuracy])

    return model
Example #21
def create_model_pp():
    model = Sequential()
    model.add(Flatten(input_shape=(300, 300, 3)))
    model.add(Dense(4, activation="softmax"))
    model.compile(loss=categorical_crossentropy,
                  metrics=[categorical_accuracy])

    return model
Example #22
    def __init__(self, z_input_dim):
        """
        init params

        :param z_input_dim: input dim of z
        """
        super(GAN, self).__init__()
        self.Dense1 = Dense(512, input_dim=z_input_dim)
        self.LeakyReLU1 = LeakyReLU(0.2)
        self.Dense2 = Dense(128 * 16 * 16)
        self.LeakyReLU2 = LeakyReLU(0.2)
        self.BatchNormalization2 = BatchNormalization()
        self.Reshape3 = Reshape((128, 16, 16), input_shape=(128 * 16 * 16, ))
        self.UpSampling2D3 = UpSampling2D(size=(2, 2))
        self.Conv2D3 = Conv2D(64, (5, 5), padding='same', activation='tanh')
        self.UpSampling2D4 = UpSampling2D(size=(2, 2))
        self.Conv2D4 = Conv2D(1, (5, 5), padding='same', activation='tanh')
Example #23
def create_model():
    model = Sequential()

    model.add(Dense(128, input_dim=3072))  # 32 * 32 * 3

    model.compile(loss=sparse_categorical_crossentropy,
                  metrics=[sparse_categorical_accuracy])

    return model
Example #24
def build_policy_network(shape, action_size, regularizer):
    policy_input = Input(shape)
    c1 = Conv2D(filters=1, kernel_size=1, padding='same', activation='linear',
                kernel_regularizer=regularizer)(policy_input)
    b1 = BatchNormalization(axis=-1)(c1)
    l1 = LeakyReLU()(b1)
    f1 = Flatten()(l1)
    d1 = Dense(action_size, use_bias=False, activation='sigmoid', kernel_regularizer=regularizer)(f1)
    policy_model = Model(inputs=policy_input, outputs=d1)
    return policy_model
Example #25
def modelDemoStandardConvLSTMInception(input_shape, parameter=None):
    # define LSTM
    input = Input(shape=input_shape, name='main_input')

    I_1 = TimeDistributed(Conv2D(16, (1, 1),
                                 activation='relu',
                                 padding='same',
                                 name='C_1'),
                          name='I_11')(input)
    I_1 = TimeDistributed(Conv2D(16, (5, 5),
                                 activation='relu',
                                 padding='same',
                                 name='C_2'),
                          name='I_12')(I_1)

    I_2 = TimeDistributed(MaxPooling2D((3, 3),
                                       strides=(1, 1),
                                       padding='same',
                                       name='C_3'),
                          name='I_21')(input)
    I_2 = TimeDistributed(Conv2D(16, (1, 1),
                                 activation='relu',
                                 padding='same',
                                 name='C_4'),
                          name='I_22')(I_2)

    concatenate_output = concatenate([I_1, I_2], axis=-1)

    # x = TimeDistributed(Flatten())(x)
    x = ConvLSTM2D(filters=32,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=False)(concatenate_output)
    #x = MaxPooling2D((3, 3), strides=(1, 1), padding='same', name='M_1')(x)

    x = (Flatten())(x)

    x = RepeatVector(8)(x)
    x = LSTM(50, return_sequences=True)(x)

    output = TimeDistributed(Dense(8, activation='softmax'),
                             name='main_output')(x)
    #with tensorflow.device('/cpu'):
    model = Model(inputs=[input], outputs=[output])
    # compile the model with gpu

    #parallel_model = multi_gpu_model(model, gpus=2)
    #parallel_model.compile(loss={'main_output': 'categorical_crossentropy'},
    #              loss_weights={'main_output': 1.}, optimizer='adam', metrics=['accuracy'])
    #model = multi_gpu(model, gpus=[1, 2])
    model.compile(loss={'main_output': 'categorical_crossentropy'},
                  loss_weights={'main_output': 1.},
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #26
def modify_model(model: Model, class_index: int,
                 importance_type: ImportanceType) -> Model:
    gamma_initializer: str = "zeros"
    if importance_type & ImportanceType.GAMMA:
        gamma_initializer = "ones"

    gamma_regularizer = None
    if importance_type & ImportanceType.L1 and not importance_type & ImportanceType.L2:
        gamma_regularizer = l1()
    if not importance_type & ImportanceType.L1 and importance_type & ImportanceType.L2:
        gamma_regularizer = l2()
    if importance_type & ImportanceType.L1 and importance_type & ImportanceType.L2:
        gamma_regularizer = l1_l2()

    max_layer: int = len(model.layers)
    last_output = None
    network_input = None
    for i, layer in enumerate(model.layers):
        if i == 0:
            last_output = layer.output
            network_input = layer.input
        if 0 < i < max_layer:
            new_layer = BatchNormalization(
                center=bool(importance_type & ImportanceType.CENTERING),
                gamma_initializer=gamma_initializer,
                gamma_regularizer=gamma_regularizer)
            last_output = new_layer(last_output)
        if i == max_layer - 1:
            new_end_layer: Dense = Dense(2,
                                         activation="softmax",
                                         name="binary_output_layer")
            last_output = new_end_layer(last_output)

            old_weights = layer.get_weights()
            old_weights[0] = np.transpose(old_weights[0], (1, 0))
            new_weights: List[np.ndarray] = [
                np.append(old_weights[0][class_index:class_index + 1],
                          np.subtract(
                              np.sum(old_weights[0], axis=0, keepdims=True),
                              old_weights[0][class_index:class_index + 1]),
                          axis=0),
                np.append(old_weights[1][class_index:class_index + 1],
                          np.subtract(
                              np.sum(old_weights[1], axis=0, keepdims=True),
                              old_weights[1][class_index:class_index + 1]),
                          axis=0)
            ]
            new_weights[0] = np.transpose(new_weights[0], (1, 0))
            new_end_layer.set_weights(new_weights)
        elif i > 0:
            last_output = layer(last_output)

    return Model(inputs=network_input, outputs=last_output)
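ImportanceType is a project-defined flag enum; a sketch consistent with the bitwise tests above (member values are assumptions):

from enum import IntFlag

class ImportanceType(IntFlag):
    GAMMA = 1      # initialize BatchNormalization gamma to ones
    CENTERING = 2  # enable the beta (centering) parameter
    L1 = 4         # L1 regularization on gamma
    L2 = 8         # L2 regularization on gamma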
Example #27
    def model(self, code_layer_type, input_dim, code_dim):
        self.code_layer_type = code_layer_type
        assert len(code_dim) > 0

        if self.code_layer_type == 'lstm':
            assert len(input_dim) == 2
            input_data = Input(shape=(input_dim[0], input_dim[1]))

            if len(code_dim) == 1:
                encoded = LSTM(code_dim[0])(input_data)
                decoded = RepeatVector(input_dim[0])(encoded)
            elif len(code_dim) > 1:
                encoded = input_data
                for i, units in enumerate(code_dim):
                    if i == len(code_dim) - 1:
                        encoded = LSTM(units)(encoded)
                        continue
                    encoded = LSTM(units, return_sequences=True)(encoded)

                for i, units in enumerate(reversed(code_dim)):
                    if i == 1:
                        decoded = LSTM(units, return_sequences=True)(
                            RepeatVector(input_dim[0])(encoded))
                    elif i > 1:
                        decoded = LSTM(units, return_sequences=True)(decoded)
            else:
                raise ValueError("The codDim must be over 0.")

            decoded = LSTM(input_dim[-1], return_sequences=True)(decoded)
            self.model = Model(input_data, decoded)

        elif self.code_layer_type == 'dense':
            assert len(input_dim) == 1
            input_data = Input(shape=(input_dim[0], ))
            encoded = input_data
            for i, units in enumerate(code_dim):
                encoded = Dense(units, activation='relu')(encoded)
            decoded = Dense(input_dim[-1], activation='sigmoid')(encoded)
            self.model = Model(input_data, decoded)

        elif self.code_layer_type == 'cov':
            pass
Example #28
def build_model():
    model = keras.Sequential()
    model.add(
        Conv2D(64, kernel_size=3, activation='relu', input_shape=(28, 28, 1)))
    model.add(Conv2D(32, kernel_size=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
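A hypothetical MNIST run for this model; the data handling is assumed, and labels are one-hot encoded to match categorical_crossentropy:

(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0
y_train = keras.utils.to_categorical(y_train, 10)

model = build_model()
model.fit(x_train, y_train, epochs=1, batch_size=128)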
Example #29
def modelB(row, col, parameter=None):
    # define LSTM
    input = Input(shape=(None, row, col, 1), name='main_input')
    # x = TimeDistributed(Conv2D(16, (2, 2)))(input)
    # x = BatchNormalization()(x)
    # x = Activation('relu')(x)
    # x = Dropout(0.25)(x)
    # tower_1 = TimeDistributed(Conv2D(16, (1, 1), padding='same', activation='relu'))(input)
    # tower_1 = TimeDistributed(Conv2D(16, (3, 3), padding='same', activation='relu'))(tower_1)

    tower_2 = TimeDistributed(Conv2D(16, (1, 1), padding='same'))(input)
    x = BatchNormalization()(tower_2)
    x = Activation('relu')(x)
    x = Dropout(0.25)(x)
    tower_2 = TimeDistributed(Conv2D(16, (5, 5), padding='same'))(x)
    x = BatchNormalization()(tower_2)
    x = Activation('relu')(x)
    tower_2 = Dropout(0.25)(x)

    tower_3 = TimeDistributed(
        MaxPooling2D((3, 3), strides=(1, 1), padding='same'))(input)
    tower_3 = TimeDistributed(Conv2D(16, (1, 1), padding='same'))(tower_3)
    x = BatchNormalization()(tower_3)
    x = Activation('relu')(x)
    tower_3 = Dropout(0.25)(x)
    concatenate_output = concatenate([tower_2, tower_3], axis=-1)

    x = TimeDistributed(MaxPooling2D(pool_size=(2, 2),
                                     strides=2))(concatenate_output)
    x = Dropout(0.25)(x)
    x = TimeDistributed(Flatten())(x)
    # convLstm = ConvLSTM2D(filters=40, kernel_size=(3, 3),padding='same', return_sequences=False)(x)
    lstm_output = LSTM(75)(x)
    lstm_output = BatchNormalization()(lstm_output)
    # lstm_output = BatchNormalization()(convLstm)
    # auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_output)
    # auxiliary_input = Input(shape=(4,), name='aux_input')
    # x = concatenate([lstm_output, auxiliary_input])

    x = RepeatVector(4)(lstm_output)
    x = LSTM(50, return_sequences=True)(x)
    # model.add(Dropout(0.25))
    x = BatchNormalization()(x)
    output = TimeDistributed(Dense(4, activation='softmax'),
                             name='main_output')(x)

    model = Model(inputs=[input], outputs=[output])
    model.compile(loss={'main_output': 'categorical_crossentropy'},
                  loss_weights={'main_output': 1.},
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #30
def get_model(X, N_class, total_words=86627, EMBEDDING_DIM=100, maxlen=53):

    embeddings_index = {}

    with open('glove.6B/glove.6B.100d.txt') as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs

    embedding_matrix = np.zeros((total_words, EMBEDDING_DIM))
    for word, i in X.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector

    inp = Input(shape=(maxlen, ), dtype='int32')
    embedding = Embedding(total_words,
                          EMBEDDING_DIM,
                          embeddings_initializer=Constant(embedding_matrix),
                          input_length=maxlen,
                          trainable=False)(inp)
    x = LSTM(300, dropout=0.25, recurrent_dropout=0.25,
             return_sequences=True)(embedding)
    x = Dropout(0.25)(x)
    merged = Attention_COSTUM(maxlen)(x)
    merged = Dense(256, activation='relu')(merged)
    merged = Dropout(0.25)(merged)
    merged = BatchNormalization()(merged)
    outp = Dense(N_class, activation='softmax')(merged)

    AttentionLSTM = Model(inputs=inp, outputs=outp)
    AttentionLSTM.compile(loss='sparse_categorical_crossentropy',
                          optimizer='adam',
                          metrics=['acc'])

    return AttentionLSTM
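Attention_COSTUM is a project-defined custom layer. A minimal attention layer with the same shape contract (sequence in, vector out) might look like this hypothetical sketch:

import tensorflow as tf
from tensorflow.keras.layers import Layer

class SimpleAttention(Layer):
    """Weighted sum over timesteps with learned per-timestep scores."""

    def __init__(self, step_dim, **kwargs):
        super().__init__(**kwargs)
        self.step_dim = step_dim  # kept for call-site parity with Attention_COSTUM(maxlen)

    def build(self, input_shape):
        # One scoring weight per feature dimension.
        self.w = self.add_weight(name="att_w",
                                 shape=(input_shape[-1], 1),
                                 initializer="glorot_uniform")
        super().build(input_shape)

    def call(self, x):
        # x: (batch, timesteps, features) -> scores: (batch, timesteps, 1)
        scores = tf.nn.softmax(tf.einsum('btf,fo->bto', x, self.w), axis=1)
        # Weighted sum over time -> (batch, features)
        return tf.reduce_sum(x * scores, axis=1)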