Example No. 1
from tensorflow import keras
from tensorflow.keras.constraints import unit_norm
from tensorflow.keras.utils import get_custom_objects


def NN(train_X):

    # See Heng-Tze Cheng et al. (2016), "Wide & Deep Learning for
    # Recommender Systems"; the input-to-output concatenation below
    # follows that idea.

    inputs = keras.layers.Input(shape=train_X.shape[1:])

    # Register the custom Swish activation under the name 'Swish' so that
    # layers can use activation='Swish' (Swish and swish are defined
    # elsewhere; see the sketch after this function).
    get_custom_objects().update({'Swish': Swish(swish)})

    # # Architecture 1
    # hidden1 = keras.layers.Dense(33, activation='Swish')(inputs)
    # hidden2 = keras.layers.Dense(33, activation='Swish')(hidden1)
    # concat = keras.layers.Concatenate()([inputs, hidden2])

    # # Architecture 2
    # hidden1 = keras.layers.Dense(33, activation='Swish')(inputs)
    # hidden2 = keras.layers.Dense(50, activation='Swish')(hidden1)
    # hidden3 = keras.layers.Dense(20, activation='Swish')(hidden2)
    # concat = keras.layers.Concatenate()([inputs, hidden2, hidden3])

    # There are several weight constraints to choose from. A good, simple
    # choice for this model is to normalize each unit's incoming weight
    # vector so that its norm equals 1.0, which forces all incoming weights
    # to stay small. Keras provides this as unit_norm.

    # Architecture 3
    hidden1 = keras.layers.Dense(33,
                                 activation='relu',
                                 kernel_initializer="he_normal",
                                 kernel_constraint=unit_norm())(inputs)
    hidden2 = keras.layers.Dense(33,
                                 activation='relu',
                                 kernel_initializer="he_normal",
                                 kernel_constraint=unit_norm())(hidden1)
    hidden3 = keras.layers.Dense(33,
                                 activation='relu',
                                 kernel_initializer="he_normal",
                                 kernel_constraint=unit_norm())(hidden2)
    concat = keras.layers.Concatenate()([inputs, hidden2, hidden3])

    output = keras.layers.Dense(1)(concat)

    model = keras.models.Model(inputs=[inputs], outputs=[output])

    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])

    # model.compile(loss = euclidean_distance_loss,
    #               optimizer='adam',
    #               metrics=['mse'])

    return model
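The snippet assumes that swish and Swish are defined elsewhere. A minimal sketch of one way to provide them, plus a hypothetical call on random data (the shapes are placeholders, not from the original source):

import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K

def swish(x):
    # Swish activation: x * sigmoid(x) (Ramachandran et al., 2017).
    return x * K.sigmoid(x)

class Swish(keras.layers.Activation):
    # Thin Activation subclass so the activation can be registered by name.
    pass

train_X = np.random.rand(100, 10).astype("float32")
train_y = np.random.rand(100, 1).astype("float32")
model = NN(train_X)
model.fit(train_X, train_y, epochs=2, verbose=0)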
Example No. 2
def design_CNN_3D(self):
    # Build (but do not compile) a 3-D CNN classifier over self.X_train.
    inputs = Input(shape=self.X_train.shape[1:])
    x = Conv3D(filters=32,
               kernel_size=(3, 3, 11),
               bias_constraint=unit_norm())(inputs)
    x = LeakyReLU()(x)
    x = Conv3D(filters=32,
               kernel_size=(3, 3, 5),
               bias_constraint=unit_norm())(x)
    x = LeakyReLU()(x)
    x = Flatten()(x)
    x = Dense(units=256, activation='relu')(x)
    x = Dropout(rate=0.4)(x)
    x = Dense(units=128, activation='relu')(x)
    x = Dropout(rate=0.4)(x)
    outputs = Dense(units=len(self.labels), activation='softmax')(x)
    self.model = Model(inputs=inputs, outputs=outputs)
Example No. 3
def compile_3D_CNN(self):
    # Same pattern as the previous example, with different filter counts
    # and kernel sizes, plus compilation.
    inputs = Input(shape=self.X_train.shape[1:])
    x = Conv3D(filters=64,
               kernel_size=(3, 3, 11),
               bias_constraint=unit_norm())(inputs)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv3D(filters=32,
               kernel_size=(2, 2, 5),
               bias_constraint=unit_norm())(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Flatten()(x)
    x = Dense(units=256, activation='relu')(x)
    x = Dropout(rate=0.4)(x)
    x = Dense(units=128, activation='relu')(x)
    x = Dropout(rate=0.4)(x)
    outputs = Dense(units=len(self.labels), activation='softmax')(x)
    self.model = Model(inputs=inputs, outputs=outputs)
    self.model.compile(optimizer='Adam',
                       loss='sparse_categorical_crossentropy',
                       metrics=['accuracy'])
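Both methods assume a host class exposing X_train and labels. A minimal, hypothetical host; the class name and the (9, 9, 32, 1) patch shape are assumptions, not from the original source:

import numpy as np
from tensorflow.keras.layers import (Input, Conv3D, LeakyReLU, Flatten,
                                     Dense, Dropout)
from tensorflow.keras.models import Model
from tensorflow.keras.constraints import unit_norm

class CubeClassifier:
    def __init__(self, X_train, labels):
        self.X_train = X_train   # e.g. (samples, rows, cols, bands, 1)
        self.labels = labels

    compile_3D_CNN = compile_3D_CNN   # reuse the function defined above

clf = CubeClassifier(np.zeros((10, 9, 9, 32, 1), "float32"), labels=[0, 1, 2])
clf.compile_3D_CNN()
clf.model.summary()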
Example No. 4
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import constraints, layers
from tensorflow.keras.losses import mse
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam


def build_dense_model(num_features):
    """Variational autoencoder with a regression head: two-layer MLP encoder
    and decoder, plus a latent prior whose mean is generated from the
    regression target. Returns the VAE and a regressor sharing its layers."""
    regression_target = layers.Input(shape=(1,), name='ground_truth')
    feature_input = layers.Input(shape=(num_features,), name='feature_input')
    encoder_output = layers.GaussianDropout(0.1)(feature_input)
    encoder_output = layers.Dense(64, activation='tanh', name='encoder_hidden')(encoder_output)
    encoder_output = layers.Dense(64, activation='tanh', name='encoder_hidden_2')(encoder_output)
    z_mean = layers.Dense(LATENT_DIM, name='z_mean')(encoder_output)
    z_log_var = layers.Dense(LATENT_DIM, name='z_log_var')(encoder_output)
    r_mean = layers.Dense(1, name='r_mean')(encoder_output)
    r_log_var = layers.Dense(1, name='r_log_var')(encoder_output)

    # Sample latent and regression target
    z = layers.Lambda(sampling, output_shape=(LATENT_DIM,), name='z')([z_mean, z_log_var])
    r = layers.Lambda(sampling, output_shape=(1,), name='r')([r_mean, r_log_var])

    # Latent generator
    pz_mean = layers.Dense(LATENT_DIM,
                           kernel_constraint=constraints.unit_norm(),
                           name='pz_mean')(r)

    encoder = Model([feature_input, regression_target],
                    [z_mean, z_log_var, z,
                     r_mean, r_log_var, r,
                     pz_mean],
                    name='encoder')

    latent_input = layers.Input(shape=(LATENT_DIM,), name='decoder_input')
    decoder_output = layers.Dense(64, activation='tanh', name='decoder_hidden')(latent_input)
    decoder_output = layers.Dense(64, activation='tanh', name='decoder_hidden_2')(decoder_output)
    decoder_output = layers.Dense(num_features, name='decoder_output')(decoder_output)

    decoder = Model(latent_input, decoder_output, name='decoder')

    encoder_decoder_output = decoder(encoder([feature_input, regression_target])[2])
    vae = Model([feature_input, regression_target], encoder_decoder_output, name='vae')

    # Manually write up losses
    reconstruction_loss = mse(feature_input, encoder_decoder_output)
    kl_loss = 1 + z_log_var - K.square(z_mean - pz_mean) - K.exp(z_log_var)
    kl_loss = -0.5 * K.sum(kl_loss, axis=-1)
    label_loss = tf.divide(0.5 * K.square(r_mean - regression_target), K.exp(r_log_var)) + 0.5 * r_log_var
    vae_loss = K.mean(reconstruction_loss + kl_loss + label_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer=Adam())

    regressor = Model(feature_input, r_mean, name='regressor')

    return vae, regressor
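The snippet also relies on a LATENT_DIM constant and a sampling() helper. A minimal sketch of those assumptions (the standard Keras reparameterization recipe; LATENT_DIM = 2 is a placeholder) plus a hypothetical training call on random data:

import numpy as np
from tensorflow.keras import backend as K

LATENT_DIM = 2

def sampling(args):
    # Reparameterization trick: z = mean + sigma * epsilon
    mean, log_var = args
    eps = K.random_normal(shape=K.shape(mean))
    return mean + K.exp(0.5 * log_var) * eps

X = np.random.rand(256, 20).astype("float32")
y = np.random.rand(256, 1).astype("float32")
vae, regressor = build_dense_model(num_features=20)
vae.fit([X, y], epochs=2, verbose=0)   # the loss was attached via add_loss
y_hat = regressor.predict(X)           # point estimate from r_mean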
Example No. 5
from tensorflow.keras.activations import softmax
from tensorflow.keras.backend import l2_normalize
from tensorflow.keras.constraints import unit_norm
from tensorflow.keras.layers import (Input, Dense, Dropout, Concatenate,
                                     BatchNormalization)
from tensorflow.keras.models import Model


# build_shared_plain_network() is defined elsewhere in the original project.
def build_net3(CATES=12, height=64, width=64, channel=3, using_white_norm=True, using_SE=True):
    base_model = build_shared_plain_network(using_white_norm=using_white_norm, using_SE=using_SE)
    base_model.summary()
    x1 = Input(shape=(height, width, channel))
    x2 = Input(shape=(height, width, channel))
    x3 = Input(shape=(height, width, channel))

    y1 = base_model(x1)
    y2 = base_model(x2)
    y3 = base_model(x3)
    # L2-normalize each branch's features before fusing them.
    cfeat = Concatenate(axis=-1)([l2_normalize(y1, axis=-1),
                                  l2_normalize(y2, axis=-1),
                                  l2_normalize(y3, axis=-1)])
    cfeat = BatchNormalization()(cfeat)
    cfeat = Dropout(0.5)(cfeat)
    cfeat = Dense(512, use_bias=False)(cfeat)
    cfeat = BatchNormalization()(cfeat)
    cfeat = l2_normalize(cfeat, axis=-1)

    print("cates->", CATES, cfeat.shape)
    # Unit-norm weight columns on unit-norm features make this a
    # cosine-similarity classifier; the factor 16 acts as a softmax scale.
    bulk_feat = Dense(CATES, use_bias=False, kernel_constraint=unit_norm(axis=0),
                      activation=softmax, name="W1")(16 * cfeat)

    age = Dense(1, use_bias=False, name="age")(bulk_feat)

    gender = Dense(2, use_bias=False, kernel_constraint=unit_norm(axis=0),
                   activation=softmax, name="gender")(cfeat)
    return Model(inputs=[x1, x2, x3], outputs=[age, bulk_feat, gender])
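A hypothetical call; the three inputs appear to be three crops or scales of the same face image, and the batch size and zero arrays here are placeholders:

import numpy as np

model = build_net3(CATES=12)
crops = [np.zeros((4, 64, 64, 3), "float32") for _ in range(3)]
age, bulk_feat, gender = model.predict(crops)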
Example No. 6
from tensorflow.keras import backend as K
from tensorflow.keras.constraints import max_norm, unit_norm
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential


def build(width, height, depth):

    model = Sequential()
    inputShape = (height, width, depth)

    # if we are using "channels first", update the input shape
    if K.image_data_format() == "channels_first":
        inputShape = (depth, height, width)

    # 1st layer: convolutional
    model.add(
        Conv2D(32, (3, 3), input_shape=inputShape, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 2nd layer: convolutional, with a max-norm constraint on the kernel
    model.add(
        Conv2D(32, (3, 3),
               activation='relu',
               kernel_constraint=max_norm(3., axis=[0, 1, 2])))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3rd layer: convolutional
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               kernel_constraint=max_norm(3., axis=[0, 1, 2])))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Flatten
    model.add(Flatten())

    # 4th layer: fully connected, with unit-norm constrained weights
    model.add(Dense(64, activation='relu', kernel_constraint=unit_norm()))
    model.add(Dropout(0.5))

    # 5th layer: fully connected, sigmoid output for binary classification
    model.add(Dense(1, activation='sigmoid'))

    return model
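The builder returns an uncompiled model; a hypothetical use for 150x150 grayscale images (the compile settings are an assumption):

model = build(width=150, height=150, depth=1)
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy"])
model.summary()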
Example No. 7
from typing import Optional

from tensorflow import keras
from tensorflow.keras.layers import (Conv2D, MaxPooling2D, Flatten, Dense,
                                     Dropout, BatchNormalization)
from tensorflow.keras.constraints import max_norm, unit_norm


def build_keras_model(model_type: Optional[str] = None) -> keras.Sequential:
    """
    Creates the desired CNN model. Passing in a known string will
    use preconfigured models taken from papers.
    """
    initializer = keras.initializers.HeNormal()
    model = keras.Sequential()
    if model_type == "davies":
        opts = {
            "strides": 2,
            "activation": "relu",
            "kernel_initializer": initializer,
            "padding": "same",
        }
        # lots of small convolutions with max pooling
        model.add(Conv2D(8, 15, **opts))
        model.add(MaxPooling2D(pool_size=2, strides=2, padding="same"))

        model.add(Conv2D(8, 15, **opts))
        model.add(MaxPooling2D(pool_size=2, strides=2, padding="same"))

        model.add(Conv2D(16, 5, **opts))
        model.add(Conv2D(16, 5, **opts))

        model.add(Flatten())
        model.add(Dense(512, kernel_initializer=initializer))
        model.add(
            Dense(1, kernel_initializer=initializer, activation="sigmoid"))
    else:
        # my current model
        constraint = max_norm(3)
        convOpts = {
            "strides": 2,
            "activation": "relu",
            "kernel_initializer": initializer,
            "padding": "same",
            "kernel_constraint": constraint
        }
        maxOpts = {
            "strides": 2,
            "padding": "same",
        }
        model.add(Conv2D(64, 4, **convOpts))
        model.add(MaxPooling2D(4, **maxOpts))
        model.add(Conv2D(64, 4, **convOpts))
        model.add(MaxPooling2D(4, **maxOpts))

        model.add(Dropout(.5))
        model.add(Conv2D(64, 4, **convOpts))
        model.add(MaxPooling2D(4, **maxOpts))

        model.add(Dropout(.25))
        model.add(Conv2D(128, 2, **convOpts))

        model.add(Flatten())
        model.add(
            Dense(512,
                  kernel_initializer=initializer,
                  kernel_constraint=unit_norm()))
        model.add(BatchNormalization())
        model.add(
            Dense(1, kernel_initializer=initializer, activation="sigmoid"))

    model.compile(
        "adam",  # gradient optimizer
        loss="binary_crossentropy",  # loss function
        metrics=["accuracy"],  # reported during training, not optimized
    )

    return model
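Neither branch fixes an input shape, so Keras builds the layers lazily on first use; a hypothetical example with 128x128 single-channel inputs:

model = build_keras_model()   # default (non-"davies") branch
model.build(input_shape=(None, 128, 128, 1))
model.summary()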
Example No. 8
from os import getcwd, listdir
from os.path import join

import tensorflow as tf
from tensorflow import lite
from tensorflow.keras.constraints import unit_norm
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Dense,
                                     Flatten, MaxPooling2D)
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# meteor_sort_learning_rate(), MeteorSortCallback, get_performance_measures()
# and plot_acc_and_loss() are project-local helpers defined elsewhere.


def meteor_sort() -> None:
    """
    Meteor sort training main script. It carries out the following tasks:
        - Generate the training and validation generators.
        - Create the TensorFlow (Keras) model.
        - If enabled, run the `meteor_sort_learning_rate()` function.
        - Train the model.
        - If a checkpoint is selected, convert the model to TensorFlow Lite
          and call `get_performance_measures()` to compute precision, recall
          and F1-score.
        - Plot accuracy and loss over the training epochs for both the
          training and validation sets via `plot_acc_and_loss()`.

    :return: None
    """
    tf.keras.backend.clear_session()

    # Data
    data_dir = join(getcwd(), "meteor_data")
    train_dir = join(data_dir, 'train')
    validation_dir = join(data_dir, 'validation')

    # Model handling
    model_to_convert = ""
    model_name = 'model_v2_1'
    results_dir = join(getcwd(), 'results')
    results_dir_weights = join(results_dir, 'weights')

    # Hyperparameters for the training
    image_resolution: tuple = (256, 256)
    image_resolution_gray_scale: tuple = (256, 256, 1)
    epochs: int = 10
    learning_rate: float = 5e-4
    get_ideal_learning_rate: bool = False
    train_set_threshold: float = 0.92
    validation_set_threshold: float = 0.93

    num_training_images = len(listdir(join(train_dir, 'meteors'))) + \
        len(listdir(join(train_dir, 'non_meteors')))
    num_validation_images = len(listdir(join(validation_dir, 'meteors'))) + \
        len(listdir(join(validation_dir, 'non_meteors')))
    batch_size: int = 64
    steps_per_epoch: int = int(num_training_images / batch_size)
    validation_steps: int = int(num_validation_images / batch_size)

    # Rescale all images by 1./255

    train_datagen = ImageDataGenerator(
        rescale=1.0 / 255,
        rotation_range=10,  # randomly rotate images by up to 10 degrees
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=5,  # shear the image by up to 5 degrees
        zoom_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
        fill_mode='nearest')

    validation_datagen = ImageDataGenerator(rescale=1.0 / 255.)

    train_generator = train_datagen.flow_from_directory(
        train_dir,
        batch_size=batch_size,
        class_mode='binary',
        color_mode='grayscale',
        target_size=image_resolution)

    validation_generator = validation_datagen.flow_from_directory(
        validation_dir,
        batch_size=batch_size,
        class_mode='binary',
        color_mode='grayscale',
        target_size=image_resolution)

    model = Sequential([
        Conv2D(8, (7, 7),
               activation='elu',
               input_shape=image_resolution_gray_scale,
               strides=1,
               kernel_initializer='he_uniform',
               kernel_constraint=unit_norm()),
        MaxPooling2D(pool_size=(3, 3)),
        BatchNormalization(),
        Conv2D(12, (5, 5),
               activation='elu',
               kernel_initializer='he_uniform',
               kernel_constraint=unit_norm()),
        MaxPooling2D(pool_size=(3, 3)),
        BatchNormalization(),
        Conv2D(12, (3, 3),
               activation='elu',
               kernel_initializer='he_uniform',
               kernel_constraint=unit_norm()),
        MaxPooling2D(pool_size=(2, 2)),
        BatchNormalization(),
        Conv2D(8, (3, 3),
               activation='elu',
               kernel_initializer='he_uniform',
               kernel_constraint=unit_norm()),
        MaxPooling2D(pool_size=(2, 2)),
        BatchNormalization(),
        Flatten(),
        Dense(200,
              activation='elu',
              kernel_initializer='he_uniform',
              kernel_constraint=unit_norm()),
        BatchNormalization(),
        Dense(16,
              activation='elu',
              kernel_initializer='he_uniform',
              kernel_constraint=unit_norm()),
        BatchNormalization(),
        Dense(1, activation='sigmoid', kernel_initializer='he_uniform')
    ])

    if get_ideal_learning_rate:
        meteor_sort_learning_rate(model, train_dir, image_resolution,
                                  batch_size, epochs, steps_per_epoch)

    model.summary()
    optimizer = Adam(learning_rate=learning_rate)
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    my_callback = MeteorSortCallback(train_set_threshold,
                                     validation_set_threshold, model,
                                     model_name, results_dir_weights)

    history = model.fit(train_generator,
                        validation_data=validation_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=epochs,
                        validation_steps=validation_steps,
                        shuffle=True,
                        verbose=1,
                        callbacks=[my_callback])

    # If a checkpoint has been selected, convert the model and compute its
    # performance measures:
    if model_to_convert != "":
        # Load model to convert weights:
        model.load_weights(join(results_dir, model_to_convert))

        # Convert the model to TensorFlow Lite:
        converter = lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        with open("meteorLiteModel.tflite", "wb") as tflite_file:
            tflite_file.write(tflite_model)

        # Get performance measures:
        get_performance_measures(model,
                                 train_dir,
                                 image_resolution,
                                 join(results_dir,
                                      'performance_' + model_name + '.txt'),
                                 threshold=0.50)

    # Plot Accuracy and Loss in both train and validation sets
    plot_acc_and_loss(history, results_dir, model_name[-5:])
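flow_from_directory() infers the two classes from the subdirectory names, so the script expects a data layout like:

meteor_data/
├── train/
│   ├── meteors/
│   └── non_meteors/
└── validation/
    ├── meteors/
    └── non_meteors/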
Example No. 9
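The excerpt below assumes several names defined earlier in the original script. A minimal sketch of that setup; every value, and the normalized Hadamard target, is a guess based on how the names are used:

import numpy as np
from scipy.linalg import hadamard

n = 16                           # layer width; Hadamard order (a power of 2)
num_layers = 2                   # number of unit-norm factors to learn
ntrain, epochs, batch = 4096, 50, 64
C = 1e-4                         # regularization strength
entropy = False                  # use the l1 branch below (entropy_reg is
                                 # a project-local regularizer)
initializer = "orthogonal"
had = hadamard(n) / np.sqrt(n)   # target matrix with unit-norm rows
opt_layers = [had] * num_layers  # hypothesized optimal per-layer weights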
import numpy as np
import tensorflow as tf
from tensorflow.keras import constraints, layers, regularizers
from tensorflow.keras.initializers import Constant

print(opt_layers)
model = tf.keras.Sequential()
for i in range(num_layers):
    if entropy:
        regularizer = entropy_reg(C)
    else:
        regularizer = regularizers.l1(C)

    if initializer == "orthogonal":
        keras_initializer_arg = "orthogonal"
    else:
        print("Using hypothesized optimal solution for layer %d initializer" %
              i)
        keras_initializer_arg = Constant(value=opt_layers[i])
    model.add(
        layers.Dense(n,
                     use_bias=False,
                     kernel_initializer=keras_initializer_arg,
                     kernel_constraint=constraints.unit_norm(axis=1),
                     kernel_regularizer=regularizer))

model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Train on random inputs mapped through the fixed target matrix, so the
# stacked unit-norm layers learn a factorization of it.
Xtrain = np.random.normal(size=(ntrain, n))
Ytrain = np.matmul(Xtrain, had)

model.fit(Xtrain, Ytrain, epochs=epochs, batch_size=batch)

print("Solution:")
for i in range(num_layers):
    print(model.layers[i].get_weights())