Example #1
from tensorflow.keras import layers
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input
from tensorflow.keras.models import Model

def create_model(trainable=True):
    # ALPHA and NUM_CLASSES are module-level constants in the original source.
    image_input = Input(shape=(160, 120, 3))
    model = MobileNetV2(input_tensor=image_input, include_top=False, alpha=ALPHA, weights=None)
    model.trainable = trainable  # the flag was previously accepted but never applied
    last_layer = model.layers[-1].output

    x = GlobalAveragePooling2D()(last_layer)
    x = layers.GaussianDropout(0.3)(x)
    x = Dense(512, activation=layers.ELU(alpha=1.0), name='fc1')(x)
    x = layers.GaussianDropout(0.1)(x)
    x = Dense(64, activation=layers.ELU(alpha=1.0), name='fc2')(x)
    x = layers.GaussianDropout(0.05)(x)
    x = Dense(NUM_CLASSES, activation='linear', name='output')(x)

    return Model(inputs=model.input, outputs=x)
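A minimal usage sketch; the constants below are illustrative stand-ins for the module-level values in the original source:

ALPHA, NUM_CLASSES = 1.0, 4            # illustrative values only
model = create_model(trainable=False)  # frozen MobileNetV2 backbone
model.compile(optimizer='adam', loss='mse')  # linear head suits a regression loss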
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

def build_dense_model(num_features_1, num_features_2):
    """Two-branch model: GaussianDropout over flat features, plus a
    Conv1D/self-attention branch over a variable-length sequence."""
    inputs_1 = layers.Input(shape=(num_features_1,))
    output_1 = layers.GaussianDropout(0.1)(inputs_1)

    inputs_2 = layers.Input(shape=(None, num_features_2))
    output_2 = layers.Conv1D(filters=5,
                             kernel_size=4,
                             padding='same',
                             activation='relu')(inputs_2)
    output_2 = layers.Attention()([output_2, output_2])  # self-attention
    output_2 = layers.GlobalMaxPooling1D()(output_2)
    output_2 = layers.Dropout(0.1)(output_2)

    output = layers.concatenate([output_1, output_2])
    output = layers.Dense(64, activation='relu')(output)
    output = layers.BatchNormalization()(output)
    output = layers.Dropout(0.1)(output)
    output = layers.Dense(64, activation='relu')(output)
    output = layers.BatchNormalization()(output)
    # NUM_BINS is a module-level constant in the original source.
    output_actual = layers.Dense(NUM_BINS, activation='softmax')(output)
    output_median = layers.Dense(NUM_BINS, activation='softmax')(output)
    model = Model(inputs=[inputs_1, inputs_2],
                  outputs=[output_actual, output_median])

    model.compile(optimizer=Adam(), loss='categorical_crossentropy')

    return model
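A quick smoke test with random data (the NUM_BINS value here is illustrative):

import numpy as np
NUM_BINS = 10                                    # illustrative value
model = build_dense_model(num_features_1=8, num_features_2=3)
x1 = np.random.rand(16, 8)
x2 = np.random.rand(16, 20, 3)                   # any sequence length works
y = np.random.rand(16, NUM_BINS)
y /= y.sum(axis=1, keepdims=True)                # rows sum to 1 for crossentropy
model.fit([x1, x2], [y, y], epochs=1, verbose=0)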
Example #3
    def forward_vec(self, hidden, out_size, name):
        # `tfkl` is tf.keras.layers; `self.is_training` is a boolean flag
        # owned by the enclosing class.
        # Always add some noise to spread outputs.
        noisy = tfkl.GaussianDropout(0.2)(hidden, training=self.is_training)

        # Decode to a partial state vector through a tapering MLP.
        vec_1 = tfkl.Dense(out_size * 2, name=name + "_fc1_vec")(noisy)
        vec_1 = tf.nn.leaky_relu(vec_1, alpha=0.01, name=name + "_leak1_vec")
        vec_2 = tfkl.Dense(out_size + out_size // 2,
                           name=name + "_fc2_vec")(vec_1)
        vec_2 = tf.nn.leaky_relu(vec_2, alpha=0.01, name=name + "_leak2_vec")
        vec = tfkl.Dense(out_size, name=name + "_fc3_vec")(vec_2)
        return vec
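For context, a standalone sketch of the same pattern outside the class (the tfkl alias and the tensor shapes are assumptions):

import tensorflow as tf
tfkl = tf.keras.layers

hidden = tf.random.normal([8, 64])
noisy = tfkl.GaussianDropout(0.2)(hidden, training=True)   # noise only when training
vec = tfkl.Dense(16)(noisy)                                # final projection stage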
Example #4
import tensorflow as tf
from tensorflow.keras import layers

def dec(n_1=1024, n_2=512, r=0.12):
    # IN_SHAPE is a module-level constant in the original source.
    model = tf.keras.Sequential()
    model.add(layers.InputLayer(input_shape=IN_SHAPE))
    model.add(layers.GaussianDropout(r))
    model.add(layers.Dense(n_1))                     # no activation: linear
    model.add(layers.Dense(n_2, activation='selu'))
    model.add(layers.Dense(8192))                    # linear output
    return model
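A usage sketch with an illustrative input shape:

IN_SHAPE = (256,)        # illustrative; the real value is defined elsewhere
decoder = dec()
decoder.summary()        # (None, 256) -> 1024 -> 512 -> 8192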
Example #5
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

def build_model():
    # `train_dataset` is a module-level DataFrame in the original source;
    # its column count fixes the input width.
    model = keras.Sequential([
        layers.Dense(1550, activation=tf.nn.relu, kernel_initializer='normal',
                     input_shape=[len(train_dataset.keys())]),
        layers.BatchNormalization(),
        layers.GaussianDropout(0.7),
        layers.Dense(700, activation=tf.nn.relu, kernel_initializer='normal'),
        layers.BatchNormalization(),
        layers.GaussianDropout(0.5),
        layers.Dense(700, activation=tf.nn.relu, kernel_initializer='normal'),
        layers.BatchNormalization(),
        layers.GaussianDropout(0.4),
        layers.Dense(350, activation=tf.nn.relu, kernel_initializer='normal'),
        layers.BatchNormalization(),
        layers.GaussianDropout(0.2),
        layers.Dense(1, activation='linear', kernel_initializer='normal')
    ])

    #optimizer = tf.keras.optimizers.RMSprop(0.0001)
    optimizer = tf.keras.optimizers.Adam(0.0001, decay=1e-5)  # legacy `decay` kwarg
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['mean_absolute_error', 'mean_squared_error'])
    return model
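A smoke test on random data; the DataFrame here is an illustrative stand-in:

import numpy as np
import pandas as pd

train_dataset = pd.DataFrame(np.random.rand(64, 10))   # illustrative stand-in
model = build_model()
model.fit(train_dataset.values, np.random.rand(64), epochs=1, verbose=0)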
Example #6
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

def build_dense_model(num_features):
    """ Simple two layer MLP """
    inputs = layers.Input(shape=(num_features,))
    output = layers.GaussianDropout(0.1)(inputs)
    output = layers.Dense(64, activation='relu')(output)
    output = layers.BatchNormalization()(output)
    output = layers.Dropout(0.1)(output)
    output = layers.Dense(64, activation='relu')(output)
    output = layers.BatchNormalization()(output)
    output = layers.Dense(1)(output)
    model = Model(inputs=inputs, outputs=output)

    model.compile(optimizer=Adam(), loss='mean_squared_error')
    return model
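Quick check on random inputs:

import numpy as np
model = build_dense_model(num_features=10)
model.fit(np.random.rand(32, 10), np.random.rand(32, 1), epochs=1, verbose=0)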
Example #7
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import constraints, layers
from tensorflow.keras.losses import mse
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

def build_dense_model(num_features):
    """Regression VAE: encodes features into a latent z and a sampled
    regression target r, with the latent prior conditioned on r.
    LATENT_DIM and the reparameterization helper `sampling` come from
    elsewhere in the original source."""
    regression_target = layers.Input(shape=(1,), name='ground_truth')
    feature_input = layers.Input(shape=(num_features,), name='feature_input')
    encoder_output = layers.GaussianDropout(0.1)(feature_input)
    encoder_output = layers.Dense(64, activation='tanh', name='encoder_hidden')(encoder_output)
    encoder_output = layers.Dense(64, activation='tanh', name='encoder_hidden_2')(encoder_output)
    z_mean, z_log_var = layers.Dense(LATENT_DIM, name='z_mean')(encoder_output), \
                        layers.Dense(LATENT_DIM, name='z_log_var')(encoder_output)
    r_mean, r_log_var = layers.Dense(1, name='r_mean')(encoder_output), \
                        layers.Dense(1, name='r_log_var')(encoder_output)

    # Sample latent and regression target via the reparameterization trick
    z = layers.Lambda(sampling, output_shape=(LATENT_DIM,), name='z')([z_mean, z_log_var])
    r = layers.Lambda(sampling, output_shape=(1,), name='r')([r_mean, r_log_var])

    # Latent generator: prior mean over z as a function of r
    pz_mean = layers.Dense(LATENT_DIM,
                           kernel_constraint=constraints.unit_norm(),
                           name='pz_mean')(r)

    encoder = Model([feature_input, regression_target],
                    [z_mean, z_log_var, z,
                     r_mean, r_log_var, r,
                     pz_mean],
                    name='encoder')

    latent_input = layers.Input(shape=(LATENT_DIM,), name='decoder_input')
    decoder_output = layers.Dense(64, activation='tanh', name='decoder_hidden')(latent_input)
    decoder_output = layers.Dense(64, activation='tanh', name='decoder_hidden_2')(decoder_output)
    decoder_output = layers.Dense(num_features, name='decoder_output')(decoder_output)

    decoder = Model(latent_input, decoder_output, name='decoder')

    encoder_decoder_output = decoder(encoder([feature_input, regression_target])[2])
    vae = Model([feature_input, regression_target], encoder_decoder_output, name='vae')

    # Manually write up losses
    reconstruction_loss = mse(feature_input, encoder_decoder_output)
    kl_loss = 1 + z_log_var - K.square(z_mean - pz_mean) - K.exp(z_log_var)
    kl_loss = -0.5 * K.sum(kl_loss, axis=-1)
    label_loss = tf.divide(0.5 * K.square(r_mean - regression_target), K.exp(r_log_var)) + 0.5 * r_log_var
    vae_loss = K.mean(reconstruction_loss + kl_loss + label_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer=Adam())

    regressor = Model(feature_input, r_mean, name='regressor')

    return vae, regressor
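A minimal training sketch; LATENT_DIM and sampling below are illustrative reconstructions of the missing helpers, and this symbolic add_loss style targets older tf.keras releases:

import numpy as np

LATENT_DIM = 2                          # illustrative value

def sampling(args):
    """Standard VAE reparameterization trick (assumed implementation)."""
    mean, log_var = args
    eps = K.random_normal(shape=K.shape(mean))
    return mean + K.exp(0.5 * log_var) * eps

X = np.random.rand(64, 20).astype('float32')
y = np.random.rand(64, 1).astype('float32')
vae, regressor = build_dense_model(num_features=20)
vae.fit([X, y], epochs=1, verbose=0)    # losses were attached via add_loss
preds = regressor.predict(X)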
Example #8
from tensorflow.keras import layers, optimizers
from tensorflow.keras.applications import EfficientNetB2
from tensorflow.keras.models import Model

def make_model(weights=None):
    # `weights` optionally restores fine-tuned weights for the whole model;
    # the backbone itself always starts from ImageNet.
    cb = EfficientNetB2(weights='imagenet',
                        include_top=False,
                        drop_connect_rate=0.4,
                        pooling='avg',
                        input_shape=(256, 256, 3))
    x = cb.output
    x = layers.GaussianDropout(0.3)(x)
    x = layers.Dense(2, activation='softmax', kernel_regularizer='l1_l2')(x)
    model = Model(cb.input, x)
    #model.summary()
    model.compile(optimizer=optimizers.Adam(1e-4),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    if weights:
        model.load_weights(weights)
    return model
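Usage sketch (the checkpoint path is hypothetical):

model = make_model()                          # fresh ImageNet-initialised backbone
# model = make_model('checkpoints/best.h5')  # or resume fine-tuned weights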
from tensorflow.keras import backend as K
from tensorflow.keras import layers as KL
from tensorflow.keras.models import Model

def get_model():
    # `targets` is a module-level list of the two output names in the
    # original source.
    seq_inp = KL.Input(shape=(14, 2))

    x = KL.Conv1D(8, 3, activation="relu")(KL.GaussianDropout(0.1)(seq_inp))
    x = KL.Conv1D(32, 3, activation="relu", strides=3)(x)
    x = KL.Conv1D(128, 3, activation="relu")(x)

    x = KL.Flatten()(x)

    # relu + cumsum keeps each 3-step output non-negative and non-decreasing.
    out1 = KL.Dense(3, activation="relu")(x)
    out1 = KL.Lambda(lambda x: K.cumsum(x, axis=1), name=targets[0])(out1)

    out2 = KL.Dense(3, activation="relu")(x)
    out2 = KL.Lambda(lambda x: K.cumsum(x, axis=1), name=targets[1])(out2)

    model = Model(inputs=seq_inp, outputs=[out1, out2])
    return model
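Smoke test with random sequences; the target names here are illustrative:

import numpy as np
targets = ["out_a", "out_b"]            # illustrative output names
model = get_model()
model.compile(optimizer="adam", loss="mse")
pred_a, pred_b = model.predict(np.random.rand(4, 14, 2))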