# Example 1
# 0
def get_autoencoder(input_dimension):
    """Build and compile a symmetric dense autoencoder.

    Layer widths start at 10x the input dimension and shrink by a factor
    of ~3 until reaching the input dimension; the decoder mirrors the
    encoder (minus the narrowest layer), ending in a reconstruction layer
    of size ``input_dimension``.

    :param input_dimension: number of input features.
    :return: compiled Keras Model (rmsprop, MSE loss).
    """
    input_layer = Input(shape=(input_dimension, ))

    # Widths: input_dimension * 10, divided by 3 until <= input_dimension.
    width = input_dimension * 10
    widths = [width]
    while width > input_dimension:
        width = int(width / 3)
        widths.append(width)

    # Encoder: progressively narrower Dense layers.
    net = input_layer
    for units in widths:
        net = Dense(units, activation="relu")(net)

    # Decoder: mirror of the encoder, skipping the narrowest width.
    for units in reversed(widths[:-1]):
        net = Dense(units, activation="relu")(net)

    # NOTE(review): "relu" on the reconstruction layer assumes non-negative
    # inputs — confirm against the data preprocessing.
    net = Dense(input_dimension, activation="relu")(net)

    model = Model(inputs=input_layer, outputs=net)
    model.compile(optimizer="rmsprop", loss="mean_squared_error")
    model.summary()

    return model
# Example 2
# 0
def InceptionModel(x_train):
    """Build a single-output regression model on an Inception backbone.

    Crops and resizes the input, feeds it through the module-level
    ``inception`` model, then regresses one value through two Dense layers.

    :param x_train: sample whose ``.shape`` defines the input layer.
        NOTE(review): ``Input(x_train.shape)`` uses every dimension of the
        argument — confirm callers pass a single sample, not a batch.
    :return: compiled tf.keras Model (Adam, MSE).
    """
    X_input = Input(x_train.shape)

    # Crop 60 px from the top and 25 px from the bottom; full width kept.
    X = Cropping2D(cropping=((60, 25), (0, 0)))(X_input)

    # Resize with a Lambda layer; `input_size` is a module-level constant.
    X = Lambda(lambda image: tf.image.resize(image, (input_size, input_size)))(
        X)

    # Feed the resized input into the Inception backbone and build the head
    # directly on this call's output.  (Fix: the original discarded this
    # tensor and used inception.get_output_at(-1), which is fragile when
    # the shared backbone has been called more than once.)
    features = inception(X)

    model = GlobalAveragePooling2D(data_format=None)(features)
    model = Dense(240)(model)
    model = Dense(64)(model)
    predictions = Dense(1, activation='relu')(model)

    # Creates the model, assuming your final layer is named "predictions"
    model = Model(inputs=X_input, outputs=predictions)
    # Compile the model
    model.compile(optimizer='Adam', loss='mse', metrics=['mse'])

    # Check the summary of this new model to confirm the architecture
    model.summary()
    return model
# Example 3
# 0
def build_model(input_shape,
                loss="categorical_crossentropy",
                learning_rate=0.0001):
    """Build a dense 10-class classifier with Keras.

    :param input_shape (int): number of input features (flattened).
    :param loss (str): loss function to use.
    :param learning_rate (float): Adam learning rate.
    :return: compiled TensorFlow/Keras model.
    """

    # The network: 1024 -> 32 Dense stack with dropout, softmax output.
    inp = Input(shape=(input_shape, ))
    model = Dense(1024, activation='relu')(inp)
    model = Dropout(0.3)(model)
    model = Dense(512, activation='relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(256, activation='relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(128, activation='relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(64, activation='relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(32, activation='relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(10, activation='softmax')(model)

    model = Model(inputs=inp, outputs=model)
    # Bug fix: the original passed optimizer='adam' (a string), which
    # silently ignored the learning_rate parameter.
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss=loss,
                  metrics=['accuracy'])

    # Print the model summary.
    model.summary()
    return model
def mep():
    """Build an expanding Dense stack (4 -> 16 -> 256) as a Model.

    NOTE(review): relies on a module-level `inputep` input tensor.
    """
    tensor = inputep
    for units, layer_name in ((4, "d"), (16, "de"), (256, "dee")):
        tensor = Dense(units, activation="relu", name=layer_name)(tensor)
    net = Model(inputs=inputep, outputs=tensor)
    print(net.summary())
    return net
def get_model(input_shape):
    """Build a two-headed (age, gender) model on a MobileNetV2 backbone.

    :param input_shape: input image shape, e.g. (H, W, 3).
    :return: compiled tf.keras Model with outputs [age, gender].
    """
    base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                                   include_top=False,
                                                   weights='imagenet')

    # Shared embedding head on top of the backbone features.
    model = GlobalAveragePooling2D()(base_model.output)
    model = Dense(EMBEDDING_SIZE, use_bias=USE_BIAS)(model)
    model = BatchNormalization()(model)
    model = Activation("relu")(model)

    # Age head: one sigmoid unit trained with MSE — assumes age targets
    # are normalized to [0, 1]; TODO confirm against the training pipeline.
    predictions_a = Dense(units=1,
                          kernel_initializer=WEIGHT_INIT,
                          use_bias=USE_BIAS,
                          activation="sigmoid",
                          name='age')(model)
    # Gender head: 2-way softmax with categorical cross-entropy.
    predictions_g = Dense(units=2,
                          kernel_initializer=WEIGHT_INIT,
                          use_bias=USE_BIAS,
                          activation="softmax",
                          name='gender')(model)

    model = Model(inputs=base_model.input,
                  outputs=[predictions_a, predictions_g])
    model.summary()

    # Fix: `lr` is the deprecated keyword; tf.keras uses `learning_rate`
    # (consistent with the rest of this file).
    opt = Adam(learning_rate=0.001)
    model.compile(optimizer=opt,
                  loss={
                      "age": "mse",
                      "gender": "categorical_crossentropy",
                  },
                  loss_weights={
                      "age": 1,
                      "gender": 1,
                  },
                  metrics={
                      'age': 'mse',
                      'gender': 'acc'
                  })

    return model
# Example 6
# 0
    def init_model(self, load=True):
        """Create (or load from disk) the DNN regressor into ``self.model``.

        :param load: if True and ``self.filename`` exists, load the saved
            model instead of building a new one (sets ``self.loaded``).
        """
        if load and self.filename and exists(self.filename):
            logger.info("DNN: pretrained model")
            self.model = load_model(self.filename)
            self.loaded = True
            return
        self.loaded = False

        h = self.hypers
        # Input width comes from the right-hand-side matrix's column count.
        dims = self.rhs.shape[1]
        inp = Input(shape=(dims, ))  # renamed from `input` (shadowed builtin)
        m = inp
        last_dim = dims
        for i in range(h.layers):
            # Layer i's width is the fraction h[f"l{i}"] of the previous width.
            d = math.ceil(h[f"l{i}"] * last_dim)
            last_dim = d
            kwargs = dict(activation=h.act)
            # Glorot pairs with tanh; He with relu-family activations.
            kwargs['kernel_initializer'] = 'glorot_uniform' \
                if h.act == 'tanh' else 'he_uniform'
            m = Dense(d, **kwargs)(m)
            # Don't apply batchnorm to the last hidden layer
            if h.bn and i < h.layers - 1:
                m = BatchNormalization()(m)
        m = Dense(1, activation='linear')(m)
        m = Model(inp, m)
        # http://zerospectrum.com/2019/06/02/mae-vs-mse-vs-rmse/
        # MAE because we _want_ outliers (user score adjustments)
        # Bug fix: the original chain read `... if 'amsgrad' else ...` — a
        # truthy string literal — so the SGD branch was unreachable and
        # every non-nadam config silently got amsgrad Adam.
        if h.opt == 'nadam':
            opt = Nadam(learning_rate=h.lr)
        elif h.opt == 'amsgrad':
            opt = Adam(learning_rate=h.lr, amsgrad=True)
        else:
            opt = SGD(lr=h.lr, momentum=0.9, decay=0.01, nesterov=True)
        m.compile(
            loss=h.loss,
            optimizer=opt,
        )
        m.summary()
        self.model = m
# Example 7
# 0
def get_model(input_shape):
    """Build a nine-headed face-attribute model on a MobileNetV2 backbone.

    Heads: age (regression), gender, beard, eyes_glasses, eyes_open,
    mouth_open, mustache, sunglasses (2-way), and expression (8-way).

    :param input_shape: input image shape, e.g. (H, W, 3).
    :return: compiled tf.keras Model with one output per attribute head.
    """
    base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                                   include_top=False,
                                                   weights='imagenet')

    # Shared embedding on top of the backbone features.
    model = GlobalAveragePooling2D()(base_model.output)
    model = Dense(EMBEDDING_SIZE,
                  kernel_initializer=WEIGHT_INIT,
                  use_bias=USE_BIAS)(model)
    model = BatchNormalization()(model)
    model = Activation("relu")(model)

    # Age head: single tanh unit trained with MAE — assumes age targets are
    # normalized to [-1, 1]; TODO confirm against the training pipeline.
    p_age = Dense(
        units=1,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="tanh",
        name='age')(model)
    p_gender = Dense(
        units=2,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="sigmoid",
        name='gender')(model)
    p_beard = Dense(
        units=2,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="sigmoid",
        name='beard')(model)
    p_eyes_glasses = Dense(
        units=2,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="sigmoid",
        name='eyes_glasses')(model)
    p_eyes_open = Dense(
        units=2,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="sigmoid",
        name='eyes_open')(model)
    p_mouth_open = Dense(
        units=2,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="sigmoid",
        name='mouth_open')(model)
    p_mustache = Dense(
        units=2,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="sigmoid",
        name='mustache')(model)
    p_sunglasses = Dense(
        units=2,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="sigmoid",
        name='sunglasses')(model)
    p_expression = Dense(
        units=8,
        kernel_initializer=WEIGHT_INIT,
        use_bias=USE_BIAS,
        activation="softmax",
        name='expression')(model)

    model = Model(inputs=base_model.input, outputs=[p_age, p_gender, p_beard, p_eyes_glasses, p_eyes_open,
                                                    p_mouth_open, p_mustache, p_sunglasses, p_expression])
    model.summary()

    # Fix: `lr` is the deprecated keyword; tf.keras uses `learning_rate`
    # (consistent with the rest of this file).
    opt = Adam(learning_rate=0.001)
    model.compile(
        optimizer=opt,
        loss={
            "age": "mae",
            "gender": "binary_crossentropy",
            "beard": "categorical_crossentropy",
            "eyes_glasses": "categorical_crossentropy",
            "eyes_open": "categorical_crossentropy",
            "mouth_open": "categorical_crossentropy",
            "mustache": "categorical_crossentropy",
            "sunglasses": "categorical_crossentropy",
            "expression": "categorical_crossentropy",
        },
        loss_weights={
            "age": 1,
            "gender": 1,
            "beard": 1,
            "eyes_glasses": 1,
            "eyes_open": 1,
            "mouth_open": 1,
            "mustache": 1,
            "sunglasses": 1,
            "expression": 1,
        },
        metrics={
            'age': 'mae',
            'gender': 'binary_accuracy',
            "beard": 'categorical_accuracy',
            "eyes_glasses": 'categorical_accuracy',
            "eyes_open": 'categorical_accuracy',
            "mouth_open": 'categorical_accuracy',
            "mustache": 'categorical_accuracy',
            "sunglasses": 'categorical_accuracy',
            "expression": 'categorical_accuracy'
        })

    return model