Example #1
File: E_PbP.py  Project: ptgregg/P5Anomaly
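The snippets below are excerpts from the project and assume imports roughly along these lines (Keras 2 functional API); build_hidden_layers, generate_wtilde_layer, generate_wi_layer, the loss functions, metrics, and settings are defined elsewhere in the project and are not reproduced here:

from keras import optimizers
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Dense, Lambda, Concatenate, Add, Multiply,
                          Reshape, Activation, ActivityRegularization)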
def make_regressor(n_hidden_layers=3,
                   hidden_layer_size=100,
                   activation='tanh',
                   learning_rate=0.001,
                   dropout_prob=0.0):
    # Inputs
    input_layer = Input(shape=(settings.n_features,))

    # Network
    hidden_layer = Dense(hidden_layer_size, activation=activation)(input_layer)
    if n_hidden_layers > 1:
        hidden_layer_ = build_hidden_layers(n_hidden_layers - 1,
                                            hidden_layer_size=hidden_layer_size,
                                            activation=activation,
                                            dropout_prob=dropout_prob)
        hidden_layer = hidden_layer_(hidden_layer)
    log_r_hat_layer = Dense(1, activation='linear')(hidden_layer)

    # Translate to s
    r_hat_layer = Lambda(lambda x: K.exp(x))(log_r_hat_layer)
    s_hat_layer = Lambda(lambda x: 1. / (1. + x))(r_hat_layer)

    # Combine outputs
    output_layer = Concatenate()([s_hat_layer, log_r_hat_layer])
    model = Model(inputs=[input_layer], outputs=[output_layer])

    # Compile model
    model.compile(loss=loss_function_ratio_regression,
                  metrics=metrics,
                  optimizer=optimizers.Adam(lr=learning_rate, clipnorm=10.))

    return model
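A minimal usage sketch for this builder, assuming hypothetical arrays X_train and X_test of shape (n, settings.n_features) and a target array y_train whose two columns line up with the concatenated [s_hat, log_r_hat] output (the exact target layout depends on loss_function_ratio_regression, which is not shown here):

regressor = make_regressor(n_hidden_layers=3, hidden_layer_size=100)
regressor.fit(X_train, y_train, epochs=50, batch_size=128, validation_split=0.1)

pred = regressor.predict(X_test)
s_hat, log_r_hat = pred[:, 0], pred[:, 1]    # unpack the concatenated outputs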
Example #2
def make_classifier_score(n_hidden_layers=3,
                          hidden_layer_size=100,
                          activation='tanh',
                          dropout_prob=0.0,
                          learn_log_r=False,
                          l2_regularization=0.001,
                          learning_rate=1.e-3,
                          lr_decay=0.):
    # Inputs
    input_layer = Input(shape=(settings.n_thetas_features, ))

    # Network
    hidden_layer = Dense(hidden_layer_size, activation=activation)(input_layer)
    if n_hidden_layers > 1:
        hidden_layer_ = build_hidden_layers(
            n_hidden_layers - 1,
            hidden_layer_size=hidden_layer_size,
            activation=activation,
            dropout_prob=dropout_prob)
        hidden_layer = hidden_layer_(hidden_layer)

    if learn_log_r:
        log_r_hat_layer = Dense(1, activation='linear')(hidden_layer)
        r_hat_layer = Lambda(lambda x: K.exp(x))(log_r_hat_layer)
        s_hat_layer = Lambda(lambda x: 1. / (1. + x))(r_hat_layer)

    else:
        s_hat_layer = Dense(1, activation='sigmoid')(hidden_layer)
        r_hat_layer = Lambda(lambda x: (1. - x) / x)(s_hat_layer)
        log_r_hat_layer = Lambda(lambda x: K.log(x))(r_hat_layer)

    # Score
    regularizer_layer = ActivityRegularization(
        l1=0., l2=l2_regularization)(log_r_hat_layer)
    gradient_layer = Lambda(lambda x: K.gradients(x[0], x[1])[0],
                            output_shape=(settings.n_thetas_features, ))(
                                [regularizer_layer, input_layer])
    score_layer = Lambda(lambda x: x[:, -settings.n_params:],
                         output_shape=(settings.n_params, ))(gradient_layer)

    # Combine outputs
    output_layer = Concatenate()([s_hat_layer, log_r_hat_layer, score_layer])
    model = Model(inputs=[input_layer], outputs=[output_layer])

    # Compile model
    model.compile(loss=loss_function_score,
                  metrics=metrics,
                  optimizer=optimizers.Adam(lr=learning_rate,
                                            decay=lr_decay,
                                            clipnorm=1.))

    return model
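The score output is the gradient of log r_hat with respect to the last settings.n_params input components, i.e. the derivative with respect to the theta block of the concatenated (x, theta) input. A hedged sketch of unpacking a prediction, with X_thetas standing in for a hypothetical array of (x, theta) rows:

model = make_classifier_score(n_hidden_layers=3)
pred = model.predict(X_thetas)
s_hat = pred[:, 0]       # classifier output s_hat(x, theta)
log_r_hat = pred[:, 1]   # estimated log likelihood ratio
t_hat = pred[:, 2:]      # score, d log r_hat / d theta, with settings.n_params columns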
Example #3
def make_combined_regressor(n_hidden_layers=3,
                            hidden_layer_size=100,
                            activation='tanh',
                            dropout_prob=0.0,
                            alpha=0.005,
                            learning_rate=1.e-3,
                            lr_decay=0.):
    # Inputs
    input_layer = Input(shape=(settings.n_thetas_features, ))

    # Network
    hidden_layer = Dense(hidden_layer_size, activation=activation)(input_layer)
    if n_hidden_layers > 1:
        hidden_layer_ = build_hidden_layers(
            n_hidden_layers - 1,
            hidden_layer_size=hidden_layer_size,
            activation=activation,
            dropout_prob=dropout_prob)
        hidden_layer = hidden_layer_(hidden_layer)
    log_r_hat_layer = Dense(1, activation='linear')(hidden_layer)

    # Translate to s
    r_hat_layer = Lambda(lambda x: K.exp(x))(log_r_hat_layer)
    s_hat_layer = Lambda(lambda x: 1. / (1. + x))(r_hat_layer)

    # Score
    gradient_layer = Lambda(lambda x: K.gradients(x[0], x[1])[0],
                            output_shape=(settings.n_thetas_features, ))(
                                [log_r_hat_layer, input_layer])
    score_layer = Lambda(lambda x: x[:, -settings.n_params:],
                         output_shape=(settings.n_params, ))(gradient_layer)

    # Combine outputs
    output_layer = Concatenate()([s_hat_layer, log_r_hat_layer, score_layer])
    model = Model(inputs=[input_layer], outputs=[output_layer])

    # Compile model
    model.compile(
        loss=lambda x, y: loss_function_combinedregression(x, y, alpha=alpha),
        metrics=metrics,
        optimizer=optimizers.Adam(lr=learning_rate,
                                  decay=lr_decay,
                                  clipnorm=10.))
    return model
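The loss is wrapped in a lambda so that alpha is bound while Keras still receives a two-argument (y_true, y_pred) callable; an equivalent way to express the same binding with functools.partial, shown only as a sketch since loss_function_combinedregression is defined elsewhere:

from functools import partial

# Binds alpha up front; the result keeps the (y_true, y_pred) signature Keras expects.
combined_loss = partial(loss_function_combinedregression, alpha=0.005)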
Example #4
File: E_PbP.py  Project: ptgregg/P5Anomaly
def make_classifier(n_hidden_layers=3,
                    hidden_layer_size=100,
                    activation='tanh',
                    dropout_prob=0.0,
                    learning_rate=0.001,
                    learn_log_r=False):
    # Inputs
    input_layer = Input(shape=(settings.n_features,))

    # Network
    hidden_layer = Dense(hidden_layer_size, activation=activation)(input_layer)
    if n_hidden_layers > 1:
        hidden_layer_ = build_hidden_layers(n_hidden_layers - 1,
                                            hidden_layer_size=hidden_layer_size,
                                            activation=activation,
                                            dropout_prob=dropout_prob)
        hidden_layer = hidden_layer_(hidden_layer)

    if learn_log_r:
        log_r_hat_layer = Dense(1, activation='linear')(hidden_layer)
        r_hat_layer = Lambda(lambda x: K.exp(x))(log_r_hat_layer)
        s_hat_layer = Lambda(lambda x: 1. / (1. + x))(r_hat_layer)

    else:
        s_hat_layer = Dense(1, activation='sigmoid')(hidden_layer)
        r_hat_layer = Lambda(lambda x: (1. - x) / x)(s_hat_layer)
        log_r_hat_layer = Lambda(lambda x: K.log(x))(r_hat_layer)

    # Combine outputs
    output_layer = Concatenate()([s_hat_layer, log_r_hat_layer])
    model = Model(inputs=[input_layer], outputs=[output_layer])

    # Compile model
    model.compile(loss=loss_function_carl,
                  metrics=metrics,
                  optimizer=optimizers.Adam(lr=learning_rate, clipnorm=1.))

    return model
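Both branches encode the same invertible relation between the classifier output and the likelihood ratio, s = 1 / (1 + r) and r = (1 - s) / s; a quick numpy check of that round trip, with illustrative values only:

import numpy as np

r = np.array([0.5, 1.0, 2.0])
s = 1.0 / (1.0 + r)                     # classifier output implied by the ratio
assert np.allclose((1.0 - s) / s, r)    # inverse mapping recovers r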
Example #5
def make_regressor_morphingaware(n_hidden_layers=2,
                                 hidden_layer_size=100,
                                 activation='tanh',
                                 dropout_prob=0.0,
                                 factor_out_sm=True,
                                 epsilon=1.e-4,
                                 learning_rate=1.e-3,
                                 lr_decay=0.):
    # Inputs
    input_layer = Input(shape=(settings.n_thetas_features, ))
    x_layer = Lambda(lambda x: x[:, :settings.n_features],
                     output_shape=(settings.n_features, ))(input_layer)
    theta_layer = Lambda(lambda x: x[:, -settings.n_params:],
                         output_shape=(settings.n_params, ))(input_layer)

    # Morphing weights
    wtilde_layer = generate_wtilde_layer(theta_layer)
    wi_layer = generate_wi_layer(wtilde_layer)

    # Log ratio estimator for SM
    if factor_out_sm:
        hidden_layer = Dense(hidden_layer_size,
                             activation=activation)(input_layer)
        if n_hidden_layers > 1:
            hidden_layer_ = build_hidden_layers(
                n_hidden_layers - 1,
                hidden_layer_size=hidden_layer_size,
                activation=activation,
                dropout_prob=dropout_prob)
            hidden_layer = hidden_layer_(hidden_layer)
        log_r0_hat_layer = Dense(1, activation='linear')(hidden_layer)
        r0_hat_layer = Lambda(lambda x: K.exp(x))(log_r0_hat_layer)

    # Log ratio estimators for each component
    ri_hat_layers = []
    for i in range(settings.n_morphing_samples):
        hidden_layer = Dense(hidden_layer_size, activation=activation)(x_layer)
        if n_hidden_layers > 1:
            hidden_layer_ = build_hidden_layers(
                n_hidden_layers - 1,
                hidden_layer_size=hidden_layer_size,
                activation=activation,
                dropout_prob=dropout_prob)
            hidden_layer = hidden_layer_(hidden_layer)

        if factor_out_sm:
            delta_ri_hat_layer = Dense(1, activation='linear')(hidden_layer)
            ri_hat_layer = Add()([delta_ri_hat_layer, r0_hat_layer])

        else:
            log_ri_hat_layer = Dense(1, activation='linear')(hidden_layer)
            ri_hat_layer = Lambda(lambda x: K.exp(x))(log_ri_hat_layer)

        ri_hat_layers.append(Reshape((1, ))(ri_hat_layer))
    ri_hat_layer = Concatenate()(ri_hat_layers)

    # Combine, clip, transform to \hat{s}
    wi_ri_hat_layer = Multiply()([wi_layer, ri_hat_layer])
    r_hat_layer = Reshape(
        (1, ))(Lambda(lambda x: K.sum(x, axis=1))(wi_ri_hat_layer))
    positive_r_hat_layer = Activation('relu')(r_hat_layer)

    # Translate to s
    s_hat_layer = Lambda(lambda x: 1. / (1. + x))(positive_r_hat_layer)

    # Score
    log_r_hat_layer = Lambda(lambda x: K.log(x + epsilon))(
        positive_r_hat_layer)
    gradient_layer = Lambda(lambda x: K.gradients(x[0], x[1])[0],
                            output_shape=(settings.n_thetas_features, ))(
                                [log_r_hat_layer, input_layer])
    score_layer = Lambda(lambda x: x[:, -settings.n_params:],
                         output_shape=(settings.n_params, ))(gradient_layer)

    # Combine outputs
    output_layer = Concatenate()(
        [s_hat_layer, log_r_hat_layer, score_layer, wi_layer, ri_hat_layer])
    model = Model(inputs=[input_layer], outputs=[output_layer])

    # Compile model
    model.compile(loss=loss_function_ratio_regression,
                  metrics=metrics,
                  optimizer=optimizers.Adam(lr=learning_rate,
                                            decay=lr_decay,
                                            clipnorm=10.))

    return model
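The combination step computes r_hat(x, theta) as the morphing-weighted sum of the per-component estimates, sum_i w_i(theta) * ri_hat(x), clipped to nonnegative values by the ReLU before translating to s_hat = 1 / (1 + r_hat). A numpy sketch of that arithmetic for a single event, with illustrative placeholder values standing in for the wi_layer and ri_hat_layer outputs:

import numpy as np

wi = np.array([0.2, -0.1, 0.9])     # morphing weights w_i(theta), one event
ri = np.array([1.1, 0.8, 1.3])      # per-component ratio estimates ri_hat(x)
r_hat = max(np.dot(wi, ri), 0.0)    # weighted sum, clipped like the relu layer
s_hat = 1.0 / (1.0 + r_hat)         # translate to s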