Code Example #1
# Imports assume standalone Keras 2.x on a TensorFlow 1.x backend (tf.placeholder and
# Adam(lr=...) are used below); elu_modif and umal_log_pdf are helper functions
# defined elsewhere in the accompanying code.
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Dense, Lambda, concatenate
from keras.models import Model
from keras.optimizers import Adam


def build_UMAL(input_size,
               architecture,
               learning_rate=0.001,
               training=True,
               mu_act='linear',
               b_act=lambda x: elu_modif(x, shift=0., epsilon=1e-15)):
    """
    UMAL building model process.
    
    If training = True, the returned model is a trainable Keras model built on top of the given DL architecture.
    If training = False, the returned model is a TensorFlow inference graph.

    mu_act is the activation function for the conditioned position parameter mu of the ALD.
    b_act is the activation function for the conditioned scale parameter b of the ALD.
    
    Requirements: Given any set of hidden layers, noted as NN, the parameter 'architecture'
     has to be a function that takes two inputs and returns the last hidden output, as
     follows (a usage sketch also appears after this listing):
    
        def template_architecture(input, name):
            penultimate_output = NN(input,name=name)
            return penultimate_output
        
    Returns: the built custom Keras model.
    
    """

    general_type = K.floatx()
    i = Input(name='input', shape=(input_size, ), dtype=general_type)

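    # Training mode: repeat each input n_taus times and pair every copy with a tau drawn
    # uniformly from (0.01, 0.99), so the asymmetry parameter is resampled on each pass.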
    if training:
        n_taus = K.variable(1, dtype=general_type.replace('float', 'int'))
        inputs = Lambda(lambda x: K.reshape(K.repeat(x, n_taus),
                                            shape=[-1, i.shape[-1]]))(i)
        tau = Lambda(lambda x: K.random_uniform(shape=[K.shape(inputs)[0], 1],
                                                minval=1e-2,
                                                maxval=1. - 1e-2,
                                                dtype=general_type))(inputs)
    else:
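        # Inference mode: tile every input across the user-supplied taus (the sel_taus
        # placeholder) so that each (input, tau) combination is evaluated deterministically.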
        sel_taus = tf.placeholder(general_type, shape=[None])
        tau = Lambda(lambda i: K.reshape(
            K.permute_dimensions(K.repeat(K.reshape(sel_taus, [-1, 1]),
                                          K.shape(i)[0]),
                                 pattern=(1, 0, 2)), (-1, 1)))(i)
        inputs = Lambda(lambda i: K.reshape(K.repeat(i,
                                                     K.shape(sel_taus)[0]),
                                            shape=[-1, i.shape[-1]]))(i)

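    # Condition the network on tau: concatenate the (repeated) inputs with their taus,
    # then predict the ALD position (mu) and scale (b) for each (input, tau) pair.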
    it = concatenate([inputs, tau], axis=1, name='input_concat_1')
    model = architecture(it, name='_hiden_layers')
    mu = Dense(units=1, activation=mu_act, name='l_mu')(model)
    b = Dense(units=1, activation=b_act, name='l_b')(model)
    model_output = concatenate([mu, b, tau], axis=1, name='main_output')
    model = Model(inputs=[i], outputs=[model_output])

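    # Expose the tau machinery on the returned model: the n_taus variable when training,
    # or the sel_taus placeholder (to be fed explicitly) when building the inference graph.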
    if training:
        model.n_taus = n_taus
    else:
        model.taus = sel_taus
        n_taus = K.shape(sel_taus)[0]

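    # Both modes share the same loss: the UMAL objective computed by umal_log_pdf over the
    # n_taus (mu, b, tau) triplets associated with each input.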
    opt = Adam(lr=learning_rate)
    model.compile(
        optimizer=opt,
        loss=(lambda y, p: umal_log_pdf(y, p, general_type, n_taus=n_taus)))
    model._hp = {
        'input_size': input_size,
        'architecture': architecture,
        'learning_rate': learning_rate,
        'training': training,
        'mu_act': mu_act,
        'b_act': b_act
    }
    return model
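
The listing above only defines the builder. As a quick usage sketch (not part of the original code), the snippet below plugs a small fully connected network into build_UMAL; the architecture function name, layer sizes, the choice of 16 taus, and the data names (X_train, y_train, X_test) are illustrative assumptions, and the training/inference calls are left as comments rather than asserted behavior.

# Usage sketch (illustrative assumptions: architecture, layer sizes, tau count, data shapes).
def small_fc_architecture(inp, name):
    h = Dense(64, activation='relu', name=name + '_dense_1')(inp)
    h = Dense(64, activation='relu', name=name + '_dense_2')(h)
    return h

umal_model = build_UMAL(input_size=10, architecture=small_fc_architecture)
K.set_value(umal_model.n_taus, 16)   # number of taus sampled per input during training
# umal_model.fit(X_train, y_train, epochs=100, batch_size=256)   # X_train: (N, 10), y_train: (N, 1)

# For the inference graph (training=False), the selected taus are fed through the placeholder:
# inf_model = build_UMAL(input_size=10, architecture=small_fc_architecture, training=False)
# mu_b_tau = K.get_session().run(inf_model.output,
#                                feed_dict={inf_model.input: X_test, inf_model.taus: [0.1, 0.5, 0.9]})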