Code Example #1
import tensorflow as tf


def construct_loss(X, Y, dic, Num, Model_prior, D):
    '''
    Build the loss function.

    X, Y        : data placeholders
    dic         : ordered dictionary containing the placeholders of U
    Num         : number of placeholder groups contained in dic
    Model_prior : the model/prior pair to use
    D           : number of parameters in the model
    '''
    F = make_trials(D)  # trial functions, defined elsewhere in this repo
    loss = 0
    for i in range(0, Num * D, D):

        U = [dic["U_" + str(i + j)] for j in range(D)]
        _W = [f(u) for f, u in zip(F, U)]

        # variable transformation: map (-inf, inf) onto (0, 1)
        W = [minus_inf_inf_to_zero_one(w) for w in _W]
        raw_params = _W
        # minus log-determinant of the transformation's Jacobian
        minus_Log_det_J = 0
        for p in raw_params:
            minus_Log_det_J += -tf.reduce_sum(
                minus_inf_inf_to_zero_one_Log_det_J(p), 1)
        # likelihood and prior; to use something other than logistic
        # regression, change Model_prior.  b, data_dim and Num_of_hidden
        # are assumed to be defined in the enclosing scope.
        params = parameter_reshaper(W, b, data_dim, Num_of_hidden)
        Minus_Log_likelihood, Minus_Prior = Model_prior(X, Y, params)

        Posteri_numerator = (Minus_Log_likelihood[:, None] + Minus_Prior
                             + minus_Log_det_J[:, None])

        # gradient of the minus log unnormalized posterior w.r.t. each u
        dp_du = [tf.gradients(Posteri_numerator, u)[0] for u in U]

        # d/du log|dw/du| for each trial function
        d2w_du2 = [
            tf.gradients(tf.log(tf.abs(tf.gradients(p, u)[0])), u)[0]
            for p, u in zip(raw_params, U)
        ]

        # squared residual, accumulated over every group inside the loop
        _G = [(g - h)**2 for g, h in zip(d2w_du2, dp_du)]
        L_0 = tf.reduce_sum(_G)

        loss += L_0
    return loss, F
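A minimal sketch of how this function might be called, assuming D = 1 and the helper names used above; the placeholder shapes and the data_dim value are illustrative assumptions, not taken from the source:

from collections import OrderedDict
import tensorflow as tf

data_dim = 2  # illustrative assumption
X = tf.placeholder(tf.float32, [None, data_dim])
Y = tf.placeholder(tf.float32, [None, 1])

Num, D = 50, 1
# keys U_0 ... U_{Num*D-1}, matching the lookup inside construct_loss
dic = OrderedDict(
    ("U_" + str(k), tf.placeholder(tf.float32, [None, 1]))
    for k in range(Num * D))

loss, F = construct_loss(X, Y, dic, Num, Logistic_regression_Model_prior, D)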
Code Example #2
import numpy as np
import tensorflow as tf


def construct_loss(dic, Num, Model_prior, D):
    '''
    Build the loss function for a 1-D toy target (a standard normal).

    dic         : ordered dictionary containing the placeholders of U
    Num         : number of placeholder groups contained in dic
    Model_prior : the model/prior pair to use (unused in this toy version)
    D           : number of parameters in the model
    '''
    F = make_trials(D)
    loss = 0
    for i in range(0, Num * D, D):

        U = [dic["U_" + str(i + j)] for j in range(D)]
        _W = [f(u) for f, u in zip(F, U)]

        # variable transformation: map (-inf, inf) onto (0, 1)
        W = [minus_inf_inf_to_zero_one(w) for w in _W]
        raw_params = _W
        # minus log-determinant of the transformation's Jacobian
        minus_Log_det_J = 0
        for p in raw_params:
            minus_Log_det_J += tf.log(tf.abs(tf.reduce_sum(
                minus_inf_inf_to_zero_one_Log_det_J(p), 1)))
        # log joint of the toy target: a unit-variance normal density,
        # with its constant normalizer added below; swap this block for
        # a real likelihood and prior
        Log_joint = -0.5 * (W[0]**2)

        Posteri_numerator = (Log_joint + minus_Log_det_J[:, None]
                             + np.log(1. / np.sqrt(2. * np.pi * np.square(1.))))

        # log|dw/du| for each trial function
        dw_du = [tf.log(tf.abs(tf.gradients(p, u)[0]))
                 for p, u in zip(raw_params, U)]

        # squared residual of the change-of-variables identity
        _G = (dw_du[0] + Posteri_numerator)**2
        L_0 = tf.reduce_sum(_G)

        loss += L_0
    return loss, F
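Neither example shows the two transformation helpers. Below is a minimal sketch consistent with their names and call sites, assuming the map onto (0, 1) is the logistic sigmoid (an assumption; the source does not show the actual definition):

import tensorflow as tf


def minus_inf_inf_to_zero_one(x):
    # assumed: squash (-inf, inf) onto (0, 1) with the logistic sigmoid
    return tf.sigmoid(x)


def minus_inf_inf_to_zero_one_Log_det_J(x):
    # elementwise log|d sigmoid(x)/dx| = log s + log(1 - s), s = sigmoid(x);
    # callers reduce this over axis 1 themselves
    s = tf.sigmoid(x)
    return tf.log(s) + tf.log(1. - s)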
    # fragment: the tail of _each_loss; the function header and the
    # Log_likelihood / Prior computation are missing from the source
    Posteri_numerator = (Log_likelihood[:, None] + Prior
                         + minus_Log_det_J[:, None])

    # gradient of the log unnormalized posterior w.r.t. each u
    dp_du = [tf.gradients(Posteri_numerator, u)[0] for u in U]

    # d/du log|dw/du| for each trial function
    d2w_du2 = [
        tf.gradients(tf.log(tf.abs(tf.gradients(p, u)[0])), u)[0]
        for p, u in zip(raw_params, U)
    ]

    _G = [(g + h)**2 for g, h in zip(d2w_du2, dp_du)]
    L_0 = tf.reduce_sum(_G)
    return L_0


F = make_trials(D)


def each_loss(U):
    # per-group loss under the logistic-regression model and prior
    return _each_loss(X, Y, Num, Logistic_regression_Model_prior, D, F, U)


# boundary conditions: pin the first trial function to 0 at u = 0
# and to 1 at u = 1
U_B = tf.constant([0.])[:, None]
U_B2 = tf.constant([1.])[:, None]

_w_t = F[0](U_B)
_w_t2 = F[0](U_B2)

loss = _loop(each_loss, 50, 3) + _w_t**2 + (_w_t2 - 1)**2
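The excerpt never shows how loss is minimized. A standard TF1 training step would look like the following sketch; the Adam optimizer, learning rate, and step count are assumptions, and if X and Y are placeholders the run call would also need a feed_dict with the data arrays:

train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        # one gradient step on the combined residual + boundary loss
        _, l = sess.run([train_op, loss])
        if step % 100 == 0:
            print(step, l)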