def _each_loss(X, Y, Num, Model_prior, D, F, U):
    #U=[tf.contrib.distributions.Uniform().sample(sample_shape=(1,1)) for j in range (D)]
    _W = [f(u) for f, u in zip(F, U)]

    # variable transformation
    W = [minus_inf_inf_to_zero_one(__w) for __w in _W]
    raw_params = _W
    # variable transformation's log-det-Jacobian
    minus_Log_det_J = 0
    for p in raw_params:
        minus_Log_det_J += tf.log(
            tf.abs(tf.reduce_sum(minus_inf_inf_to_zero_one_Log_det_J(p), 1)))
    # likelihood and prior calculation
    # to use a model other than logistic regression, change the model and prior
    Log_likelihood, Prior, _ = Model_prior(X, Y, W)

    Posteri_numerator = (Log_likelihood[:, None] + Prior
                         + minus_Log_det_J[:, None])

    dp_du = [tf.gradients(Posteri_numerator, u)[0] for u in U]

    d2w_du2 = [
        tf.gradients(tf.log(tf.abs(tf.gradients(_p, __u)[0])), __u)[0]
        for _p, __u in zip(raw_params, U)
    ]

    _G = [(i + j)**2 for i, j in zip(d2w_du2, dp_du)]
    L_0 = tf.reduce_sum(_G)
    return L_0
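These snippets lean on two transformation helpers that never appear in this listing. A minimal sketch, assuming the map from (-inf, inf) to (0, 1) is the sigmoid; any smooth bijection with a tractable Jacobian would fit the same interface:

def minus_inf_inf_to_zero_one(w):
    # map an unconstrained tensor into (0, 1); sigmoid is an assumption here
    return tf.sigmoid(w)

def minus_inf_inf_to_zero_one_Log_det_J(w):
    # elementwise log |d sigmoid(w)/dw| = log(s) + log(1 - s)
    s = tf.sigmoid(w)
    return tf.log(s) + tf.log(1. - s)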
def _each_loss(X, Y, Num, Model_prior, D, F, U):
    _W = [f(u) for f, u in zip(F, U)]

    # variable transformation
    W = [minus_inf_inf_to_zero_one(__w) for __w in _W]
    raw_params = _W
    # variable transformation's log-det-Jacobian
    minus_Log_det_J = 0
    for p in raw_params:
        minus_Log_det_J += -tf.reduce_sum(
            minus_inf_inf_to_zero_one_Log_det_J(p), 1)
    # likelihood and prior calculation
    # to use a model other than logistic regression, change the model and prior

    # b, data_dim and Num_of_hidden are free names taken from the enclosing scope
    params = parameter_reshaper(W, b, data_dim, Num_of_hidden)
    Minus_Log_likelihood, Minus_Prior = Model_prior(X, Y, params)

    Posteri_numerator = (Minus_Log_likelihood[:, None] + Minus_Prior
                         + minus_Log_det_J[:, None])

    dp_du = tf.gradients(Posteri_numerator, U)

    d2w_du2 = [
        tf.gradients(tf.log(tf.abs(tf.gradients(_p, __u)[0])), __u)[0]
        for _p, __u in zip(raw_params, U)
    ]

    _G = [(i - j)**2 for i, j in zip(d2w_du2, dp_du)]
    L_0 = tf.reduce_sum(_G)
    return L_0
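parameter_reshaper is another name this snippet assumes from its enclosing scope. A hypothetical sketch, assuming the flat parameter columns are split into the two weight blocks of a one-hidden-layer network (data_dim and Num_of_hidden as above):

def parameter_reshaper(W, b, data_dim, Num_of_hidden):
    # W: list of [batch, 1] parameter columns; b is kept only to match the call site
    flat = tf.concat(W, axis=1)  # [batch, D]
    n1 = data_dim * Num_of_hidden
    W1 = tf.reshape(flat[:, :n1], [-1, data_dim, Num_of_hidden])
    W2 = tf.reshape(flat[:, n1:], [-1, Num_of_hidden, 1])
    return W1, W2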
def construct_loss(X, Y, dic, Num, Model_prior, D):
    '''
    This is the function that builds the loss.

    X, Y : data placeholders
    dic : an ordered dictionary containing the U placeholders
    Num : how many U samples (groups of D placeholders) are in dic
    Model_prior : which model and prior we use
    D : how many parameters are in the model
    '''
    F = make_trials(D)
    loss = 0
    for i in range(0, Num * D, D):

        U = [dic["U_" + str(i + j)] for j in range(D)]
        _W = [f(u) for f, u in zip(F, U)]

        # variable transformation
        W = [minus_inf_inf_to_zero_one(__w) for __w in _W]
        raw_params = _W
        # variable transformation's log-det-Jacobian
        minus_Log_det_J = 0
        # note: the inner loop variable must not be i, or it shadows the outer index
        for p in raw_params:
            minus_Log_det_J += -tf.reduce_sum(
                minus_inf_inf_to_zero_one_Log_det_J(p), 1)
        # likelihood and prior calculation
        # to use a model other than logistic regression, change the model and prior
        params = parameter_reshaper(W, b, data_dim, Num_of_hidden)
        Minus_Log_likelihood, Minus_Prior = Model_prior(X, Y, params)

        Posteri_numerator = (Minus_Log_likelihood[:, None] + Minus_Prior
                             + minus_Log_det_J[:, None])

        dp_du = [tf.gradients(Posteri_numerator, u)[0] for u in U]

        d2w_du2 = [
            tf.gradients(tf.log(tf.abs(tf.gradients(_p, __u)[0])), __u)[0]
            for _p, __u in zip(raw_params, U)
        ]

        _G = [(a - b)**2 for a, b in zip(d2w_du2, dp_du)]
        L_0 = tf.reduce_sum(_G)

        loss += L_0  # accumulate inside the loop: one term per U sample
    return loss, F
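The dic argument expected above can be built as an ordered dictionary of scalar placeholders, one per parameter per U sample. A sketch matching the "U_<k>" keys the loop reads (Num and D as in the docstring):

from collections import OrderedDict

dictU = OrderedDict(
    ("U_" + str(k), tf.placeholder(tf.float32, [None, 1], name="U_" + str(k)))
    for k in range(Num * D))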
Example #4
def construct_loss(X, Y, F, dic, Num, Model_prior, D):
    '''
    This is the function that builds the loss.

    X, Y : data placeholders
    F : the trial network; called as F(U) and returning (output, sigma_list)
    dic : an ordered dictionary containing the U placeholders
    Num : how many U placeholders are included in dic
    Model_prior : which model and prior we use
    D : how many parameters are in the model
    '''

    loss = 0
    for i in range(0, Num):

        U = dic["U_" + str(i)]

        _W, sigma_list = F(U)
        # note: the output is squashed twice here, by sigmoid and then by the (0, 1) map
        _W = tf.nn.sigmoid(_W)
        # variable transformation
        W = minus_inf_inf_to_zero_one(_W)

        # variable transformation's log-det-Jacobian
        Log_det_J = tf.log(
            tf.abs(tf.reduce_sum(minus_inf_inf_to_zero_one_Log_det_J(_W), 1)))
        # likelihood and prior calculation
        # to use a model other than logistic regression, change the model and prior
        Log_likelihood, Prior = Model_prior(X, Y, W)

        Posteri_numerator = (Log_likelihood[:, None] + Prior
                             + Log_det_J[:, None])

        dp_du = tf.gradients(Posteri_numerator, U)[0]

        log_det_J = tf.reduce_sum(tf.log(sigma_list))

        d2w_du2 = tf.gradients(log_det_J, U)[0]

        L_0 = tf.reduce_sum((d2w_du2 + dp_du)**2)

        loss += L_0
    return loss
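In this variant, F consumes a whole [batch, D] block of U at once and also returns the per-dimension scales used for the log-det-Jacobian. A minimal hypothetical F with that interface, an elementwise affine map, for which tf.reduce_sum(tf.log(sigma_list)) above is exactly the log-det-Jacobian (MADE_NN below is a richer example):

def F_affine(U):
    # hypothetical trial map: elementwise affine transform of U
    mu = tf.get_variable("mu", shape=[1, D],
                         initializer=tf.zeros_initializer())
    log_sigma = tf.get_variable("log_sigma", shape=[1, D],
                                initializer=tf.zeros_initializer())
    sigma_list = tf.exp(log_sigma)
    return mu + sigma_list * U, sigma_list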
Example #5
def construct_loss(dic, Num, Model_prior, D):
    '''
    This is the function that builds the loss.

    dic : an ordered dictionary containing the U placeholders
    Num : how many U samples (groups of D placeholders) are in dic
    Model_prior : which model and prior we use
    D : how many parameters are in the model
    '''
    F = make_trials(D)
    loss = 0
    for i in range(0, Num * D, D):

        U = [dic["U_" + str(i + j)] for j in range(D)]
        _W = [f(u) for f, u in zip(F, U)]

        # variable transformation
        W = [minus_inf_inf_to_zero_one(__w) for __w in _W]
        raw_params = _W
        # variable transformation's log-det-Jacobian
        minus_Log_det_J = 0
        # note: the inner loop variable must not be i, or it shadows the outer index
        for p in raw_params:
            minus_Log_det_J += tf.log(
                tf.abs(
                    tf.reduce_sum(minus_inf_inf_to_zero_one_Log_det_J(p), 1)))
        # likelihood and prior calculation
        # here the target joint is simply a standard normal; change it for other models
        Log_joint = -0.5 * (W[0]**2)

        # np.log(1/sqrt(2*pi)) is the log-normalizer of N(0, 1)
        Posteri_numerator = (Log_joint + minus_Log_det_J[:, None]
                             + np.log(1. / np.sqrt(2. * np.pi)))

        dw_du = [
            tf.log(tf.abs(tf.gradients(_p, __u)[0]))
            for _p, __u in zip(raw_params, U)
        ]

        #_G = [(a + b - 1)**2 for a, b in zip(dw_du, Posteri_numerator)]
        _G = (dw_du[0] + Posteri_numerator)**2
        L_0 = tf.reduce_sum(_G)

        loss += L_0  # accumulate inside the loop: one term per U sample
    return loss, F
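make_trials is assumed from the surrounding project. A hypothetical version returning D independent scalar networks u -> w, one trial function per parameter, with variables reused across calls:

def make_trials(D):
    def make_one(k):
        def f(u):
            # small MLP trial function for parameter k
            with tf.variable_scope("trial_%d" % k, reuse=tf.AUTO_REUSE):
                h = tf.layers.dense(u, 8, activation=tf.nn.tanh)
                return tf.layers.dense(h, 1)
        return f
    return [make_one(k) for k in range(D)]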
Example #6
        loss += L_0
    return loss


F = MADE_NN(3, 8)
#F=make_trials(3,MF=False)

loss = construct_loss(X, Y, F, dictU, Num, Logistic_regression_Model_prior, D)

optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

# prediction for test data: draw 300 posterior samples of the weights
Ut = tf.contrib.distributions.Uniform().sample(sample_shape=(300, D))
_Wt, _ = F(Ut)

Wt = minus_inf_inf_to_zero_one(_Wt)
model_output_t = Logistic_regression_Model_output(X, Wt)

A = tf.reduce_mean(tf.sigmoid(model_output_t), 1)  # posterior-mean probability
prediction = tf.round(A)
predictions_correct = tf.cast(tf.equal(prediction[:, None], Y), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
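Logistic_regression_Model_output is another name the listing assumes. A plausible sketch that matches the reduce_mean over axis 1 above, producing one logit column per posterior sample:

def Logistic_regression_Model_output(X, W):
    # X: [N, data_dim], W: [samples, data_dim] -> logits of shape [N, samples]
    return tf.matmul(X, W, transpose_b=True)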

####sample
_Us = tf.contrib.distributions.Uniform().sample(sample_shape=(10000, D))
_Ws, _ = F(_Us)
Ws = minus_inf_inf_to_zero_one(_Ws)

if True:
    sess = tf.InteractiveSession()
    #writer = tf.summary.FileWriter('./graphs/logistic_reg', sess.graph)
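    # minimal run sketch (the original loop is not shown; the feeds are assumptions):
    sess.run(tf.global_variables_initializer())
    # _, l = sess.run([optimizer, loss], feed_dict={X: x_train, Y: y_train})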
loss = _loop(each_loss, 50, 3) + _w_t**2 + (_w_t2 - 1)**2  # boundary penalties on _w_t, _w_t2
'''
loss=0
for _ in range(20):
    _U=tf.contrib.distributions.Uniform().sample(sample_shape=(1,D))
    U=tf.split(axis = 1, num_or_size_splits = D, value = _U)
    loss+=each_loss(U)
'''

# Actual Prediction for test data
_Ut = tf.contrib.distributions.Uniform().sample(sample_shape=(300, D))
Ut = tf.split(axis=1, num_or_size_splits=D, value=_Ut)

_w_t = [f(u) for f, u in zip(F, Ut)]

Wt = [minus_inf_inf_to_zero_one(__w) for __w in _w_t]
model_output_t = Logistic_regression_Model_output(X, Wt)

A = tf.reduce_mean(tf.sigmoid(model_output_t), 1)
prediction = tf.round(A)
predictions_correct = tf.cast(tf.equal(prediction[:, None], Y), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)

optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

####sample
_Us = tf.contrib.distributions.Uniform().sample(sample_shape=(10000, D))
Us = tf.split(axis=1, num_or_size_splits=D, value=_Us)

_w_s = [f(u) for f, u in zip(F, Us)]
# boundary-condition placeholders and the constants fed into them (u = 0 and u = 1)
U_B = tf.placeholder(tf.float32, [None, 1], name='U_B_placeholder')
U_B2 = tf.placeholder(tf.float32, [None, 1], name='U_B2_placeholder')
BC0_data = np.array([[0.]])
BC1_data = np.array([[1.]])

_loss, F = construct_loss(dictU, Num, MoG, D)

_w1_B0 = F[0](U_B)
_w1_B1 = F[0](U_B2)

loss = _loss + _w1_B0**2 + (_w1_B1 - 1.)**2  #+tf.losses.get_regularization_loss()
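The two penalty terms pin the trial map at its endpoints, F[0](0) = 0 and F[0](1) = 1, so the learned transform behaves like an inverse CDF on [0, 1]. A sketch of how the boundary placeholders would be fed during a training step (the U feeds are assumptions):

feed = {U_B: BC0_data, U_B2: BC1_data}
# plus one entry per placeholder in dictU, e.g. feed[dictU["U_0"]] = u_sample
# sess.run(optimizer, feed_dict=feed)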

_Z = F[0](Ut)
Z = minus_inf_inf_to_zero_one(_Z)

optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

with tf.Session() as sess:
    #writer = tf.summary.FileWriter('./graphs/logistic_reg', sess.graph)

    start_time = time.time()
    sess.run(tf.global_variables_initializer())
    n_batches = 50  #int(N_train/batch_size)
    U_batches = 50  #int(Batch_U/minibatch_U)
    for i in range(1):  # number of epochs; widen the range to train longer
        print(i)
        for _ in range(500):
            total_loss = 0
            for _ in range(U_batches):