Code example #1
File: gamma_model.py  Project: uZeroJ/DRSA
    def __init__(self, lr_1, lr_2, l2_loss_weight, batch_size, dimension, theta0, util_train, util_test, campaign):
        self.lr_1 = lr_1
        self.lr_2 = lr_2
        self.util_train = util_train
        self.util_test = util_test
        self.train_data_amt = util_train.get_data_amt()
        self.test_data_amt = util_test.get_data_amt()
        self.batch_size = batch_size
        self.batch_num = int(self.train_data_amt / self.batch_size)
        self.l2_loss_weight = l2_loss_weight
        self.campaign = campaign

        # output directory
        model_name = "{0}_{1}_{2}_{3}".format(self.lr_1, self.lr_2, self.l2_loss_weight, self.batch_size)
        self.output_dir = 'output/gamma/{}/{}/'.format(campaign, model_name)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        # reset graph
        tf.reset_default_graph()

        # placeholders
        self.X = tf.sparse_placeholder(tf.float64)
        self.z = tf.placeholder(tf.float64)
        self.b = tf.placeholder(tf.float64)
        self.y = tf.placeholder(tf.float64)

        # trainable variables
        self.theta = tf.Variable([theta0], name = 'theta', dtype=tf.float64)
        # tf.reshape(self.theta, [1, 1])

        all_train_data = self.util_train.get_all_data_origin()
        self.init_ks_value = all_train_data[3] * all_train_data[2] / theta0 + (1 - all_train_data[3]) * all_train_data[1] / theta0
        self.ks = tf.Variable(self.init_ks_value, name='ks', dtype=tf.float64)
        self.w = tf.Variable(initial_value=tf.truncated_normal(shape=[dimension, 1], dtype=tf.float64), name='w')
        # computation graph phase1
        self.ps = tf.pow(self.z, (self.ks - 1.)) * tf.exp(-self.z / self.theta) \
             / tf.exp(tf.lgamma(self.ks)) / tf.pow(self.theta, self.ks)
        self.cs = tf.igamma(self.ks, self.b / self.theta) / tf.exp(tf.lgamma(self.ks))

        self.loss_win = tf.log(tf.clip_by_value(self.ps, 1e-8, 1.0))
        self.loss_lose = tf.log(tf.clip_by_value(1 - self.cs, 1e-8, 1.0))
        self.loss_phase1 = -tf.reduce_mean(self.y * self.loss_win + (1 - self.y) * self.loss_lose)
        self.optimizer1 = tf.train.GradientDescentOptimizer(self.lr_1)
        self.train_step1 = self.optimizer1.minimize(self.loss_phase1)

        # phase 2
        self.label_phase2 = tf.placeholder(tf.float64)
        self.log_label_phase2 = tf.log(tf.clip_by_value(self.label_phase2, 1e-8, 1.0))
        self.loss_phase2 = tf.reduce_mean(tf.square(tf.sparse_tensor_dense_matmul(self.X, self.w) - self.log_label_phase2)) \
                           + self.l2_loss_weight * tf.nn.l2_loss(self.w)
        self.optimizer2 = tf.train.MomentumOptimizer(self.lr_2, 0.9)
        self.train_step2 = self.optimizer2.minimize(self.loss_phase2)

        # session initialization
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
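
A note on the phase-1 graph above (a reading of the code, not taken from the DRSA project's documentation): ps is the Gamma(ks, theta) density evaluated at the winning price z, cs is the incomplete-gamma term evaluated at the losing bid b, and loss_phase1 is the censored negative log-likelihood

    L_1 = -\frac{1}{N} \sum_i \big[ y_i \log ps_i + (1 - y_i) \log (1 - cs_i) \big],
    \qquad
    ps = \frac{z^{k - 1} e^{-z/\theta}}{\Gamma(k)\, \theta^{k}}.

Phase 2 then fits the sparse linear model X w to the log of its labels under an L2 penalty on w.
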
Code example #2
File: gamma_model.py  Project: uhasan1/drsa
    def test(self):
        print('Test begin')
        self.pred_mp = tf.exp(tf.sparse_tensor_dense_matmul(self.X, self.w))
        self.MSE = tf.reduce_mean(tf.square(self.z - self.pred_mp))

        x, b, z, y = self.util_test.get_all_data_origin()
        feed_dict = {}

        # `dimension` (the feature dimensionality passed to __init__) is assumed to be
        # available in the enclosing scope here; it is not stored on self in this excerpt.
        feed_dict[self.X] = tf.SparseTensorValue(
            x, [1] * len(x), [self.test_data_amt, dimension])
        feed_dict[self.z] = z
        feed_dict[self.y] = y
        feed_dict[self.b] = b

        # calculate MSE
        mse = self.sess.run(self.MSE, feed_dict)
        print("MSE: {}".format(mse))

        ks = self.pred_mp / self.theta
        ps = tf.pow(self.z, (ks - 1.)) * tf.exp(-self.z / self.theta) / tf.pow(
            self.theta, ks) / tf.exp(tf.lgamma(ks))
        cs = tf.igamma(ks, self.b / self.theta) / tf.exp(tf.lgamma(ks))
        # calculate AUC and LogLoss
        win_rate = self.sess.run(cs, feed_dict)
        auc = roc_auc_score(y, win_rate)
        print("AUC: {}".format(auc))
        logloss = log_loss(y, win_rate)
        print("Log Loss: {}".format(logloss))

        # calculate ANLP
        logp = -tf.log(ps)
        logp_arr = self.sess.run(logp, feed_dict)
        logp_arr[np.isnan(logp_arr)] = 1e-20  # guard against NaN from overflow
        logp_arr[logp_arr == 0] = 1e-20       # and exact zeros before averaging

        anlp = np.mean(logp_arr)
        print("ANLP: {}".format(anlp))

        # save result and params
        fout = open(self.output_dir + 'result.txt', 'w')
        fout.writelines([
            "MSE: {0}   AUC: {1}    Log Loss: {2}   ANLP: {3}\n".format(
                mse, auc, logloss, anlp)
        ])
        fout.close()

        np.save(self.output_dir + 'w', self.sess.run(self.w))
        np.save(self.output_dir + 'k', self.sess.run(ks, feed_dict))
        np.save(self.output_dir + 'theta', self.sess.run(self.theta))
Code example #3
    def test_Igamma(self):
        t = tf.igamma(*self.random((3, 3), (3, 3)))
        self.check(t)
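
The snippet above depends on the surrounding test harness (self.random and self.check). A self-contained sketch of the same kind of check, written in the TF 1.x style of the other examples and comparing tf.igamma against SciPy's regularized lower incomplete gamma on random positive inputs (the shapes, ranges and tolerance are arbitrary choices):

import numpy as np
import tensorflow as tf
from scipy.special import gammainc  # regularized lower incomplete gamma P(a, x)

# random positive parameters and arguments, same (3, 3) shape as the test above
a = np.random.uniform(0.5, 5.0, size=(3, 3))
x = np.random.uniform(0.1, 10.0, size=(3, 3))

t = tf.igamma(tf.constant(a), tf.constant(x))  # elementwise P(a, x)

with tf.Session() as sess:
    tf_val = sess.run(t)

# tf.igamma computes P(a, x) = gamma(a, x) / Gamma(a), so it should agree with SciPy
np.testing.assert_allclose(tf_val, gammainc(a, x), rtol=1e-6)
print("tf.igamma matches scipy.special.gammainc")
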
Code example #4
    def _nnpom(self, projected, thresholds):
        # Assumed to be imported in the enclosing module of this excerpt:
        # the Keras backend as K, igamma/igammac/lgamma, distributions, and math.
        if self.use_tau == 1:
            projected = K.reshape(projected, shape=[-1]) / self.tau
        else:
            projected = K.reshape(projected, shape=[-1])

        # projected = K.Print(projected, data=[K.reduce_min(projected), K.reduce_max(projected), K.reduce_mean(projected)], message='projected min max mean')

        m = K.shape(projected)[0]
        a = K.reshape(K.tile(thresholds, [m]), shape=[m, -1])
        b = K.transpose(
            K.reshape(K.tile(projected, [self.num_classes - 1]), shape=[-1, m]))
        z3 = a - b

        # z3 = K.cond(K.reduce_min(K.abs(z3)) < 0.01, lambda: K.Print(z3, data=[K.reduce_min(K.abs(z3))], message='z3 abs min', summarize=100), lambda: z3)

        if self.link_function == 'probit':
            a3T = self.dist.cdf(z3)
        elif self.link_function == 'cloglog':
            a3T = 1 - K.exp(-K.exp(z3))
        elif self.link_function == 'glogit':
            a3T = 1.0 / K.pow(1.0 + K.exp(-self.lmbd *
                                          (z3 - self.mu)), self.alpha)
        elif self.link_function == 'cauchit':
            a3T = K.atan(z3 / math.pi) + 0.5
        elif self.link_function == 'lgamma':
            a3T = K.cond(
                self.q < 0,
                lambda: igammac(K.pow(self.q, -2),
                                K.pow(self.q, -2) * K.exp(self.q * z3)),
                lambda: K.cond(
                    self.q > 0,
                    lambda: igamma(K.pow(self.q, -2),
                                   K.pow(self.q, -2) * K.exp(self.q * z3)),
                    lambda: self.dist.cdf(z3)))
        elif self.link_function == 'gauss':
            # a3T = 1.0 / 2.0 + K.sign(z3) * K.igamma(1.0 / self.alpha, K.pow(K.abs(z3) / self.r, self.alpha)) / (2 * K.exp(K.lgamma(1.0 / self.alpha)))
            # z3 = K.Print(z3, data=[K.reduce_max(K.abs(z3))], message='z3 abs max')
            # K.sigmoid(z3 - self.p['mu']) - 1)
            a3T = 1.0 / 2.0 + K.tanh(z3 - self.p['mu']) * igamma(
                1.0 / self.p['alpha'],
                K.pow(K.pow((z3 - self.p['mu']) / self.p['r'], 2),
                      self.p['alpha'])) / (2 * K.exp(lgamma(1.0 / self.p['alpha'])))
        elif self.link_function == 'expgauss':
            u = self.lmbd * (z3 - self.mu)
            v = self.lmbd * self.sigma
            dist1 = distributions.Normal(loc=0., scale=v)
            dist2 = distributions.Normal(loc=v, scale=K.pow(v, 2))
            a3T = dist1.cdf(u) - K.exp(-u + K.pow(v, 2) / 2 +
                                       K.log(dist2.cdf(u)))
        elif self.link_function == 'ggamma':
            a3T = igamma(self.p['d'] / self.p['p'],
                         K.pow((z3 / self.p['a']), self.p['p'])) / K.exp(
                             lgamma(self.p['d'] / self.p['p']))
        else:
            a3T = 1.0 / (1.0 + K.exp(-z3))

        a3 = K.concatenate([a3T, K.ones([m, 1])], axis=1)
        a3 = K.concatenate(
            [K.reshape(a3[:, 0], shape=[-1, 1]), a3[:, 1:] - a3[:, 0:-1]],
            axis=-1)

        return a3
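
In the 'lgamma' branch above, the piecewise use of igammac / igamma / dist.cdf matches the usual form of the generalized log-gamma CDF (again a reading of the code, not a statement from the library's documentation):

    F(z) = Q(q^{-2},\, q^{-2} e^{q z})   for q < 0,
    F(z) = P(q^{-2},\, q^{-2} e^{q z})   for q > 0,

where P and Q are the regularized lower and upper incomplete gamma functions (tf.igamma and tf.igammac), and the q = 0 case falls back to self.dist.cdf.
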
Code example #5
File: gamma.py  Project: yuanjing-ma/probability
    def _cdf(self, x):
        x = self._maybe_assert_valid_sample(x)
        # Note that igamma returns the regularized incomplete gamma function,
        # which is what we want for the CDF.
        return tf.igamma(self.concentration, self.rate * x)
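
The comment in this example is the key fact about tf.igamma: for a Gamma distribution with the given concentration (alpha) and rate (beta), the CDF is exactly the regularized lower incomplete gamma, F(x) = P(alpha, beta * x). A quick SciPy cross-check of that identity, with arbitrary sample values:

import numpy as np
from scipy.special import gammainc
from scipy.stats import gamma

concentration, rate = 2.5, 0.7            # arbitrary alpha and beta
x = np.array([0.5, 1.0, 3.0, 10.0])

cdf_via_igamma = gammainc(concentration, rate * x)               # P(alpha, beta * x)
cdf_reference = gamma.cdf(x, a=concentration, scale=1.0 / rate)  # SciPy's Gamma CDF

np.testing.assert_allclose(cdf_via_igamma, cdf_reference, rtol=1e-10)
print(cdf_via_igamma)
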
Code example #6
import tensorflow as tf
"""功能:计算gamma(a,x)/gamma(a),gamma(a,x)=\intergral_from_0_to_x t^(a-1)*exp^(-t)dt。
输入:x为张量,可以为`float32`, `float64`类型。"""

a = tf.constant(1, tf.float64)
x = tf.constant([[1, 2, 3, 4]], tf.float64)
z = tf.igamma(a, x)

sess = tf.Session()
print(sess.run(z))
sess.close()
# z==>[[0.63212056 0.86466472 0.95021293 0.98168436]]
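
Since the regularized lower incomplete gamma with a = 1 reduces to P(1, x) = 1 - exp(-x), the printed values can be reproduced without TensorFlow:

import numpy as np
from scipy.special import gammainc

x = np.array([1., 2., 3., 4.])
print(gammainc(1.0, x))   # [0.63212056 0.86466472 0.95021293 0.98168436]
print(1.0 - np.exp(-x))   # identical: P(1, x) = 1 - exp(-x)
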
Code example #7
def GO_Gamma_v2(x, alpha):
    # x ~ Gamma(alpha, 1)
    x = tf.cast(x, tf.float64)
    alpha = tf.cast(alpha, tf.float64)

    logx = tf.log(x)
    ex_gamma_xa = tf.exp(x + tf.lgamma(alpha) + (1. - alpha) * logx)
    psi_m_log = tf.digamma(alpha + 1.) - logx
    igamma_up_reg = tf.igammac(alpha, x)

    # Part 1
    indx1 = tf.where(x <= 1e-2)
    x_indx1 = tf.gather_nd(x, indx1)
    alpha_indx1 = tf.gather_nd(alpha, indx1)
    GO_Gamma_alpha_value1 = tf.exp(x_indx1) * x_indx1 / alpha_indx1 * (
            tf.gather_nd(psi_m_log, indx1) +
            x_indx1 / tf.pow(alpha_indx1 + 1., 2) -
            tf.pow(x_indx1 / (alpha_indx1 + 2.), 2) +
            0.5 * tf.pow(x_indx1, 3) / tf.pow(alpha_indx1 + 3., 2)
    )

    # Part 2
    N_alpha = tf.round(tf.exp(
        - 0.488484605941243044124888683654717169702053070068359375 * tf.log(alpha)
        + 1.6948389987594634220613443176262080669403076171875
    ))
    indx2 = tf.where(tf.logical_and(
        tf.logical_and(x > 1e-2, alpha <= 3.),
        (x <= (alpha + N_alpha * tf.sqrt(alpha)))
    ))
    KK = 15
    kk = tf.cast(tf.range(1, KK + 1), tf.float64)
    x_indx2 = tf.gather_nd(x, indx2)
    alpha_indx2 = tf.gather_nd(alpha, indx2)
    GO_Gamma_alpha_value2 = tf.gather_nd(ex_gamma_xa, indx2) * (
            -tf.gather_nd(psi_m_log, indx2) * tf.gather_nd(igamma_up_reg, indx2) +
            (
                    tf.digamma(alpha_indx2 + KK + 1.) - tf.gather_nd(logx, indx2) -
                    tf.reduce_sum(
                        tf.igammac(tf.expand_dims(alpha_indx2, 1) + tf.expand_dims(kk, 0), tf.expand_dims(x_indx2, 1)) /
                        (tf.expand_dims(alpha_indx2, 1) + tf.expand_dims(kk, 0))
                        , 1)
            )
    )

    # Part 2_1
    indx2_1 = tf.where(tf.logical_and(
        tf.logical_and(x > 1e-2, alpha <= 3.),
        (x > (alpha + N_alpha * tf.sqrt(alpha)))
    ))
    KK = 15
    kk = tf.cast(tf.range(1, KK + 1), tf.float64)
    x_indx2_1 = tf.gather_nd(x, indx2_1)
    alpha_indx2_1 = tf.gather_nd(alpha, indx2_1)
    GO_Gamma_alpha_value2_1 = tf.gather_nd(ex_gamma_xa, indx2_1) * (
            -tf.gather_nd(psi_m_log, indx2_1) * tf.gather_nd(igamma_up_reg, indx2_1) +
            (
                    tf.digamma(alpha_indx2_1 + KK + 1.) - tf.gather_nd(logx, indx2_1) -
                    tf.reduce_sum(
                        tf.igammac(tf.expand_dims(alpha_indx2_1, 1) + tf.expand_dims(kk, 0),
                                   tf.expand_dims(x_indx2_1, 1)) /
                        (tf.expand_dims(alpha_indx2_1, 1) + tf.expand_dims(kk, 0))
                        , 1)
            )
    )
    GO_Gamma_alpha_value2_1 = tf.maximum(
        GO_Gamma_alpha_value2_1,
        1. / alpha_indx2_1 - tf.gather_nd(ex_gamma_xa, indx2_1) *
        tf.gather_nd(psi_m_log, indx2_1) * tf.gather_nd(igamma_up_reg, indx2_1)
    )

    # Part 3
    indx3 = tf.where(
        tf.logical_and(
            tf.logical_and(x > 1e-2, alpha > 3.),
            alpha <= 500.
        )
    )
    KK = 10
    kk = tf.cast(tf.range(1, KK + 1), tf.float64)
    x_indx3 = tf.gather_nd(x, indx3)
    alpha_indx3 = tf.gather_nd(alpha, indx3)

    x_l = alpha_indx3 - tf.log(alpha_indx3) * tf.sqrt(alpha_indx3)

    logx_l = tf.log(x_l)
    ex_gamma_xa_l = tf.exp(x_l + tf.lgamma(alpha_indx3) + (1. - alpha_indx3) * logx_l)
    psi_m_log_l = tf.digamma(alpha_indx3 + 1.) - logx_l
    igamma_low_reg_l = tf.igamma(alpha_indx3, x_l)
    # igamma_up_reg_l = tf.igammac(alpha_indx3, x_l)
    # f_l = ex_gamma_xa_l * (
    #         -psi_m_log_l * igamma_up_reg_l +
    #         (tf.digamma(alpha_indx3 + KK + 1.) - logx_l -
    #         tf.reduce_sum(
    #             tf.igammac(tf.expand_dims(alpha_indx3, 1) + tf.expand_dims(kk, 0), tf.expand_dims(x_l, 1)) /
    #             (tf.expand_dims(alpha_indx3, 1) + tf.expand_dims(kk, 0))
    #             , 1))
    # )
    f_l = ex_gamma_xa_l * (
            psi_m_log_l * igamma_low_reg_l +
            tf.reduce_sum(
                tf.igamma(tf.expand_dims(alpha_indx3, 1) + tf.expand_dims(kk, 0), tf.expand_dims(x_l, 1)) /
                (tf.expand_dims(alpha_indx3, 1) + tf.expand_dims(kk, 0))
                , 1)
    )

    g_l = (1. + (1. - alpha_indx3) / x_l) * f_l + (
        -ex_gamma_xa_l / x_l * igamma_low_reg_l +
        (psi_m_log_l +
         tf.reduce_sum(
             tf.exp(
                 tf.expand_dims(kk, 0) * tf.log(tf.expand_dims(x_l, 1)) +
                 tf.lgamma(tf.expand_dims(alpha_indx3, 1)) -
                 tf.lgamma(tf.expand_dims(alpha_indx3, 1) + tf.expand_dims(kk, 0) + 1.)
             ), 1))
    )

    x_m = alpha_indx3

    f_m = 1. + 0.167303227226226980395296095593948848545551300048828125 / (
        tf.pow(x_m, 1.0008649793164192676186985409003682434558868408203125) -
        0.07516433982238841793321881823430885560810565948486328125
    )

    x_r = 2. * alpha_indx3 - x_l

    f_r = 1. / alpha_indx3 - tf.exp(x_r + tf.lgamma(alpha_indx3) + (1. - alpha_indx3) * tf.log(x_r)) * (
            (tf.digamma(alpha_indx3 + 1.) - tf.log(x_r)) * tf.igammac(alpha_indx3, x_r)
    )
    lambda_r = tf.exp(
        959.627335718427275423891842365264892578125 / (
                tf.pow(alpha_indx3, 1.324768828487964622553363369661383330821990966796875) +
                142.427456986662718918523751199245452880859375
        )
        - 13.01439996187340142341781756840646266937255859375
    )

    x_mat_i = tf.concat([tf.expand_dims(x_l, 1), tf.expand_dims(x_m, 1), tf.expand_dims(x_r, 1)], 1)
    x_mat_bar_i = x_mat_i - tf.expand_dims(alpha_indx3, 1)
    x_mat_hat_i = tf.sqrt(x_mat_i) - tf.sqrt(tf.expand_dims(alpha_indx3, 1))
    f_mat_i = tf.concat([tf.expand_dims(f_l, 1), tf.expand_dims(f_m, 1), tf.expand_dims(f_r, 1)], 1)
    lambda_mat_i = tf.concat([tf.expand_dims(tf.ones_like(alpha_indx3), 1),
                              tf.expand_dims(tf.ones_like(alpha_indx3), 1),
                              tf.expand_dims(lambda_r, 1)
                              ], 1)

    x_mat_j = tf.expand_dims(x_l, 1)
    g_mat_j = tf.expand_dims(g_l, 1)
    lambda_mat_j = tf.expand_dims(tf.ones_like(alpha_indx3), 1)

    A = tf.reduce_sum(lambda_mat_i * tf.pow(x_mat_bar_i, 2), 1) + tf.reduce_sum(lambda_mat_j, 1)
    B = tf.reduce_sum(lambda_mat_i * x_mat_bar_i * x_mat_hat_i, 1) + \
        tf.reduce_sum(lambda_mat_j / 2. / tf.sqrt(x_mat_j), 1)
    C = tf.reduce_sum(lambda_mat_i * x_mat_bar_i, 1)
    D = tf.reduce_sum(lambda_mat_i * tf.pow(x_mat_hat_i, 2), 1) + tf.reduce_sum(lambda_mat_j / 4. / x_mat_j, 1)
    E = tf.reduce_sum(lambda_mat_i * x_mat_hat_i, 1)
    F = tf.reduce_sum(lambda_mat_i, 1)
    G = tf.reduce_sum(lambda_mat_i * x_mat_bar_i * f_mat_i, 1) + tf.reduce_sum(lambda_mat_j * g_mat_j, 1)
    H = tf.reduce_sum(lambda_mat_i * x_mat_hat_i * f_mat_i, 1) + \
        tf.reduce_sum(lambda_mat_j / 2. / tf.sqrt(x_mat_j) * g_mat_j, 1)
    I = tf.reduce_sum(lambda_mat_i * f_mat_i, 1)

    Z = F * tf.pow(B, 2) - 2. * B * C * E + D * tf.pow(C, 2) + A * tf.pow(E, 2) - A * D * F

    a_cor = 1. / Z * (G * (tf.pow(E, 2) - D * F) + H * (B * F - C * E) - I * (B * E - C * D))
    b_cor = 1. / Z * (G * (B * F - C * E) + H * (tf.pow(C, 2) - A * F) - I * (B * C - A * E))
    c_cor = 1. / Z * (-G * (B * E - C * D) + I * (tf.pow(B, 2) - A * D) - H * (B * C - A * E))

    GO_Gamma_alpha_value3 = a_cor * (x_indx3 - alpha_indx3) + b_cor * (tf.sqrt(x_indx3) - tf.sqrt(alpha_indx3)) + c_cor
    GO_Gamma_alpha_value3 = tf.maximum(
        GO_Gamma_alpha_value3,
        1. / alpha_indx3 - tf.gather_nd(ex_gamma_xa, indx3) *
        tf.gather_nd(psi_m_log, indx3) * tf.gather_nd(igamma_up_reg, indx3)
    )

    # Part 4
    # indx4 = tf.where(
    #     tf.logical_and(
    #         tf.logical_and(x > 1e-2, alpha > 500.),
    #         (x <= (alpha + 2. * tf.log(alpha) * tf.sqrt(alpha)))
    #     )
    # )
    indx4 = tf.where(
        tf.logical_and(x > 1e-2, alpha > 500.)
    )

    x_indx4 = tf.gather_nd(x, indx4)
    alpha_indx4 = tf.gather_nd(alpha, indx4)

    f_m_large = 1. + 0.167303227226226980395296095593948848545551300048828125 / (
        tf.pow(alpha_indx4, 1.0008649793164192676186985409003682434558868408203125) -
        0.07516433982238841793321881823430885560810565948486328125
    )
    g_m_large = 0.54116502161502622048061539317131973803043365478515625 * \
                tf.pow(alpha_indx4, -1.010274491769996618728555404231883585453033447265625)

    GO_Gamma_alpha_value4 = f_m_large + g_m_large * (x_indx4 - alpha_indx4)

    # Part 4_1
    # indx4_1 = tf.where(
    #     tf.logical_and(
    #         tf.logical_and(x > 1e-2, alpha > 500.),
    #         (x > (alpha + 2. * tf.log(alpha) * tf.sqrt(alpha)))
    #     )
    # )
    # alpha_indx4_1 = tf.gather_nd(alpha, indx4_1)
    # GO_Gamma_alpha_value4_1 = 1. / alpha_indx4_1 - tf.gather_nd(ex_gamma_xa, indx4_1) * \
    #     tf.gather_nd(psi_m_log, indx4_1) * tf.gather_nd(igamma_up_reg, indx4_1)

    # Summarize
    GO_Gamma_alpha = tf.sparse_to_dense(indx1, x.shape, GO_Gamma_alpha_value1) + \
                     tf.sparse_to_dense(indx2, x.shape, GO_Gamma_alpha_value2) + \
                     tf.sparse_to_dense(indx2_1, x.shape, GO_Gamma_alpha_value2_1) + \
                     tf.sparse_to_dense(indx3, x.shape, GO_Gamma_alpha_value3) + \
                     tf.sparse_to_dense(indx4, x.shape, GO_Gamma_alpha_value4)
    # + \
    # tf.sparse_to_dense(indx4_1, x.shape, GO_Gamma_alpha_value4_1)

    GO_Gamma_alpha = tf.cast(GO_Gamma_alpha, tf.float32)

    return GO_Gamma_alpha  # , x_l, x_r, f_l, f_m, f_r, g_l
Code example #8
tf.resource_strided_slice_assign()
tf.strided_slice_assign()
tf.strided_slice_grad()

tf.gather()
tf.gather_nd()
tf.gather_v2()
tf.get_summary_op()
tf.gradients()
tf.boolean_mask()
tf.sparse_mask()
tf.sequence_mask()

tf.random_gamma()
tf.digamma()
tf.igamma()
tf.lgamma()
tf.polygamma()
tf.igammac()

tf.tensor_shape.as_shape()

# gfile
tf.gfile.Copy()
tf.gfile.DeleteRecursively()
tf.gfile.Exists()
tf.gfile.Glob()
tf.gfile.IsDirectory()
tf.gfile.ListDirectory()
tf.gfile.MakeDirs()
tf.gfile.MkDir()