# Example #1
    def create_standAlone_test_graph(self, test_ind, test_y):
        """Build a standalone TF graph that evaluates the test log-likelihood.

        Uses constants (not placeholders) over the *entire* test set, so the
        resulting graph needs no feed dict.  The likelihood has the standard
        point-process form
            llk = sum_events f(i)  -  T * sum_{unique entries} exp(f(i))
        where f = utils_funcs.log_CP_base_rate(U, ind) is the log base rate.

        Args:
            test_ind: event entry indices; assumed an integer array of shape
                (num_events, nmod) — TODO confirm against caller.
            test_y: event timestamps; assumed shape (num_events, 1) and sorted
                ascending, since T is taken as test_y[-1][0] - test_y[0][0]
                — verify.

        Returns:
            self (fluent style), with self.llk_test holding the scalar test
            log-likelihood tensor and self.isTestGraphInitialized set True.
        """
        print("Create testing graph")
        self.test_ind = test_ind
        self.test_y = test_y

        # Unique entries (rows) of the test set: the integral term counts the
        # base rate once per unique entry, not once per event.
        self.ind_uniq_test = np.unique(test_ind, axis=0)
        self.num_uniq_ind_test = len(self.ind_uniq_test)
        self.num_test_events = len(test_ind)

        # Integral Term
        # sum_i < int_0^T lam_i>
        # placeholders
        self.entry_ind_test = tf.constant(self.ind_uniq_test, dtype=tf.int32)
        self.event_ind_test = tf.constant(self.test_ind, dtype=tf.int32)
        self.event_y_test = tf.constant(self.test_y, dtype=FLOAT_TYPE)

        # Observation-window length T = last timestamp minus first.
        self.tf_T_test = tf.constant(self.test_y[-1][0] - self.test_y[0][0],
                                     dtype=FLOAT_TYPE)

        # sample posterior base rate ( f )
        self.gp_base_rate_entries_test = utils_funcs.log_CP_base_rate(
            self.tf_U, self.entry_ind_test)
        self.base_rate_entries_test = tf.exp(self.gp_base_rate_entries_test)

        # int term 1, using entryEvent
        # For a rate lam_i constant in time, int_0^T lam_i dt = T * lam_i.
        self.int_part_test = self.tf_T_test * tf.reduce_sum(
            self.base_rate_entries_test)

        # sample event base rate
        self.gp_base_rate_events_test = utils_funcs.log_CP_base_rate(
            self.tf_U, self.event_ind_test)

        # event sum term 1: sum of log-rates over all observed events.
        self.event_sum_test = tf.reduce_sum(self.gp_base_rate_events_test)
        self.llk_test = self.event_sum_test - self.int_part_test

        self.isTestGraphInitialized = True

        return self
    def create_standAlone_test_graph(self, test_ind, test_y):
        """Build a standalone TF graph evaluating the test log-likelihood
        (Rayleigh-decay variant).

        Unlike the plain-Poisson version, per-entry sufficient statistics are
        precomputed by utils_funcs.extract_event_tensor_Reileigh ("Reileigh"
        is presumably a misspelling of Rayleigh — helper name kept as-is):
        for each unique entry i it appears to yield the event count n_i, a
        squared-time sum sq_sum_i, and a log-term log_sum_i — TODO confirm
        against the helper's implementation.  The likelihood is then
            llk = sum_i (n_i * f_i + log_sum_i) - sum_i exp(f_i) * sq_sum_i
        with f_i the log base rate of entry i.

        Args:
            test_ind: event entry indices; assumed integer array of shape
                (num_events, nmod) — verify against caller.
            test_y: event timestamps; assumed shape (num_events, 1).

        Returns:
            self, with self.llk_test as the scalar test log-likelihood tensor
            and self.isTestGraphInitialized set True.
        """
        print("Create testing graph")
        self.test_ind = test_ind
        self.test_y = test_y

        self.num_test_events = len(test_ind)
        # Per-unique-entry sufficient statistics for the Rayleigh likelihood.
        self.uniq_ind_test, self.n_i_test, self.sq_sum_test, self.log_sum_test = utils_funcs.extract_event_tensor_Reileigh(
            self.test_ind, self.test_y)
        self.num_uniq_ind_test = len(self.uniq_ind_test)

        # Integral Term
        # sum_i < int_0^T lam_i>
        # placeholders
        self.entry_ind_test = tf.constant(self.uniq_ind_test, dtype=tf.int32)
        self.entry_n_i_test = tf.constant(self.n_i_test, dtype=FLOAT_TYPE)
        self.entry_sq_sum_test = tf.constant(self.sq_sum_test,
                                             dtype=FLOAT_TYPE)
        self.entry_log_sum_test = tf.constant(self.log_sum_test,
                                              dtype=FLOAT_TYPE)

        # Log base rate f_i per unique entry, and its exponential (the rate).
        self.gp_base_rate_entries_test = utils_funcs.log_CP_base_rate(
            self.tf_U, self.entry_ind_test)
        self.base_rate_entries_test = tf.exp(self.gp_base_rate_entries_test)

        # int term 1, using entryEvent
        # Integral term: sum_i exp(f_i) * sq_sum_i.
        self.int_part_test = tf.reduce_sum(self.base_rate_entries_test *
                                           self.entry_sq_sum_test)

        # event sum term 1: sum_i (n_i * f_i + log_sum_i).
        self.event_sum_test = tf.reduce_sum(self.gp_base_rate_entries_test *
                                            self.entry_n_i_test +
                                            self.entry_log_sum_test)
        self.llk_test = self.event_sum_test - self.int_part_test

        self.isTestGraphInitialized = True

        return self
    def __init__(self, train_ind, train_y, init_config):
        """Build the training graph for the Rayleigh-decay CP event model.

        Precomputes per-unique-entry sufficient statistics, creates embedding
        variables U, wires a minibatched (over unique entries) ELBO, and sets
        up an Adam minimization step plus a live TF session.

        Args:
            train_ind: event entry indices; assumed integer array of shape
                (num_events, nmod) — TODO confirm.
            train_y: event timestamps; assumed shape (num_events, 1) and
                sorted ascending (T is last minus first timestamp) — verify.
            init_config: dict with keys 'U' (list of per-mode embedding init
                arrays), 'batch_size_entry', 'learning_rate'.
        """
        self.train_ind = train_ind
        self.train_y = train_y
        self.nmod = train_ind.shape[1]
        # Per-unique-entry stats (count n_i, squared-time sum, log term) for
        # the Rayleigh likelihood; "Reileigh" spelling comes from the helper.
        self.uniq_ind, self.n_i, self.sq_sum, self.log_sum = utils_funcs.extract_event_tensor_Reileigh(
            self.train_ind, self.train_y)

        self.num_entries = len(self.uniq_ind)

        #self.log_file_name = init_config['log_file_name']
        self.init_U = init_config['U']
        self.batch_size_entry = init_config['batch_size_entry']
        self.learning_rate = init_config['learning_rate']

        self.GP_SCOPE_NAME = "gp_params"
        #GP parameters
        with tf.variable_scope(self.GP_SCOPE_NAME):
            # Embedding params: one variable per tensor mode.
            self.tf_U = [
                tf.Variable(self.init_U[k], dtype=FLOAT_TYPE)
                for k in range(self.nmod)
            ]

        # Integral Term
        # sum_i < int_0^T lam_i>
        # placeholders
        # Minibatch placeholders, one row per sampled unique entry.
        self.batch_entry_ind = tf.placeholder(
            dtype=tf.int32, shape=[self.batch_size_entry, self.nmod])
        self.batch_entry_n_i = tf.placeholder(dtype=FLOAT_TYPE,
                                              shape=[self.batch_size_entry, 1])
        self.batch_entry_log_sum = tf.placeholder(
            dtype=FLOAT_TYPE, shape=[self.batch_size_entry, 1])
        self.batch_entry_sq_sum = tf.placeholder(
            dtype=FLOAT_TYPE, shape=[self.batch_size_entry, 1])

        # Observation window: length T plus its endpoints T0, T1.
        self.tf_T = tf.constant(self.train_y[-1][0] - self.train_y[0][0],
                                dtype=FLOAT_TYPE)
        self.tf_T0 = tf.constant(self.train_y[0][0], dtype=FLOAT_TYPE)
        self.tf_T1 = tf.constant(self.train_y[-1][0], dtype=FLOAT_TYPE)

        # sample posterior base rate ( f )
        self.gp_base_rate_entries = utils_funcs.log_CP_base_rate(
            self.tf_U, self.batch_entry_ind)
        self.base_rate_entries = tf.exp(self.gp_base_rate_entries)

        #int term 1, using entryEvent
        # num_entries / batch_size rescales the minibatch sum to an unbiased
        # estimate of the full-data sum.
        self.int_part1 = self.num_entries / self.batch_size_entry * tf.reduce_sum(
            self.base_rate_entries * self.batch_entry_sq_sum)

        # event sum term 1: per-entry n_i * f_i + log_sum_i, rescaled the
        # same way.
        self.eventSum = (self.batch_entry_n_i * self.gp_base_rate_entries +
                         self.batch_entry_log_sum)
        self.event_sum_part1 = self.num_entries / self.batch_size_entry * (
            tf.reduce_sum(self.eventSum))

        self.ELBO = self.event_sum_part1 - self.int_part1
        self.neg_ELBO = -self.ELBO
        self.ELBO_hist = []

        # setting
        # Adam over only the variables created under GP_SCOPE_NAME.
        self.min_opt = tf.train.AdamOptimizer(self.learning_rate)
        self.min_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                            scope=self.GP_SCOPE_NAME)
        #print( self.min_params) ##
        self.min_step = self.min_opt.minimize(self.neg_ELBO,
                                              var_list=self.min_params)

        # GPU settings
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.sess.run(tf.global_variables_initializer())

        # Generator yielding minibatches of (entry index, [n_i, sq_sum,
        # log_sum]) pairs; the stats are packed into one array column-wise.
        self.entries_ind_y_gnrt = utils_funcs.DataGenerator(
            self.uniq_ind,
            np.concatenate([self.n_i, self.sq_sum, self.log_sum], axis=1))
        self.isTestGraphInitialized = False
# Example #4
    def __init__(self, train_ind, train_y, init_config):
        """Build the training graph for the plain-Poisson CP event model.

        Creates embedding variables U and a doubly-minibatched ELBO: one
        batch over unique entries (integral term) and one over raw events
        (event-sum term), then sets up an Adam step and a live TF session.

        Args:
            train_ind: event entry indices; assumed integer array of shape
                (num_events, nmod) — TODO confirm.
            train_y: event timestamps; assumed shape (num_events, 1) and
                sorted ascending (T is last minus first timestamp) — verify.
            init_config: dict with keys 'U' (list of per-mode embedding init
                arrays), 'batch_size_event', 'batch_size_entry',
                'learning_rate'.
        """
        self.train_ind = train_ind
        self.train_y = train_y
        self.nmod = train_ind.shape[1]
        # Unique entries (rows); the integral term iterates over these.
        self.uniq_ind = np.unique(self.train_ind, axis=0)

        self.num_events = len(self.train_ind)
        self.num_entries = len(self.uniq_ind)

        #self.log_file_name = init_config['log_file_name']
        self.init_U = init_config['U']

        self.batch_size_event = init_config['batch_size_event']
        self.batch_size_entry = init_config['batch_size_entry']

        self.learning_rate = init_config['learning_rate']

        # Embedding params: one variable per tensor mode.  NOTE(review): no
        # variable scope here, unlike the Rayleigh trainer, so minimize()
        # below trains all global variables.
        self.tf_U = [
            tf.Variable(self.init_U[k], dtype=FLOAT_TYPE)
            for k in range(self.nmod)
        ]

        # Integral Term
        # sum_i < int_0^T lam_i>
        # placeholders
        self.batch_entry_ind = tf.placeholder(
            dtype=tf.int32, shape=[self.batch_size_entry, self.nmod])
        self.batch_event_ind = tf.placeholder(
            dtype=tf.int32, shape=[self.batch_size_event, self.nmod])
        self.batch_event_y = tf.placeholder(dtype=FLOAT_TYPE,
                                            shape=[self.batch_size_event, 1])

        # Observation window: length T plus its endpoints T0, T1.
        self.tf_T = tf.constant(self.train_y[-1][0] - self.train_y[0][0],
                                dtype=FLOAT_TYPE)
        self.tf_T0 = tf.constant(self.train_y[0][0], dtype=FLOAT_TYPE)
        self.tf_T1 = tf.constant(self.train_y[-1][0], dtype=FLOAT_TYPE)

        # sample posterior base rate ( f )
        self.base_rate_entries = tf.exp(
            utils_funcs.log_CP_base_rate(self.tf_U, self.batch_entry_ind))

        #int term 1, using entryEvent
        # T * sum exp(f_i) over the entry batch, rescaled by
        # num_entries / batch_size for an unbiased full-sum estimate.
        self.int_part1 = self.num_entries / self.batch_size_entry * self.tf_T * tf.reduce_sum(
            self.base_rate_entries)

        # sample event base rate

        self.gp_base_rate_events = utils_funcs.log_CP_base_rate(
            self.tf_U, self.batch_event_ind)

        # event sum term 1: sum of log-rates over the event batch, rescaled
        # by num_events / batch_size.
        self.event_sum_part1 = self.num_events / self.batch_size_event * tf.reduce_sum(
            self.gp_base_rate_events)

        self.ELBO = self.event_sum_part1 - self.int_part1
        self.neg_ELBO = -self.ELBO
        self.ELBO_hist = []

        # setting
        self.min_opt = tf.train.AdamOptimizer(self.learning_rate)
        self.min_step = self.min_opt.minimize(self.neg_ELBO)

        # GPU settings
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.sess.run(tf.global_variables_initializer())

        # Minibatch generators: entries alone, and (event index, timestamp)
        # pairs.
        self.entries_ind_gnrt = utils_funcs.DataGenerator(self.uniq_ind)
        self.event_ind_y_gnrt = utils_funcs.DataGenerator(
            self.train_ind, self.train_y)

        self.isTestGraphInitialized = False