import numpy as np
import tensorflow as tf

import utils


def __homogeneous_poisson_sampling(T, S, maximum):
    """
    Generate a homogeneous Poisson point pattern over the space T x S. It
    basically takes two steps:
    1. Simulate the number of events n = N(T x S) occurring in T x S according
       to a Poisson distribution with mean maximum * |T x S|.
    2. Sample each of the n locations according to a uniform distribution on
       T x S respectively.

    Args:
        T:       (min_t, max_t) time interval of the simulation.
        S:       [(min_x, max_x), (min_y, max_y), ...] indicates the range of
                 coordinates regarding a square (or cubic ...) region.
        maximum: intensity (or maximum intensity when used by the thinning
                 algorithm).

    Returns:
        points: point process samples
                [(t1, x1, y1), (t2, x2, y2), ..., (tn, xn, yn)],
                sorted in ascending order of time.
    """
    _S = [T] + S
    # sample the number of events over T x S
    n = utils.lebesgue_measure(_S)
    N = tf.random.poisson(lam=maximum * n, shape=[1], dtype=tf.int32)
    # simulate the temporal and spatial coordinates independently and uniformly.
    points = [
        tf.random.uniform(shape=N, minval=_S[i][0], maxval=_S[i][1])
        for i in range(len(_S))
    ]
    # sort the temporal sequence in ascending order.
    points[0] = tf.contrib.framework.sort(points[0], direction="ASCENDING")
    points = tf.transpose(tf.stack(points))
    return points
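# A minimal usage sketch of the sampler above (this example is an assumption,
# not part of the original module: it presumes TensorFlow 1.x graph mode and a
# utils.lebesgue_measure that returns the volume of the box; the values are
# illustrative only):
#
#     T = [0., 10.]                 # time horizon
#     S = [[0., 1.], [0., 1.]]      # unit square in space
#     points = __homogeneous_poisson_sampling(T, S, maximum=2.)
#     with tf.Session() as sess:
#         print(sess.run(points))   # [[t1, x1, y1], ..., [tn, xn, yn]]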
def _homogeneous_poisson_sampling(self, T=[0, 1], S=[[0, 1], [0, 1]]):
    """
    Generate a homogeneous Poisson point pattern over the space T x S. It
    basically takes two steps:
    1. Simulate the number of events n = N(T x S) occurring in T x S according
       to a Poisson distribution with mean lam * |T x S|, where lam is the
       upper bound of the intensity used by the thinning algorithm.
    2. Sample each of the n locations according to a uniform distribution on
       T x S respectively.

    Args:
        T: [min_t, max_t] time interval of the simulation.
        S: [[min_x, max_x], [min_y, max_y], ...] indicates the range of
           coordinates regarding a square (or cubic ...) region.

    Returns:
        points: point process samples
                [(t1, x1, y1), (t2, x2, y2), ..., (tn, xn, yn)],
                sorted in ascending order of time.
    """
    _S = [T] + S
    # sample the number of events over T x S
    n = utils.lebesgue_measure(_S)
    N = np.random.poisson(size=1, lam=self.lam.upper_bound() * n)
    # simulate the temporal and spatial coordinates independently and uniformly.
    points = [np.random.uniform(_S[i][0], _S[i][1], N) for i in range(len(_S))]
    points = np.array(points).transpose()
    # sort the sequence in ascending order of the temporal coordinate.
    points = points[points[:, 0].argsort()]
    return points
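# For reference, a minimal sketch of what the utils.lebesgue_measure helper
# used by both samplers is assumed to compute (the actual implementation lives
# in utils.py): the Lebesgue measure of a box [a1, b1] x ... x [ak, bk] is
# simply the product of its interval widths.
def _lebesgue_measure_sketch(S):
    """Volume of the box S = [(min_1, max_1), ..., (min_k, max_k)] (a sketch)."""
    measure = 1.0
    for (lo, hi) in S:
        measure *= float(hi) - float(lo)
    return measure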
def pdf_with_history():
    # triggering probability: log of the conditional intensity evaluated at
    # the new point (t, x, y) given the history.
    log_trig_prob = tf.log(self._lambda(x, y, t, x_his, y_his, t_his))
    # variables for calculating the tail probability: tn is the time of the
    # last historical point and ti are the times of all historical points.
    tn, ti = points[-2, 0], points[:-1, 0]
    t_ti, tn_ti = t - ti, tn - ti
    # tail probability: the negative integral of the conditional intensity
    # over (tn, t] x S.
    log_tail_prob = \
        - self.mu * (t - t_his[-1]) * utils.lebesgue_measure(S) \
        - tf.reduce_sum(tf.scan(
            lambda a, i: self.C * (tf.exp(- self.beta * tn_ti[i]) -
                                   tf.exp(- self.beta * t_ti[i])) / self.beta,
            tf.range(tf.shape(t_ti)[0]),
            initializer=np.array(0., dtype=np.float32)))
    return log_trig_prob + log_tail_prob
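# Why the tail term has this closed form: the log tail ("survival")
# probability is the negative integral of the conditional intensity over
# (tn, t] x S. With a constant background rate mu and an exponential
# triggering kernel C * exp(-beta * (u - ti)) whose spatial part integrates
# to one over S (an assumption consistent with the code above),
#
#     \int_{tn}^{t} \int_{S} lambda(u, s | H) ds du
#         = mu * (t - tn) * |S|
#           + \sum_i (C / beta) * (exp(-beta * (tn - ti)) - exp(-beta * (t - ti))),
#
# which is exactly the quantity whose negative is accumulated above.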
def pdf_with_history():
    # triggering probability: log of the conditional intensity evaluated at
    # the new point (t, s) given the history, clipped for numerical stability.
    log_trig_prob = tf.log(
        tf.clip_by_value(self._lambda(t, s, his_t, his_s), 1e-8, 1e+10))
    # variables for calculating the tail probability: tn is the time of the
    # last historical point and ti are the times of all historical points.
    tn, ti = points[-2, 0], points[:-1, 0]
    t_ti, tn_ti = t - ti, tn - ti
    # tail probability: the negative integral of the conditional intensity
    # over (tn, t] x S.
    # TODO: change to gaussian mixture (add phi)
    log_tail_prob = \
        - self.mu * (t - his_t[-1]) * utils.lebesgue_measure(self.S) \
        - tf.reduce_sum(tf.scan(
            lambda a, i: self.C * (tf.exp(- self.beta * tn_ti[i]) -
                                   tf.exp(- self.beta * t_ti[i])) /
                         tf.clip_by_value(self.beta, 1e-8, 1e+10),
            tf.range(tf.shape(t_ti)[0]),
            initializer=np.array(0., dtype=np.float32)))
    return log_trig_prob + log_tail_prob
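# Note on the tf.scan above: the lambda ignores its accumulator `a`, so the
# scan just maps over indices and the reduce_sum adds the results. An
# equivalent and cheaper vectorized form (a sketch, preserving the clipping)
# would be:
#
#     tf.reduce_sum(
#         self.C * (tf.exp(- self.beta * tn_ti) - tf.exp(- self.beta * t_ti)) /
#         tf.clip_by_value(self.beta, 1e-8, 1e+10))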