Example 1
    def traj_dist(self, prev_times, prev_obs, pred_times, recursive=True):
        # Requires time, numpy as np, and the project's utils module at module scope.
        D = prev_obs[0].shape[-1]
        # 1) Normalize first, then encode. Very important!
        # Index of the last observed time step on the fixed deltaT grid.
        curr_ix = int(round((prev_times[-1] - prev_times[0]) / self.deltaT))
        t1 = time.time()
        pred_time = 0.0

        # Observation noise: fall back to isotropic 0.01 * I when Sigma_y is unset.
        Sigma_y = self.Sigma_y if self.Sigma_y is not None else 0.01 * np.eye(D)
        noise = np.random.multivariate_normal(mean=np.zeros(D),
                                              cov=Sigma_y,
                                              size=(self.samples,
                                                    self.length - 1))
        Xn, Xobs = utils.encode_fixed_dt([prev_times],
                                         [self.normalizer.transform(prev_obs)],
                                         self.length - 1, self.deltaT)
        if recursive:
            # Roll the model forward one step at a time, feeding each noisy
            # prediction back in as an "observed" input for the next step.
            Xsamples = np.repeat(Xn, repeats=self.samples, axis=0)
            Xobs_samples = np.repeat(Xobs, repeats=self.samples, axis=0)
            y_n = None
            for i in range(curr_ix, self.length - 1):
                t2 = time.time()
                y_n = self.model.predict([Xsamples, Xobs_samples]) + noise
                t3 = time.time()
                pred_time += t3 - t2
                if i + 1 < self.length - 1:
                    Xsamples[:, i + 1] = y_n[:, i]
                    Xobs_samples[:, i + 1] = 1.0
            if y_n is None:
                # The loop never ran (nothing left to roll out); do one pass.
                y_n = self.model.predict([Xsamples, Xobs_samples]) + noise
            y = utils.apply_scaler(self.normalizer.inverse_transform, y_n)
        else:
            # Single-shot prediction of the whole remaining trajectory.
            y_n = self.model.predict([Xn, Xobs]) + noise
            y = utils.apply_scaler(self.normalizer.inverse_transform, y_n)

        # Map each query time to a zero-based index on the prediction grid.
        ixs = [
            int(round((x - prev_times[0]) / self.deltaT)) - 1
            for x in pred_times
        ]
        means = []
        covs = []
        for i in ixs:
            if i < self.length - 1:
                y_mu = np.mean(y[:, i, :], axis=0)
                # Note: np.cov with rowvar=False already uses the unbiased
                # (N - 1) denominator, so this factor inflates it slightly more.
                y_Sigma = np.cov(y[:, i, :], rowvar=False)
                bias_scale = self.samples / (self.samples - 1)
                y_Sigma = bias_scale * y_Sigma
            else:
                # Query lies beyond the prediction horizon: reuse the last
                # mean with a default isotropic covariance.
                y_mu = np.mean(y[:, -1, :], axis=0)
                y_Sigma = self.default_Sigma_y * np.eye(y.shape[-1])
            means.append(y_mu)
            covs.append(y_Sigma)
        t4 = time.time()
        n_steps = max(1, self.length - curr_ix - 1)  # avoid division by zero
        print("Total pred time: {}, Computing the LSTM prediction: {}, "
              "Avg. LSTM pred: {}".format(t4 - t1, pred_time,
                                          pred_time / n_steps))
        return np.array(means), np.array(covs)
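
A minimal usage sketch for the method above. The `predictor` instance, its attribute values, and the dummy data are all assumptions for illustration; only the call signature comes from the snippet itself.

    import numpy as np

    # Hypothetical: `predictor` is an instance of the class above, with
    # deltaT, length, samples, model, and normalizer already configured.
    prev_times = [0.0, 0.05, 0.10]                 # observed timestamps
    prev_obs = np.random.randn(3, 7)               # three observations, D = 7
    pred_times = [0.20, 0.30]                      # query times to predict

    means, covs = predictor.traj_dist(prev_times, prev_obs, pred_times,
                                      recursive=True)
    # means has shape (2, 7); covs has shape (2, 7, 7).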
Example 2
    def __data_generation(self, times, X):
        assert len(times) == len(X)
        # Shift each trajectory to an arbitrary start point, then encode it on
        # the fixed deltaT grid; Yobs marks which grid cells hold real data.
        n_times, n_x = utils.shift_time_duration(times, X, self.duration)
        Y, Yobs = utils.encode_fixed_dt(n_times, n_x, self.length, self.deltaT)

        N, T, K = Y.shape
        # Simulate partial observations: cut each sequence at a random length
        # and drop each remaining step with probability fake_missing_p.
        ts_lens = np.random.randint(low=0, high=T, size=N)
        is_obs = np.array([
            np.logical_and(
                np.arange(T) < x,
                np.random.rand(T) >= self.fake_missing_p) for x in ts_lens
        ])
        Xobs = Yobs * is_obs.reshape((-1, T, 1))
        X = Xobs * Y  # zero out the unobserved entries in the network input

        return X, Xobs, Y, Yobs
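
The truncate-and-drop masking above is easy to verify in isolation. A self-contained sketch of the same construction in plain NumPy, with arbitrary shapes and a stand-in for the encoded data:

    import numpy as np

    N, T, K = 4, 10, 3
    fake_missing_p = 0.2
    Y = np.random.randn(N, T, K)                   # stand-in for encoded data
    Yobs = np.ones((N, T, 1))                      # pretend every cell is filled

    ts_lens = np.random.randint(low=0, high=T, size=N)
    is_obs = np.array([
        np.logical_and(np.arange(T) < x,
                       np.random.rand(T) >= fake_missing_p) for x in ts_lens
    ])
    Xobs = Yobs * is_obs.reshape((-1, T, 1))       # per-cell observation mask
    X = Xobs * Y                                   # masked network input
    assert X.shape == (N, T, K) and Xobs.shape == (N, T, 1)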
Example 3
    def traj_dist(self, prev_times, prev_obs, pred_times):
        # Requires time, logging, bisect, numpy as np, and the project's utils module.
        # 1) Normalize first, then encode. Very important!
        Xn, Xobs = utils.encode_fixed_dt([prev_times],
                                         [self.normalizer.transform(prev_obs)],
                                         self.length, self.deltaT)
        # Draw latent samples from the standard normal prior.
        z = np.random.normal(loc=0.0,
                             scale=1.0,
                             size=(self.samples, self.z_size))
        if self.partial_encoder is not None:
            t1 = time.time()
            mu_z, log_sig_z = self.partial_encoder.predict([Xn, Xobs])
            t2 = time.time()
            logging.info("DCGM Encoding time: {}".format(t2 - t1))
            # Reparameterization: log_sig_z is treated as a log-variance,
            # so sqrt(exp(.)) recovers the standard deviation.
            sig_z = np.sqrt(np.exp(log_sig_z))
            z = mu_z + z * sig_z
        Xn_rep = np.tile(Xn, (self.samples, 1, 1))
        Xobs_rep = np.tile(Xobs, (self.samples, 1, 1))
        t1 = time.time()
        y_n = self.decoder.predict([Xn_rep, Xobs_rep, z])
        t2 = time.time()
        logging.info("DCGM Decoding time: {}".format(t2 - t1))
        y = utils.apply_scaler(self.normalizer.inverse_transform, y_n)
        # Map each query time to an index on the fixed deltaT grid.
        ixs = [
            int(round((x - prev_times[0]) / self.deltaT)) for x in pred_times
        ]
        t1 = time.time()
        means_model, covs_model = utils.empirical_traj_dist(y)
        # Queries past the horizon (ixs is assumed sorted): reuse the last
        # in-range mean and pad with a default isotropic covariance.
        limit = bisect.bisect_left(ixs, self.length)
        means = means_model[ixs[0:limit]]
        covs = covs_model[ixs[0:limit]]
        if limit < len(ixs):
            missing = len(ixs) - limit
            means = np.concatenate((means, np.tile(means[-1], (missing, 1))),
                                   axis=0)
            covs = np.concatenate(
                (covs,
                 np.tile(self.default_Sigma_y * np.eye(y.shape[-1]),
                         (missing, 1, 1))),
                axis=0)
        t2 = time.time()
        logging.info("DCGM Comp. distribution time: {}".format(t2 - t1))
        return np.array(means), np.array(covs)
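
The line z = mu_z + z * sig_z above is the standard reparameterization trick: samples from N(mu, sigma^2) are obtained by shifting and scaling standard-normal draws. A self-contained illustration, with all shapes and values assumed:

    import numpy as np

    samples, z_size = 10000, 2
    mu_z = np.array([1.0, -2.0])                   # assumed posterior mean
    log_var_z = np.array([0.0, np.log(4.0)])       # assumed log-variance
    sig_z = np.sqrt(np.exp(log_var_z))             # std dev: [1.0, 2.0]

    eps = np.random.normal(size=(samples, z_size)) # standard-normal draws
    z = mu_z + eps * sig_z                         # ~ N(mu_z, diag(sig_z**2))

    # The empirical moments should be close to the targets.
    print(z.mean(axis=0))                          # approx [1.0, -2.0]
    print(z.std(axis=0))                           # approx [1.0, 2.0]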
Example 4
    def __data_generation(self, times, X):
        assert len(times) == len(X)
        # Shift each trajectory to a random start, then encode it on the fixed
        # deltaT grid; Yobs marks which grid cells actually hold observations.
        n_times, n_x = utils.shift_time(times, X, self.length)
        Y, Yobs = utils.encode_fixed_dt(n_times, n_x, self.length, self.deltaT)
        return Y, Yobs
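
utils.encode_fixed_dt is used by every snippet above but never shown. A plausible minimal sketch of what such an encoder might do, assuming it bins each (time, value) pair to the nearest index on a length-T grid with spacing deltaT and returns the filled array plus an observation mask; the real function may differ in signature and edge handling:

    import numpy as np

    def encode_fixed_dt_sketch(times_list, x_list, length, deltaT):
        # Hypothetical reimplementation for illustration only.
        K = x_list[0].shape[-1]
        Y = np.zeros((len(x_list), length, K))
        Yobs = np.zeros((len(x_list), length, 1))
        for n, (times, x) in enumerate(zip(times_list, x_list)):
            for t, row in zip(times, x):
                ix = int(round((t - times[0]) / deltaT))
                if 0 <= ix < length:
                    Y[n, ix] = row       # place the observation on the grid
                    Yobs[n, ix] = 1.0    # mark the cell as observed
        return Y, Yobs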