Example #1
    def _process_integration(self, a):

        i, rnd, noise = a
        num_exploding = 0
        repeats = 20
        xx = {}
        x = {}
        for l in self.fit_mat.keys():
            xx[l] = np.zeros((repeats, self.input_pcs.shape[0]))
            if self.delay_model:
                xx[l][:self.delay, :] = rnd[l].T
            else:
                xx[l][0, :] = rnd[l]

            x[l] = np.zeros((self.int_length, self.input_pcs.shape[0]))
            if self.delay_model:
                x[l][:self.delay, :] = xx[l][:self.delay, :]
            else:
                x[l][0, :] = xx[l][0, :]

        step0 = 0
        if self.delay_model:
            step = self.delay
        else:
            step = 1
        blow_counter = 0
        zz = {}
        for n in range(repeats*int(np.ceil(self.int_length/repeats))):
            for k in range(1, repeats):
                if (self.delay_model and k < self.delay) and step == self.delay:
                    continue
                if blow_counter >= 10:
                    raise Exception("Model blowed up 10 times.")
                if step >= self.int_length:
                    break
                # prepare predictors
                for l in self.fit_mat.keys():
                    zz[l] = xx[0][k-1, :]
                    for lr in range(l):
                        zz[l] = np.r_[zz[l], xx[lr+1][k-1, :]]
                for l in self.fit_mat.keys():
                    if l == 0:
                        if self.quad:
                            q = np.tril(np.outer(zz[l].T, zz[l]), -1)
                            quad_pred = q[np.nonzero(q)]
                        if self.delay_model:
                            zz[l] = np.tanh(self.kappa * x[l][step - self.delay, :])
                        if self.harmonic_pred in ['all', 'first']:
                            if self.quad:
                                zz[l] = np.r_[quad_pred, zz[l], zz[l]*self.xsin[step], zz[l]*self.xcos[step], 
                                    self.xsin[step], self.xcos[step], 1]
                            else:
                                zz[l] = np.r_[zz[l], zz[l]*self.xsin[step], zz[l]*self.xcos[step], 
                                    self.xsin[step], self.xcos[step], 1]
                        else:
                            if self.quad:
                                zz[l] = np.r_[quad_pred, zz[l], 1]
                            else:
                                zz[l] = np.r_[zz[l], 1]
                    else:
                        if self.harmonic_pred == 'all':
                            zz[l] = np.r_[zz[l], zz[l]*self.xsin[step], zz[l]*self.xcos[step], 
                                    self.xsin[step], self.xcos[step], 1]
                        else:
                            zz[l] = np.r_[zz[l], 1]

                if 'cond' in self.noise_type:
                    n_PCs = 1
                    n_samples = 100
                    if not self.combined:
                        ndx = np.argsort(np.sum(np.power(self.pcs[:, :n_PCs] - xx[0][k-1, :n_PCs], 2), axis = 1))
                        Q = np.cov(self.last_level_res[ndx[:n_samples], :], rowvar = 0)
                        self.rr = np.linalg.cholesky(Q).T
                    elif self.combined:
                        ndx1 = np.argsort(np.sum(np.power(self.pcs[:, :n_PCs] - xx[0][k-1, :n_PCs], 2), axis = 1))
                        ndx2 = np.argsort(np.sum(np.power(self.pcs[:, self.no_input_ts:self.no_input_ts+n_PCs] - xx[0][k-1, self.no_input_ts:self.no_input_ts+n_PCs], 2), axis = 1))
                        res1 = self.last_level_res[ndx1[:n_samples], :]
                        res2 = self.last_level_res[ndx2[:n_samples], :]
                        Q = np.cov(np.concatenate((res1, res2), axis = 0), rowvar = 0)
                        self.rr = np.linalg.cholesky(Q).T
                
                # integration step
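                # levels run from the last (deepest) one upwards: the last level is
                # driven by the stochastic forcing, each higher level by the newly
                # computed value of the level below it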
                for l in sorted(self.fit_mat, reverse = True):
                    if (l == self.no_levels-1):
                        forcing = np.dot(self.rr, np.random.normal(0,self.sigma,(self.rr.shape[0],)).T)
                        if 'seasonal' in self.noise_type:
                            forcing *= self.rr_last_std_ts[step%self.rr_last_std_ts.shape[0], :]
                    else:
                        forcing = xx[l+1][k, :]
                    xx[l][k, :] = xx[l][k-1, :] + np.dot(zz[l], self.fit_mat[l]) + forcing
                    # xx[l][k, :] = xx[l][k-1, :] + self.regressor.predict(zz[l]) + forcing

                step += 1

            # check if integration blows
            if np.amax(np.abs(xx[0])) <= 2*self.maxpc and not np.any(np.isnan(xx[0])):
                for l in self.fit_mat.keys():
                    x[l][step-repeats + 1 : step, :] = xx[l][1:, :]
                    # set first to last
                    xx[l][0, :] = xx[l][-1, :]
            else:
                for l in self.fit_mat.keys():
                    if l == 0:
                        xx[l][0, :] = np.dot(np.random.normal(0, self.sigma, (self.input_pcs.shape[0],)), self.diagpc)
                    else:
                        xx[l][0, :] = np.dot(np.random.normal(0, self.sigma, (self.input_pcs.shape[0],)), self.diagres[l-1])
                if step != step0:
                    num_exploding += 1
                    step0 = step
                step -= repeats + 1
                blow_counter += 1

        x = x[0].copy()

        # center
        x -= np.mean(x, axis = 0)

        # preserve total energy level
        x *= np.sqrt(np.sum(self.varpc)/np.sum(np.var(x, axis = 0, ddof = 1)))

        if self.diagnostics:
            xm = np.mean(x, axis = 0)
            xv = np.var(x, axis = 0, ddof = 1)
            xs = sts.skew(x, axis = 0)
            xk = sts.kurtosis(x, axis = 0)

            lc = np.zeros((2*self.max_lag + 1, self.input_pcs.shape[0]))
            kden = np.zeros((100, self.input_pcs.shape[0], 2))
            for k in range(self.input_pcs.shape[0]):
                lc[:, k] = cross_correlation(x[:, k], x[:, k], max_lag = self.max_lag)
                kden[:, k, 0], kden[:, k, 1] = kdensity_estimate(x[:, k], kernel = 'epanechnikov')
            ict = np.sum(np.abs(lc), axis = 0)

            return i, x, num_exploding, xm, xv, xs, xk, lc, kden, ict

        else:
            return i, x, num_exploding
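
The forcing applied at the deepest level above is spatially correlated noise built from self.rr, the transposed Cholesky factor of the last level's residual covariance. Below is a minimal, self-contained sketch of that standard construction; all names (rng, mixing, res, samples) are illustrative and not part of the model class. White noise multiplied by the upper-triangular factor reproduces the target covariance.

import numpy as np

# minimal sketch, not part of the model class: spatially correlated noise via Cholesky
rng = np.random.default_rng(0)
mixing = np.array([[1.0, 0.0, 0.0],
                   [0.5, 1.0, 0.0],
                   [0.2, 0.3, 1.0]])
res = rng.standard_normal((5000, 3)) @ mixing.T   # stand-in for last-level residuals

Q = np.cov(res, rowvar=0)              # residual covariance, as in the class
rr = np.linalg.cholesky(Q).T           # upper-triangular factor, analogous to self.rr

# samples drawn as white noise times the factor have covariance approximately Q
samples = rng.standard_normal((5000, 3)) @ rr
print(np.round(np.cov(samples, rowvar=0), 2))
print(np.round(Q, 2))
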
Example #2
    def _process_integration(self, a):

        i, rnd, noise = a
        num_exploding = 0
        repeats = 20
        xx = {}
        x = {}
        for l in self.fit_mat.keys():
            xx[l] = np.zeros((repeats, self.input_pcs.shape[0]))
            if self.delay_model:
                xx[l][:self.delay, :] = rnd[l].T
            else:
                xx[l][0, :] = rnd[l]

            x[l] = np.zeros((self.int_length, self.input_pcs.shape[0]))
            if self.delay_model:
                x[l][:self.delay, :] = xx[l][:self.delay, :]
            else:
                x[l][0, :] = xx[l][0, :]

        step0 = 0
        if self.delay_model:
            step = self.delay
        else:
            step = 1
        blow_counter = 0
        zz = {}
        for n in range(repeats * int(np.ceil(self.int_length / repeats))):
            for k in range(1, repeats):
                if (self.delay_model
                        and k < self.delay) and step == self.delay:
                    continue
                if blow_counter >= 10:
                    raise Exception("Model blowed up 10 times.")
                if step >= self.int_length:
                    break
                # prepare predictors
                for l in self.fit_mat.keys():
                    zz[l] = xx[0][k - 1, :]
                    for lr in range(l):
                        zz[l] = np.r_[zz[l], xx[lr + 1][k - 1, :]]
                for l in self.fit_mat.keys():
                    if l == 0:
                        if self.quad:
                            q = np.tril(np.outer(zz[l].T, zz[l]), -1)
                            quad_pred = q[np.nonzero(q)]
                        if self.delay_model:
                            zz[l] = np.tanh(self.kappa *
                                            x[l][step - self.delay, :])
                        if self.harmonic_pred in ['all', 'first']:
                            if self.quad:
                                zz[l] = np.r_[quad_pred, zz[l],
                                              zz[l] * self.xsin[step],
                                              zz[l] * self.xcos[step],
                                              self.xsin[step], self.xcos[step],
                                              1]
                            else:
                                zz[l] = np.r_[zz[l], zz[l] * self.xsin[step],
                                              zz[l] * self.xcos[step],
                                              self.xsin[step], self.xcos[step],
                                              1]
                        else:
                            if self.quad:
                                zz[l] = np.r_[quad_pred, zz[l], 1]
                            else:
                                zz[l] = np.r_[zz[l], 1]
                    else:
                        if self.harmonic_pred == 'all':
                            zz[l] = np.r_[zz[l], zz[l] * self.xsin[step],
                                          zz[l] * self.xcos[step],
                                          self.xsin[step], self.xcos[step], 1]
                        else:
                            zz[l] = np.r_[zz[l], 1]

                if 'cond' in self.noise_type:
                    n_PCs = 1
                    n_samples = 100
                    if not self.combined:
                        ndx = np.argsort(
                            np.sum(np.power(
                                self.pcs[:, :n_PCs] - xx[0][k - 1, :n_PCs], 2),
                                   axis=1))
                        Q = np.cov(self.last_level_res[ndx[:n_samples], :],
                                   rowvar=0)
                        self.rr = np.linalg.cholesky(Q).T
                    elif self.combined:
                        ndx1 = np.argsort(
                            np.sum(np.power(
                                self.pcs[:, :n_PCs] - xx[0][k - 1, :n_PCs], 2),
                                   axis=1))
                        ndx2 = np.argsort(
                            np.sum(np.power(
                                self.pcs[:, self.no_input_ts:self.no_input_ts +
                                         n_PCs] -
                                xx[0][k - 1,
                                      self.no_input_ts:self.no_input_ts + n_PCs],
                                2),
                                   axis=1))
                        res1 = self.last_level_res[ndx1[:n_samples], :]
                        res2 = self.last_level_res[ndx2[:n_samples], :]
                        Q = np.cov(np.concatenate((res1, res2), axis=0),
                                   rowvar=0)
                        self.rr = np.linalg.cholesky(Q).T

                # integration step
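                # levels run from the last (deepest) one upwards: the last level is
                # driven by the stochastic forcing, each higher level by the newly
                # computed value of the level below it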
                for l in sorted(self.fit_mat, reverse=True):
                    if (l == self.no_levels - 1):
                        forcing = np.dot(
                            self.rr,
                            np.random.normal(0, self.sigma,
                                             (self.rr.shape[0], )).T)
                        if 'seasonal' in self.noise_type:
                            forcing *= self.rr_last_std_ts[
                                step % self.rr_last_std_ts.shape[0], :]
                    else:
                        forcing = xx[l + 1][k, :]
                    xx[l][k, :] = xx[l][k - 1, :] + np.dot(
                        zz[l], self.fit_mat[l]) + forcing
                    # xx[l][k, :] = xx[l][k-1, :] + self.regressor.predict(zz[l]) + forcing

                step += 1

            # check if integration blows
            if np.amax(np.abs(xx[0])) <= 2 * self.maxpc and not np.any(
                    np.isnan(xx[0])):
                for l in self.fit_mat.keys():
                    x[l][step - repeats + 1:step, :] = xx[l][1:, :]
                    # set first to last
                    xx[l][0, :] = xx[l][-1, :]
            else:
                for l in self.fit_mat.keys():
                    if l == 0:
                        xx[l][0, :] = np.dot(
                            np.random.normal(0, self.sigma,
                                             (self.input_pcs.shape[0], )),
                            self.diagpc)
                    else:
                        xx[l][0, :] = np.dot(
                            np.random.normal(0, self.sigma,
                                             (self.input_pcs.shape[0], )),
                            self.diagres[l - 1])
                if step != step0:
                    num_exploding += 1
                    step0 = step
                step -= repeats + 1
                blow_counter += 1

        x = x[0].copy()

        # center
        x -= np.mean(x, axis=0)

        # preserve total energy level
        x *= np.sqrt(np.sum(self.varpc) / np.sum(np.var(x, axis=0, ddof=1)))

        if self.diagnostics:
            xm = np.mean(x, axis=0)
            xv = np.var(x, axis=0, ddof=1)
            xs = sts.skew(x, axis=0)
            xk = sts.kurtosis(x, axis=0)

            lc = np.zeros((2 * self.max_lag + 1, self.input_pcs.shape[0]))
            kden = np.zeros((100, self.input_pcs.shape[0], 2))
            for k in range(self.input_pcs.shape[0]):
                lc[:, k] = cross_correlation(x[:, k],
                                             x[:, k],
                                             max_lag=self.max_lag)
                kden[:, k,
                     0], kden[:, k,
                              1] = kdensity_estimate(x[:, k],
                                                     kernel='epanechnikov')
            ict = np.sum(np.abs(lc), axis=0)

            return i, x, num_exploding, xm, xv, xs, xk, lc, kden, ict

        else:
            return i, x, num_exploding
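
After the integration loop, the realization kept in x is centered and rescaled so that its total variance matches the total variance of the input PCs stored in self.varpc. A small standalone illustration of that rescaling with made-up numbers (x and varpc below are illustrative, not the class attributes):

import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(0.0, 2.0, size=(1000, 3))    # stand-in for one integrated realization
varpc = np.array([4.0, 2.0, 1.0])           # stand-in for per-PC variances of the data

x -= np.mean(x, axis=0)                                          # center
x *= np.sqrt(np.sum(varpc) / np.sum(np.var(x, axis=0, ddof=1)))  # preserve total energy

# summed variance of the synthetic series now equals that of the data
print(np.sum(np.var(x, axis=0, ddof=1)), np.sum(varpc))
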
Example #3
    def integrate_model(self, n_realizations, int_length = None, noise_type = 'white', sigma = 1., n_workers = 3, diagnostics = True):
        """
        Integrate trained model.
        noise_type:
        -- white - classic white noise, spatial correlation by cov. matrix of last level residuals
        -- cond - find n_samples closest to the current space in subset of n_pcs and use their cov. matrix
        -- seasonal - seasonal dependence of the residuals, fit n_harm harmonics of annual cycle, could also be used with cond.
        except 'white', one can choose more settings like ['seasonal', 'cond']
        """

        if self.verbose:
            print("preparing to integrate model...")

        pcs = self.input_pcs.copy()
        pcs = pcs.T # time x dim

        pcmax = np.amax(pcs, axis = 0)
        pcmin = np.amin(pcs, axis = 0)
        self.varpc = np.var(pcs, axis = 0, ddof = 1)
        
        self.int_length = pcs.shape[0] if int_length is None else int_length

        self.diagnostics = diagnostics

        if self.harmonic_pred in ['all', 'first']:
            if self.verbose:
                print("...using harmonic predictors (with annual frequency)...")
            self.xsin = np.sin(2*np.pi*np.arange(self.int_length) / 12.)
            self.xcos = np.cos(2*np.pi*np.arange(self.int_length) / 12.)

        if self.verbose:
            print("...preparing noise forcing...")

        self.sigma = sigma
        if isinstance(noise_type, str):
            if noise_type not in ['white', 'cond', 'seasonal']:
                raise Exception("Unknown noise type to be used as forcing. Use 'white', 'cond', or 'seasonal'.")
        elif isinstance(noise_type, list):
            noise_type = frozenset(noise_type)
            if not noise_type.issubset(set(['white', 'cond', 'seasonal'])):
                raise Exception("Unknown noise type to be used as forcing. Use 'white', 'cond', or 'seasonal'.")
        
        self.last_level_res = self.residuals[max(self.residuals.keys())]
        self.noise_type = noise_type
        if noise_type == 'white':
            if self.verbose:
                print("...using spatially correlated white noise...")
            Q = np.cov(self.last_level_res, rowvar = 0)
            self.rr = np.linalg.cholesky(Q).T

        if 'seasonal' in noise_type:
            n_harmonics = 5
            if self.verbose:
                print("...fitting %d harmonics to estimate seasonal modulation of last level's residual..." % n_harmonics)
            if self.delay_model:
                resid_delayed = self.last_level_res[-(self.last_level_res.shape[0]//12)*12:].copy()
                rr_last = np.reshape(resid_delayed, (12, self.last_level_res.shape[0]//12, self.last_level_res.shape[1]), order = 'F')
            else:
                rr_last = np.reshape(self.last_level_res, (12, self.last_level_res.shape[0]//12, self.last_level_res.shape[1]), order = 'F')
            rr_last_std = np.nanstd(rr_last, axis = 1, ddof = 1)
            predictors = np.zeros((12, 2*n_harmonics + 1))
            for nh in range(n_harmonics):
                predictors[:, 2*nh] = np.cos(2*np.pi*(nh+1)*np.arange(12) / 12)
                predictors[:, 2*nh+1] = np.sin(2*np.pi*(nh+1)*np.arange(12) / 12)
            predictors[:, -1] = np.ones((12,))
            bamp = np.zeros((predictors.shape[1], pcs.shape[1]))
            for k in range(bamp.shape[1]):
                bamp[:, k] = np.linalg.lstsq(predictors, rr_last_std[:, k])[0]
            rr_last_std_ts = np.dot(predictors, bamp)
            self.rr_last_std_ts = np.repeat(rr_last_std_ts, repeats = self.last_level_res.shape[0]//12, axis = 0)
            if self.delay_model:
                resid_delayed /= self.rr_last_std_ts
                Q = np.cov(resid_delayed, rowvar = 0)
            else:
                self.last_level_res /= self.rr_last_std_ts
                Q = np.cov(self.last_level_res, rowvar = 0)

            self.rr = np.linalg.cholesky(Q).T


        if diagnostics:
            if self.verbose:
                print("...running diagnostics for the data...")
            # ACF, kernel density, integral corr. timescale for data
            self.max_lag = 50
            lag_cors = np.zeros((2*self.max_lag + 1, pcs.shape[1]))
            kernel_densities = np.zeros((100, pcs.shape[1], 2))
            for k in range(pcs.shape[1]):
                lag_cors[:, k] = cross_correlation(pcs[:, k], pcs[:, k], max_lag = self.max_lag)
                kernel_densities[:, k, 0], kernel_densities[:, k, 1] = kdensity_estimate(pcs[:, k], kernel = 'epanechnikov')
            integral_corr_timescale = np.sum(np.abs(lag_cors), axis = 0)

            # init for integrations
            lag_cors_int = np.zeros([n_realizations] + list(lag_cors.shape))
            kernel_densities_int = np.zeros([n_realizations] + list(kernel_densities.shape))
            stat_moments_int = np.zeros((4, n_realizations, pcs.shape[1])) # mean, variance, skewness, kurtosis
            int_corr_scale_int = np.zeros((n_realizations, pcs.shape[1]))

        self.diagpc = np.diag(np.std(pcs, axis = 0, ddof = 1))
        self.maxpc = np.amax(np.abs(pcs))
        self.diagres = {}
        self.maxres = {}
        for l in self.residuals.keys():
            self.diagres[l] = np.diag(np.std(self.residuals[l], axis = 0, ddof = 1))
            self.maxres[l] = np.amax(np.abs(self.residuals[l]))

        self.pcs = pcs

        if n_workers > 1:
            # from multiprocessing import Pool
            from pathos.multiprocessing import ProcessingPool
            pool = ProcessingPool(n_workers)
            map_func = pool.amap
            if self.verbose:
                print("...running integration of %d realizations using %d workers..." % (n_realizations, n_workers))
        else:
            map_func = map
            if self.verbose:
                print("...running integration of %d realizations single threaded..." % n_realizations)

        rnds = []
        for n in range(n_realizations):
            r = {}
            for l in self.fit_mat.keys():
                if l == 0:
                    if self.delay_model:
                        r[l] = np.dot(self.diagpc, np.random.normal(0, sigma, (pcs.shape[1], self.delay)))
                    else:
                        r[l] = np.dot(np.random.normal(0, sigma, (pcs.shape[1],)), self.diagpc)
                else:
                    if self.delay_model:
                        r[l] = np.dot(self.diagres[l-1], np.random.normal(0, sigma, (pcs.shape[1], self.delay)))
                    else:
                        r[l] = np.dot(np.random.normal(0, sigma, (pcs.shape[1],)), self.diagres[l-1])
            rnds.append(r)
        args = [[i, rnd, noise_type] for i, rnd in zip(range(n_realizations), rnds)]
        results = map_func(self._process_integration, args)

        del args
        if n_workers > 1:
            pool.close()

        self.integration_results = np.zeros((n_realizations, pcs.shape[1], self.int_length))
        self.num_exploding = np.zeros((n_realizations,))

        if n_workers > 1:
            results = results.get()

        if self.diagnostics:
            # x, num_exploding, xm, xv, xs, xk, lc, kden, ict
            for i, x, num_expl, xm, xv, xs, xk, lc, kden, ict in results:
                self.integration_results[i, ...] = x.T
                self.num_exploding[i] = num_expl
                stat_moments_int[0, i, :] = xm
                stat_moments_int[1, i, :] = xv
                stat_moments_int[2, i, :] = xs
                stat_moments_int[3, i, :] = xk
                lag_cors_int[i, ...] = lc
                kernel_densities_int[i, ...] = kden
                int_corr_scale_int[i, ...] = ict
        else:
            for i, x, num_expl in results:
                self.integration_results[i, ...] = x.T
                self.num_exploding[i] = num_expl

        if self.verbose:
            print("...integration done, now saving results...")

        if self.verbose:
            print("...results saved to structure.")
            print("there was %d expolding integration chunks in %d realizations." % (np.sum(self.num_exploding), n_realizations))
        
        if self.diagnostics:
            if self.verbose:
                print("plotting diagnostics...")
            
            import matplotlib.pyplot as plt
            # plot all diagnostic stuff
            ## mean, variance, skewness, kurtosis, integral corr. time scale
            titles = ['MEAN', 'VARIANCE', 'SKEWNESS', 'KURTOSIS', 'INTEGRAL CORRELATION TIME SCALE']
            plot = [np.mean(pcs, axis = 0), np.var(pcs, axis = 0, ddof = 1), sts.skew(pcs, axis = 0), sts.kurtosis(pcs, axis = 0), integral_corr_timescale]
            xplot = np.arange(1, pcs.shape[1]+1)
            for i, tit, p in zip(range(5), titles, plot):
                plt.figure()
                plt.title(tit, size = 20)
                plt.plot(xplot, p, linewidth = 3, color = '#3E3436')
                if i < 4:
                    plt.plot(xplot, np.percentile(stat_moments_int[i, :, :], q = 2.5, axis = 0), '--', linewidth = 2.5, color = '#EA3E36')
                    plt.plot(xplot, np.percentile(stat_moments_int[i, :, :], q = 97.5, axis = 0), '--', linewidth = 2.5, color = '#EA3E36')
                else:
                    plt.plot(xplot, np.percentile(int_corr_scale_int, q = 2.5, axis = 0), '--', linewidth = 2.5, color = '#EA3E36')
                    plt.plot(xplot, np.percentile(int_corr_scale_int, q = 97.5, axis = 0), '--', linewidth = 2.5, color = '#EA3E36')
                plt.xlabel("# PC", size = 15)
                plt.xlim([xplot[0], xplot[-1]])
                plt.show()
                plt.close()

            ## lagged correlations, PDF - plot first 9 PCs (or less if input number of pcs is < 9)
            titles = ['AUTOCORRELATION', 'PDF']
            plot = [[lag_cors, lag_cors_int], [kernel_densities, kernel_densities_int]]
            xlabs = ['LAG', '']
            for i, tit, p, xlab in zip(range(2), titles, plot, xlabs):
                plt.figure()
                plt.suptitle(tit, size = 25)
                no_plts = 9 if self.no_input_ts > 9 else self.no_input_ts
                for sub in range(0,no_plts):
                    plt.subplot(3, 3, sub+1)
                    if i == 0:
                        xplt = np.arange(0, self.max_lag+1)
                        plt.plot(xplt, p[0][p[0].shape[0]//2:, sub], linewidth = 3, color = '#3E3436')
                        plt.plot(xplt, np.percentile(p[1][:, p[0].shape[0]//2:, sub], q = 2.5, axis = 0), '--', linewidth = 2.5, color = '#EA3E36')
                        plt.plot(xplt, np.percentile(p[1][:, p[0].shape[0]//2:, sub], q = 97.5, axis = 0), '--', linewidth = 2.5, color = '#EA3E36')
                        plt.xlim([xplt[0], xplt[-1]])
                    else:
                        plt.plot(p[0][:, sub, 0], p[0][:, sub, 1], linewidth = 3, color = '#3E3436')
                        plt.plot(p[1][0, :, sub, 0], np.percentile(p[1][:, :, sub, 1], q = 2.5, axis = 0), '--', linewidth = 2.5, color = '#EA3E36')
                        plt.plot(p[1][0, :, sub, 0], np.percentile(p[1][:, :, sub, 1], q = 97.5, axis = 0), '--', linewidth = 2.5, color = '#EA3E36')
                        plt.xlim([p[0][0, sub, 0], p[0][-1, sub, 0]])
                    plt.xlabel(xlab, size = 15)
                    plt.title("PC %d" % (int(sub)+1), size = 20)
                # plt.tight_layout()
                plt.show()
                plt.close()
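
A hedged usage sketch for the method above. The enclosing class is not shown in this section, so model below stands for an already-trained instance of the empirical model class (a hypothetical name here); the argument values are illustrative.

# assumption: model is a trained instance exposing integrate_model as defined above
model.integrate_model(n_realizations=10,
                      int_length=None,            # defaults to the length of the input PCs
                      noise_type=['seasonal', 'cond'],
                      sigma=1.,
                      n_workers=3,
                      diagnostics=False)

synth = model.integration_results     # shape: (n_realizations, n_pcs, int_length)
print(model.num_exploding)            # exploding integration chunks per realization
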
Example #4
    def integrate_model(self,
                        n_realizations,
                        int_length=None,
                        noise_type='white',
                        sigma=1.,
                        n_workers=3,
                        diagnostics=True):
        """
        Integrate trained model.
        noise_type:
        -- white - classic white noise, spatial correlation by cov. matrix of last level residuals
        -- cond - find n_samples closest to the current space in subset of n_pcs and use their cov. matrix
        -- seasonal - seasonal dependence of the residuals, fit n_harm harmonics of annual cycle, could also be used with cond.
        except 'white', one can choose more settings like ['seasonal', 'cond']
        """

        if self.verbose:
            print("preparing to integrate model...")

        pcs = self.input_pcs.copy()
        pcs = pcs.T  # time x dim

        pcmax = np.amax(pcs, axis=0)
        pcmin = np.amin(pcs, axis=0)
        self.varpc = np.var(pcs, axis=0, ddof=1)

        self.int_length = pcs.shape[0] if int_length is None else int_length

        self.diagnostics = diagnostics

        if self.harmonic_pred in ['all', 'first']:
            if self.verbose:
                print(
                    "...using harmonic predictors (with annual frequency)...")
            self.xsin = np.sin(2 * np.pi * np.arange(self.int_length) / 12.)
            self.xcos = np.cos(2 * np.pi * np.arange(self.int_length) / 12.)

        if self.verbose:
            print("...preparing noise forcing...")

        self.sigma = sigma
        if isinstance(noise_type, str):
            if noise_type not in ['white', 'cond', 'seasonal']:
                raise Exception(
                    "Unknown noise type to be used as forcing. Use 'white', 'cond', or 'seasonal'."
                )
        elif isinstance(noise_type, list):
            noise_type = frozenset(noise_type)
            if not noise_type.issubset(set(['white', 'cond', 'seasonal'])):
                raise Exception(
                    "Unknown noise type to be used as forcing. Use 'white', 'cond', or 'seasonal'."
                )

        self.last_level_res = self.residuals[max(self.residuals.keys())]
        self.noise_type = noise_type
        if noise_type == 'white':
            if self.verbose:
                print("...using spatially correlated white noise...")
            Q = np.cov(self.last_level_res, rowvar=0)
            self.rr = np.linalg.cholesky(Q).T

        if 'seasonal' in noise_type:
            n_harmonics = 5
            if self.verbose:
                print(
                    "...fitting %d harmonics to estimate seasonal modulation of last level's residual..."
                    % n_harmonics)
            if self.delay_model:
                resid_delayed = self.last_level_res[-(
                    self.last_level_res.shape[0] // 12) * 12:].copy()
                rr_last = np.reshape(resid_delayed,
                                     (12, self.last_level_res.shape[0] // 12,
                                      self.last_level_res.shape[1]),
                                     order='F')
            else:
                rr_last = np.reshape(self.last_level_res,
                                     (12, self.last_level_res.shape[0] // 12,
                                      self.last_level_res.shape[1]),
                                     order='F')
            rr_last_std = np.nanstd(rr_last, axis=1, ddof=1)
            predictors = np.zeros((12, 2 * n_harmonics + 1))
            for nh in range(n_harmonics):
                predictors[:, 2 * nh] = np.cos(2 * np.pi * (nh + 1) *
                                               np.arange(12) / 12)
                predictors[:, 2 * nh + 1] = np.sin(2 * np.pi * (nh + 1) *
                                                   np.arange(12) / 12)
            predictors[:, -1] = np.ones((12, ))
            bamp = np.zeros((predictors.shape[1], pcs.shape[1]))
            for k in range(bamp.shape[1]):
                bamp[:, k] = np.linalg.lstsq(predictors, rr_last_std[:, k])[0]
            rr_last_std_ts = np.dot(predictors, bamp)
            self.rr_last_std_ts = np.repeat(
                rr_last_std_ts,
                repeats=self.last_level_res.shape[0] // 12,
                axis=0)
            if self.delay_model:
                resid_delayed /= self.rr_last_std_ts
                Q = np.cov(resid_delayed, rowvar=0)
            else:
                self.last_level_res /= self.rr_last_std_ts
                Q = np.cov(self.last_level_res, rowvar=0)

            self.rr = np.linalg.cholesky(Q).T

        if diagnostics:
            if self.verbose:
                print("...running diagnostics for the data...")
            # ACF, kernel density, integral corr. timescale for data
            self.max_lag = 50
            lag_cors = np.zeros((2 * self.max_lag + 1, pcs.shape[1]))
            kernel_densities = np.zeros((100, pcs.shape[1], 2))
            for k in range(pcs.shape[1]):
                lag_cors[:, k] = cross_correlation(pcs[:, k],
                                                   pcs[:, k],
                                                   max_lag=self.max_lag)
                kernel_densities[:, k,
                                 0], kernel_densities[:, k,
                                                      1] = kdensity_estimate(
                                                          pcs[:, k],
                                                          kernel='epanechnikov'
                                                      )
            integral_corr_timescale = np.sum(np.abs(lag_cors), axis=0)

            # init for integrations
            lag_cors_int = np.zeros([n_realizations] + list(lag_cors.shape))
            kernel_densities_int = np.zeros([n_realizations] +
                                            list(kernel_densities.shape))
            stat_moments_int = np.zeros(
                (4, n_realizations,
                 pcs.shape[1]))  # mean, variance, skewness, kurtosis
            int_corr_scale_int = np.zeros((n_realizations, pcs.shape[1]))

        self.diagpc = np.diag(np.std(pcs, axis=0, ddof=1))
        self.maxpc = np.amax(np.abs(pcs))
        self.diagres = {}
        self.maxres = {}
        for l in self.residuals.keys():
            self.diagres[l] = np.diag(np.std(self.residuals[l], axis=0,
                                             ddof=1))
            self.maxres[l] = np.amax(np.abs(self.residuals[l]))

        self.pcs = pcs

        if n_workers > 1:
            # from multiprocessing import Pool
            from pathos.multiprocessing import ProcessingPool
            pool = ProcessingPool(n_workers)
            map_func = pool.amap
            if self.verbose:
                print(
                    "...running integration of %d realizations using %d workers..."
                    % (n_realizations, n_workers))
        else:
            map_func = map
            if self.verbose:
                print(
                    "...running integration of %d realizations single threaded..."
                    % n_realizations)

        rnds = []
        for n in range(n_realizations):
            r = {}
            for l in self.fit_mat.keys():
                if l == 0:
                    if self.delay_model:
                        r[l] = np.dot(
                            self.diagpc,
                            np.random.normal(0, sigma,
                                             (pcs.shape[1], self.delay)))
                    else:
                        r[l] = np.dot(
                            np.random.normal(0, sigma, (pcs.shape[1], )),
                            self.diagpc)
                else:
                    if self.delay_model:
                        r[l] = np.dot(
                            self.diagres[l - 1],
                            np.random.normal(0, sigma,
                                             (pcs.shape[1], self.delay)))
                    else:
                        r[l] = np.dot(
                            np.random.normal(0, sigma, (pcs.shape[1], )),
                            self.diagres[l - 1])
            rnds.append(r)
        args = [[i, rnd, noise_type]
                for i, rnd in zip(range(n_realizations), rnds)]
        results = map_func(self._process_integration, args)

        del args
        if n_workers > 1:
            pool.close()

        self.integration_results = np.zeros(
            (n_realizations, pcs.shape[1], self.int_length))
        self.num_exploding = np.zeros((n_realizations, ))

        if n_workers > 1:
            results = results.get()

        if self.diagnostics:
            # x, num_exploding, xm, xv, xs, xk, lc, kden, ict
            for i, x, num_expl, xm, xv, xs, xk, lc, kden, ict in results:
                self.integration_results[i, ...] = x.T
                self.num_exploding[i] = num_expl
                stat_moments_int[0, i, :] = xm
                stat_moments_int[1, i, :] = xv
                stat_moments_int[2, i, :] = xs
                stat_moments_int[3, i, :] = xk
                lag_cors_int[i, ...] = lc
                kernel_densities_int[i, ...] = kden
                int_corr_scale_int[i, ...] = ict
        else:
            for i, x, num_expl in results:
                self.integration_results[i, ...] = x.T
                self.num_exploding[i] = num_expl

        if self.verbose:
            print("...integration done, now saving results...")

        if self.verbose:
            print("...results saved to structure.")
            print(
                "there were %d exploding integration chunks in %d realizations."
                % (np.sum(self.num_exploding), n_realizations))

        if self.diagnostics:
            if self.verbose:
                print("plotting diagnostics...")

            import matplotlib.pyplot as plt
            # plot all diagnostic stuff
            ## mean, variance, skewness, kurtosis, integral corr. time scale
            titles = [
                'MEAN', 'VARIANCE', 'SKEWNESS', 'KURTOSIS',
                'INTEGRAL CORRELATION TIME SCALE'
            ]
            plot = [
                np.mean(pcs, axis=0),
                np.var(pcs, axis=0, ddof=1),
                sts.skew(pcs, axis=0),
                sts.kurtosis(pcs, axis=0), integral_corr_timescale
            ]
            xplot = np.arange(1, pcs.shape[1] + 1)
            for i, tit, p in zip(range(5), titles, plot):
                plt.figure()
                plt.title(tit, size=20)
                plt.plot(xplot, p, linewidth=3, color='#3E3436')
                if i < 4:
                    plt.plot(xplot,
                             np.percentile(stat_moments_int[i, :, :],
                                           q=2.5,
                                           axis=0),
                             '--',
                             linewidth=2.5,
                             color='#EA3E36')
                    plt.plot(xplot,
                             np.percentile(stat_moments_int[i, :, :],
                                           q=97.5,
                                           axis=0),
                             '--',
                             linewidth=2.5,
                             color='#EA3E36')
                else:
                    plt.plot(xplot,
                             np.percentile(int_corr_scale_int, q=2.5, axis=0),
                             '--',
                             linewidth=2.5,
                             color='#EA3E36')
                    plt.plot(xplot,
                             np.percentile(int_corr_scale_int, q=97.5, axis=0),
                             '--',
                             linewidth=2.5,
                             color='#EA3E36')
                plt.xlabel("# PC", size=15)
                plt.xlim([xplot[0], xplot[-1]])
                plt.show()
                plt.close()

            ## lagged correlations, PDF - plot first 9 PCs (or less if input number of pcs is < 9)
            titles = ['AUTOCORRELATION', 'PDF']
            plot = [[lag_cors, lag_cors_int],
                    [kernel_densities, kernel_densities_int]]
            xlabs = ['LAG', '']
            for i, tit, p, xlab in zip(range(2), titles, plot, xlabs):
                plt.figure()
                plt.suptitle(tit, size=25)
                no_plts = 9 if self.no_input_ts > 9 else self.no_input_ts
                for sub in range(0, no_plts):
                    plt.subplot(3, 3, sub + 1)
                    if i == 0:
                        xplt = np.arange(0, self.max_lag + 1)
                        plt.plot(xplt,
                                 p[0][p[0].shape[0] // 2:, sub],
                                 linewidth=3,
                                 color='#3E3436')
                        plt.plot(xplt,
                                 np.percentile(p[1][:, p[0].shape[0] // 2:,
                                                    sub],
                                               q=2.5,
                                               axis=0),
                                 '--',
                                 linewidth=2.5,
                                 color='#EA3E36')
                        plt.plot(xplt,
                                 np.percentile(p[1][:, p[0].shape[0] // 2:,
                                                    sub],
                                               q=97.5,
                                               axis=0),
                                 '--',
                                 linewidth=2.5,
                                 color='#EA3E36')
                        plt.xlim([xplt[0], xplt[-1]])
                    else:
                        plt.plot(p[0][:, sub, 0],
                                 p[0][:, sub, 1],
                                 linewidth=3,
                                 color='#3E3436')
                        plt.plot(p[1][0, :, sub, 0],
                                 np.percentile(p[1][:, :, sub, 1],
                                               q=2.5,
                                               axis=0),
                                 '--',
                                 linewidth=2.5,
                                 color='#EA3E36')
                        plt.plot(p[1][0, :, sub, 0],
                                 np.percentile(p[1][:, :, sub, 1],
                                               q=97.5,
                                               axis=0),
                                 '--',
                                 linewidth=2.5,
                                 color='#EA3E36')
                        plt.xlim([p[0][0, sub, 0], p[0][-1, sub, 0]])
                    plt.xlabel(xlab, size=15)
                    plt.title("PC %d" % (int(sub) + 1), size=20)
                # plt.tight_layout()
                plt.show()
                plt.close()
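
For the 'seasonal' noise option, the method fits harmonics of the annual cycle to the month-by-month standard deviation of the last level's residuals by least squares and uses the resulting smooth curve to modulate the forcing. The following is a minimal standalone sketch of that harmonic regression, assuming monthly data; the synthetic monthly_std and all names are illustrative.

import numpy as np

# sketch of the seasonal-std harmonic fit (illustrative, outside the class)
n_harmonics = 5
months = np.arange(12)

predictors = np.zeros((12, 2 * n_harmonics + 1))
for nh in range(n_harmonics):
    predictors[:, 2 * nh] = np.cos(2 * np.pi * (nh + 1) * months / 12)
    predictors[:, 2 * nh + 1] = np.sin(2 * np.pi * (nh + 1) * months / 12)
predictors[:, -1] = 1.0                      # intercept column

rng = np.random.default_rng(2)
monthly_std = 1.0 + 0.3 * np.cos(2 * np.pi * months / 12) + 0.05 * rng.standard_normal(12)

coef, *_ = np.linalg.lstsq(predictors, monthly_std, rcond=None)
seasonal_std = predictors @ coef             # one smooth modulation value per month
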