def fit(self, time_coefficients, y):
        p = 1  # VAR model order
        K = 3  # number of states
        wlen = 50  # window length (unused in this method)
        shift = 1  # window shift (unused in this method)
        min_r_len = 5  # minimum length for each regime (unused in this method)
        N, T = y.shape

        C, St_km = self._vns_clustering(time_coefficients.T, K)
        # Pooling samples for regimes
        St = np.zeros((N, T, K))
        tj = np.zeros(K, dtype="i")
        for j in range(K):
            t = 0
            for i in range(T):
                if St_km[i] == j:
                    St[:, t, j] = y[:, i]
                    t = t + 1
            tj[j] = t  # number of samples pooled for regime j

        # Estimate state-specific VAR
        A_km = np.zeros((N, N * p, K))
        for j in range(K):
            A_km[:, :, j] = var_model.VAR(St[:, :tj[j], j].T).fit(
                maxlags=p, method="ols", ic=None, trend="nc").params  # fit a VAR per regime

        self.cluster_centres = C
        self.clustered_coefficients = St_km
        self.expanded_time_series = St
        self.length_by_cluster = tj
        self.time_varying_states_var_coefficients = A_km
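
# A minimal, self-contained sketch of the same regime pooling with boolean
# masks on synthetic data (an illustration, not the original class; note that
# trend="nc" follows the snippets here and is spelled "n" in newer statsmodels).
import numpy as np
from statsmodels.tsa.vector_ar import var_model

rng = np.random.default_rng(0)
y = rng.standard_normal((3, 300))      # N=3 channels, T=300 samples
labels = rng.integers(0, 3, size=300)  # stand-in for the k-means state labels

A = []
for j in range(3):
    segment = y[:, labels == j]        # pool every sample assigned to regime j
    res = var_model.VAR(segment.T).fit(maxlags=1, method="ols", ic=None, trend="nc")
    A.append(res.params)               # (N*p, N) coefficient matrix per regime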
Example #2
    def transform(self, seriess, debug):
        lhs, rhs = seriess[0], seriess[1]
        NodeTransformer.validate_input_steps_spans(lhs, rhs)
        if lhs.span() != rhs.span():
            raise ValueError("Inputs must have same span")

        endog = np.transpose([s.pdseries.tolist() for s in seriess])

        p, output = self.get_params()
        calc_p = timedelta_to_period(p, seriess[0].step())

        model = var_model.VAR(endog)
        model_fit = model.fit(calc_p)

        result = None
        if output == 'predicted':
            result = model_fit.fittedvalues
        elif output == 'resid':
            result = model_fit.resid
        else:
            raise ValueError('Invalid output: ' + output)

        # Drop offset_start elements
        offset_start = len(seriess[0].pdseries.index) - len(result)
        # Debug info
        if debug:
            debug_info = {
                "summary": str(model_fit.summary()),
                "offset_start": offset_start,
            }
        else:
            debug_info = {}
        result = pd.Series([np.sum(np.abs(r)) for r in result], index=seriess[0].pdseries.index[offset_start:])
        return (result, debug_info)
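
# Why offset_start exists: a VAR(p) fit loses its first p observations, so
# fittedvalues/resid are shorter than endog. Small synthetic illustration:
import numpy as np
from statsmodels.tsa.vector_ar import var_model

endog = np.random.default_rng(1).standard_normal((100, 2))
fit = var_model.VAR(endog).fit(3)
offset_start = len(endog) - len(fit.resid)  # equals the lag order, here 3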
    def fit(self, y):
        p = 1  # VAR model order
        K = 3  # number of states (unused in this method)
        wlen = 50  # window length
        shift = 1  # window shift
        min_r_len = 5  # minimum length for each regime (unused in this method)
        N, T = y.shape
        tvvar_vec = np.zeros((p * N**2, T))  #TV-VAR coeffs
        win = np.ones(wlen)  #form a window

        # initialize the indexes
        indx = 0
        t = 0
        Yw = np.zeros((N, wlen))

        # Short-Time VAR Analysis
        while indx + wlen <= T:
            for i in range(N):
                Yw[i, :] = y[i, indx:indx + wlen] * win.T
            At = var_model.VAR(Yw.T).fit(
                maxlags=p, method="ols", ic=None, trend="nc").params  # fit a VAR on the window
            tvvar_vec[:, t] = At.ravel()  # store this window's coefficient vector
            indx = indx + shift
            t = t + 1  #update the indexes
        self.coefficients = tvvar_vec
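
# Self-contained sketch of the same short-time (sliding-window) VAR analysis
# on synthetic data; window length and shift are illustrative values only.
import numpy as np
from statsmodels.tsa.vector_ar import var_model

rng = np.random.default_rng(0)
y = rng.standard_normal((2, 400))              # N=2 channels, T=400 samples
wlen, shift, p = 50, 10, 1

coefs = []
for start in range(0, y.shape[1] - wlen + 1, shift):
    window = y[:, start:start + wlen].T        # (wlen, N) as statsmodels expects
    At = var_model.VAR(window).fit(maxlags=p, ic=None, trend="nc").params
    coefs.append(At.ravel())                   # one coefficient vector per window
tvvar = np.column_stack(coefs)                 # (p*N*N, n_windows)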
def ar_stability(data, order, n_eigs):
    '''
    Builds a vector autoregressive (VAR) model of iEEG data,
    constructs the companion (state) matrix, and returns the n_eigs
    largest eigenvalue magnitudes of that matrix.
    :param data: ndarray, shape (n, N), n observations of N channels
    :param order: int, VAR model order
    :param n_eigs: int, number of eigenvalue magnitudes to return
    :return: the n_eigs largest eigenvalue magnitudes, in ascending order
    '''
    n, N = data.shape # N : number of channels, n: number of observations
    model = var_model.VAR(data)
    mvar_fit = model.fit(order)
    A = mvar_fit.coefs

    # build the companion (state) matrix
    top = np.concatenate(A, axis=1)
    bottom = np.eye(N * (order - 1), N * order)
    state_mat = np.concatenate([top, bottom], axis=0)

    # compute the n_eigs largest eigenvalue magnitudes
    eigs = linalg.eig(state_mat, right=False, overwrite_a=True)
    abs_eigs = np.sort(np.abs(eigs))
    return abs_eigs[-n_eigs:]
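
# Hypothetical usage of ar_stability on synthetic data (assumes the module
# imports numpy as np and scipy's linalg, as the function body requires).
import numpy as np

data = np.random.default_rng(0).standard_normal((1000, 4))  # n=1000, N=4
top = ar_stability(data, order=3, n_eigs=5)
print(top)  # five largest eigenvalue magnitudes, in ascending order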
Example #5
    def __init__(self, nid, connection=None, freq='T', maxlags=_maxlags):
        super().__init__(nid, connection, freq)

        endog = self.readings.dropna()
        if endog.empty:
            raise AttributeError('Empty endogenous variable')
        try:
            model = vm.VAR(endog=endog)
            fit = model.fit(maxlags=maxlags)
            self.irf = fit.irf(maxlags)
        except (LinAlgError, ValueError) as err:
            raise AttributeError(err)
Example #6
def var(df, lags=-1, **kwargs):

    model = var_model.VAR(df, dates=df.index, freq="Q")

    if lags > 0:
        print(f"Using given lag order ({lags})...")
        results = model.fit(lags, verbose=True, **kwargs)
        
    else:
        print("Finding optimum lag order...")
        results = model.fit(verbose=True, **kwargs) 

    return results
Example #7
def var_(*args):
    (_, nid, cargs) = args

    node = nd.Cluster(nid)
    log.info('var: {0}'.format(str(node)))
    endog = node.readings.dropna()
    if not endog.empty and cargs.lags:
        maxlags = max(cargs.lags)
        try:
            res = vm.VAR(endog=endog).fit(maxlags=maxlags)
            mkplot(node, 'var', res, cargs.output, maxlags)
        except (LinAlgError, ValueError) as err:
            log.error(err)
Example #8
    def fit(self, training_df=None):
        """Fit the loaded model on the first 80% of the training data.

        Returns:
            model_fit (model): The trained model
        """
        # First load the training data
        if training_df is None:
            train = self.training_data[:int(0.8 * len(self.training_data))]
        else:
            train = training_df[:int(0.8 * len(training_df))]
            self.training_data = training_df
        # Now fit the model
        model = var_model.VAR(endog=train)
        self.ml_model = model.fit()
        return self.ml_model
Example #9
def select_lr(df, maxlags=15, **kwargs):

    lag_list = make_lag_iter(maxlags)

    model = var_model.VAR(df, dates=df.index, freq="Q")

    for more_l, less_l in lag_list:
        big_model = model.fit(more_l, **kwargs)
        small_model = model.fit(less_l, **kwargs)

        reject = lr_test(big_model.llf, small_model.llf, big_model.df_resid)

        if reject:
            return more_l

    else:
        # for/else: reached only if the loop never returned (no rejection)
        raise ValueError("Never rejected, increase maxlags")
        
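# make_lag_iter and lr_test are not shown above. One plausible reading of
# lr_test (an assumption, not the original helper) is a chi-squared
# likelihood-ratio test of the larger VAR against the smaller nested one:
from scipy import stats

def lr_test(llf_big, llf_small, df, alpha=0.05):
    lr_stat = 2.0 * (llf_big - llf_small)  # LR statistic for nested models
    p_value = stats.chi2.sf(lr_stat, df)   # survival function = 1 - CDF
    return p_value < alpha                 # True -> reject the smaller model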
Example #10
def forecast_stock_price_var(stock_prices, time_horizon, max_order=5):

    # transpose so observations are in rows, one column per stock
    stock_prices_t = np.transpose(stock_prices)

    # select optimal order (older statsmodels API: select_order returns a dict of ICs)
    var_mod = var_model.VAR(stock_prices_t)
    order = var_mod.select_order(max_order)
    opt_order = order['aic']

    # fit data with optimal order
    results = var_mod.fit(maxlags=opt_order)

    # predict price to time horizon
    forecasted_price_t = results.forecast(stock_prices_t, time_horizon)
    forecasted_price = np.transpose(forecasted_price_t)

    return forecasted_price
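
# Hypothetical usage with synthetic random-walk prices. As noted above, the
# function targets the older statsmodels select_order API that returned a dict.
import numpy as np

rng = np.random.default_rng(0)
prices = np.cumsum(rng.standard_normal((3, 250)), axis=1)   # 3 assets, 250 days
future = forecast_stock_price_var(prices, time_horizon=10)  # shape (3, 10)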
def mvar_pred_err(data, pred_window_size=1000, order=30):
    '''
    Fits a multivariate autoregressive (VAR) model to all but the last
    pred_window_size samples of data, forecasts the held-out window, and
    returns the window's signal energy and prediction-error energy
    (both computed by the function_energy helper).
    :param data: ndarray, shape (n, N)
    :param pred_window_size: int, length of the held-out forecast window
    :param order: int, VAR model order
    :return: concatenation of the signal-energy and error-energy values
    '''
    model_var = var_model.VAR(data[:-pred_window_size, :])
    model_results = model_var.fit(maxlags=order)
    pred_data = np.asarray(
        model_results.forecast(
            data[-pred_window_size - order:-pred_window_size],
            pred_window_size))

    fun_energy = function_energy(data[-pred_window_size:, :])
    err_energy = function_energy(data[-pred_window_size:, :] - pred_data)

    return np.append(fun_energy, err_energy)
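
# function_energy is defined elsewhere; a plausible stand-in (an assumption)
# is the per-channel sum of squares, which makes the return value a
# signal-energy / prediction-error-energy pair:
import numpy as np

def function_energy(x):            # hypothetical stand-in for the real helper
    return np.sum(x ** 2, axis=0)  # energy per channel

data = np.random.default_rng(0).standard_normal((5000, 4))
energies = mvar_pred_err(data, pred_window_size=500, order=10)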
Example #12
def rolling_estimate(args):
    data, idx, larger_constraints, less_constraints = args

    fit_lag = 100
    result_lst = []

    for i in range(len(data)):
        if i < fit_lag:
            continue
        print(i)
        this_data = data.iloc[i - fit_lag:i, :]
        var = sm.VAR(endog=this_data.values)
        result = var.fit(1, trend="nc")

        a_matrix = irf(result,
                       idx,
                       larger_constraints,
                       less_constraints,
                       2,
                       200,
                       2,
                       plot=False)
        result_lst.append(a_matrix.mean(1))

    result_ar = np.array(result_lst)

    fig = plt.figure(figsize=(20, 10))
    plt.plot(result_ar[:, 0], label="0")
    plt.plot(result_ar[:, 1], label="1")
    plt.plot(result_ar[:, 2], label="2")
    plt.plot(result_ar[:, 3], label="3")
    plt.plot(result_ar[:, 4], label="4")
    plt.plot(result_ar[:, 5], label="5")
    plt.legend()
    plt.savefig("{}_100.png".format(idx))

    with open(str(idx) + "_100", "wb") as fp:
        cp.dump(result_ar, fp)
# try pdc_dtf.py code
A_bar, sigma_bar = mv.mvar_fit(X, p)
est_total = sum(np.array([np.linalg.norm(A_bar[i],ord='fro') for i in range(p)]))
true_total = sum(np.array([np.linalg.norm(A[i],ord='fro') for i in range(p)]))
for i in range(p):
    est_power = np.linalg.norm(A_bar[i],ord='fro') / est_total
    true_power = np.linalg.norm(A[i],ord='fro') / true_total
    print("For i=" + str(i) + ", the estimated regression coefficients (with norm " +
          str(est_power) + " | " + str(true_power) + "):")
    print(str(A_bar[i]))
print("Noise variances:")
print(str(sigma_bar))

# # create known VAR process with parameters A and sigma -- useful later?
# known = var_model.VARProcess(coefs=A, intercept=np.zeros(N), sigma_u=sigma)
# known.plotsim()
# print("Simulation successful")

# create the VAR with data X
model = var_model.VAR(X.T)
print("Model successfully created")

# fit the model -- trying a method
results = model.fit(ic='aic', maxlags=10, trend="nc", verbose=True)
results.summary()


# for i in range(p):
#     print("For i=" + str(i) + ", the difference of estimated and true regression coefficients:")
#     print(str(A[i] - A_bar[i]))
# print("Noise variances:")
# print(str(sigma_bar))
Example #14
    def varfit(self, p, S):
        return var_model.VAR(S).fit(maxlags=p, method="ols",
                                    ic=None, trend="nc").params  # fit a VAR
Example #15
    def var(self, input, output):
        # VAR receives its endogenous data in the constructor; fit() takes lag options
        model = var_model.VAR(input)
        model_fit = model.fit()
        return model_fit
Example #16
    def start_params(self):
        params = np.zeros(self.k_params, dtype=np.float64)

        # A. Run a multivariate regression to get beta estimates
        endog = self.endog.copy()
        exog = self.exog.copy() if self.k_exog > 0 else None

        # Although the Kalman filter can deal with missing values in endog,
        # conditional sum of squares cannot
        if np.any(np.isnan(endog)):
            mask = ~np.any(np.isnan(endog), axis=1)
            endog = endog[mask]
            if exog is not None:
                exog = exog[mask]

        # Regression effects via OLS
        exog_params = np.zeros(0)
        if self.k_exog > 0:
            exog_params = np.linalg.pinv(exog).dot(endog).T
            endog -= np.dot(exog, exog_params.T)

        # B. Run a VAR model on endog to get trend, AR parameters
        ar_params = []
        k_ar = self.k_ar if self.k_ar > 0 else 1
        mod_ar = var_model.VAR(endog)
        res_ar = mod_ar.fit(maxlags=k_ar, ic=None, trend=self.trend)
        ar_params = np.array(res_ar.params.T)
        if self.trend == 'c':
            trend_params = ar_params[:, 0]
            if self.k_ar > 0:
                ar_params = ar_params[:, 1:].ravel()
            else:
                ar_params = []
        elif self.k_ar > 0:
            ar_params = ar_params.ravel()
        else:
            ar_params = []
        endog = res_ar.resid

        # Test for stationarity
        if self.k_ar > 0 and self.enforce_stationarity:
            coefficient_matrices = (ar_params.reshape(self.k_endog * self.k_ar,
                                                      self.k_endog).T).reshape(
                                                          self.k_endog,
                                                          self.k_endog,
                                                          self.k_ar).T

            stationary = is_invertible([1] + list(-coefficient_matrices))

            if not stationary:
                raise ValueError(
                    'Non-stationary starting autoregressive'
                    ' parameters found with `enforce_stationarity`'
                    ' set to True.')

        # C. Run a VAR model on the residuals to get MA parameters
        ma_params = []
        if self.k_ma > 0:
            mod_ma = var_model.VAR(endog)
            res_ma = mod_ma.fit(maxlags=self.k_ma, ic=None, trend='nc')
            ma_params = np.array(res_ma.params.T).ravel()

            # Test for invertibility
            if self.enforce_invertibility:
                coefficient_matrices = (ma_params.reshape(
                    self.k_endog * self.k_ma,
                    self.k_endog).T).reshape(self.k_endog, self.k_endog,
                                             self.k_ma).T

                invertible = is_invertible([1] + list(-coefficient_matrices))

                if not invertible:
                    raise ValueError(
                        'Non-invertible starting moving-average'
                        ' parameters found with `enforce_invertibility`'
                        ' set to True.')

        # 1. Intercept terms
        if self.trend == 'c':
            params[self._params_trend] = trend_params

        # 2. AR terms
        params[self._params_ar] = ar_params

        # 3. MA terms
        params[self._params_ma] = ma_params

        # 4. Regression terms
        if self.mle_regression:
            params[self._params_regression] = exog_params.ravel()

        # 5. State covariance terms
        if self.error_cov_type == 'diagonal':
            params[self._params_state_cov] = res_ar.sigma_u.diagonal()
        elif self.error_cov_type == 'unstructured':
            cov_factor = np.linalg.cholesky(res_ar.sigma_u)
            params[self._params_state_cov] = (
                cov_factor[self._idx_lower_state_cov].ravel())

        # 6. Measurement error variance terms
        if self.measurement_error:
            if self.k_ma > 0:
                params[self._params_obs_cov] = res_ma.sigma_u.diagonal()
            else:
                params[self._params_obs_cov] = res_ar.sigma_u.diagonal()

        return params
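
# The is_invertible test above corresponds to the standard companion-matrix
# stability criterion: a VAR(p) is stable iff every eigenvalue of its
# companion matrix lies strictly inside the unit circle. A numpy sketch:
import numpy as np

def var_is_stationary(coefs):                # coefs: (p, k, k) lag matrices
    p, k, _ = coefs.shape
    top = np.concatenate(coefs, axis=1)      # (k, k*p)
    bottom = np.eye(k * (p - 1), k * p)      # shift block of the companion
    companion = np.vstack([top, bottom])
    return bool(np.all(np.abs(np.linalg.eigvals(companion)) < 1))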
def var_network(data, connections, weightType, order, n_fft=None):
    """
	Builds a networkx graph using connectivity measurements
	derived from vector autoregressive models (VAR)

	Parameters
	----------
	data : ndarray, shape (n, N)
		iEEG data with N channels and n samples 
	connections : list 
		list of either (i) integers
			the integers are the channels that will become nodes
			and the graph will be complete
		or (ii) length 2 lists of integers
			the integers correspond to directed edges
	weightType: str
		string to indicate the connectivity measurement used
		see build_network for more details
	order: int
		VAR model order 
	n_fft: int
		length of FFT in PDC, DTF computations

	Returns
	-------
	G : a weighted networkx graph

	"""

    # Notes:
    #	1) Data is centered and scaled --> other transformations?
    # 	2) Do we want to pass parameters for these transformations?

    # check that n_fft is supplied when required
    if weightType in ("directed_transfer_function", "partial_directed_coherence"):
        if n_fft is None:
            raise AttributeError("n_fft is not supplied")

    # get parameters
    n, N = data.shape  # N : number of channels, n: number of observations

    # normalize the channels
    d_mean = np.reshape(np.mean(data, axis=0), (1, N))
    d_std = np.reshape(np.std(data, axis=0), (1, N))
    data = (data - d_mean) / d_std

    # fit MVAR and obtain coefficients
    model = var_model.VAR(data)
    mvar_fit = model.fit(order)
    A = mvar_fit.coefs
    sigma = np.diagonal(mvar_fit.sigma_u_mle)

    # compute connectivity measurement
    if weightType == "directed_transfer_function":
        W, freqs = DTF(A=A, sigma=sigma, n_fft=n_fft)
        keyword = "dtf"
    elif weightType == "partial_directed_coherence":
        W, freqs = PDC(A=A, n_fft=n_fft)
        keyword = "pdc"
    elif weightType == "granger_causality":
        # granger causality code from statsmodels; W is never computed here
        raise NotImplementedError("granger_causality weights are not implemented")


    # create an edge list from connections
    if type(connections[0]) is list:
        edges = connections
    elif type(connections[0]) is int:
        edges = []
        for node1 in connections:
            for node2 in connections:
                if node1 != node2:
                    edges.append([node1, node2])

    # build the graph with edge weights
    G = nx.DiGraph()
    for edge in edges:
        attr = {keyword: W[edge[0], edge[1]]}
        G.add_edge(edge[0], edge[1], **attr)

    return G
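
# Minimal, self-contained sketch of just the graph-building step, with a
# synthetic weight matrix standing in for the DTF/PDC output:
import numpy as np
import networkx as nx

W = np.random.default_rng(0).random((4, 4))  # stand-in connectivity weights
G = nx.DiGraph()
for u in range(4):
    for v in range(4):
        if u != v:
            G.add_edge(u, v, pdc=W[u, v])    # edge attributes go in as keywords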
Example #18
def df_granger(df, maxlags=10, test='F-test'):
    ts = df.values
    VAR_model = var_model.VAR(ts)
    results = VAR_model.fit(ic='aic', maxlags=maxlags)
    return results
Example #19
def multi_dim_granger(X_ts, Y_ts, maxlags=5, test='F-test'):
    ts = np.hstack((X_ts, Y_ts))
    VAR_model = var_model.VAR(ts)
    results = VAR_model.fit(ic='aic', maxlags=maxlags)
    #return var_results.coefs
    return results
Example #20
from statsmodels.tsa.tsatools import detrend
#%%

sub_id = 'DiAs'
proc = 'preproc'
stage = '_BP_montage_HFB_raw.fif'
picks = ['LTo1-LTo2', 'LTo5-LTo6']
sfreq = 250
tmin_crop = 0
tmax_crop = 1.75

#%%
subject = cf.Subject(sub_id)
datadir = subject.processing_stage_path(proc=proc)
visual_populations = subject.pick_visual_chan()
ts, time = hf.chan_specific_category_ts(picks,
                                        sub_id=sub_id,
                                        proc=proc,
                                        sfreq=sfreq,
                                        stage=stage,
                                        tmin_crop=tmin_crop,
                                        tmax_crop=tmax_crop)
(nchan, nobs, ntrials, ncat) = ts.shape
#%%

X = ts[:, :, 2, 1]  # (nchan, nobs) slice: one trial, one category
VAR = tsa.VAR(X.T)  # statsmodels expects observations in rows

#%%

lag_results = VAR.select_order(trend='n')
Example #21
# https://www.statsmodels.org/dev/generated/statsmodels.tsa.stattools.periodogram.html
from statsmodels.tsa.stattools import periodogram
plt.plot(periodogram(bprice))
plt.show()
plt.plot(periodogram(bprice_1d))

# ### There is no seasonality

# #### Q13

# In[27]:

final_1d = final_date_2017.diff().dropna()
final_1d
from statsmodels.tsa.vector_ar import var_model
var = var_model.VAR(final_1d)
np.argmin(var.select_order().ics['aic'])
var_results = var.fit(maxlags=1)
var_results.summary()

# In[28]:

resid = pd.DataFrame(var_results.resid)
for i in resid.columns:
    print(i)
    resid[i].plot()
    plt.show()
    resid[i].plot(kind='kde')
    plt.show()
def var_order_sel(data, maxorder=5000):
    model = var_model.VAR(data)
    order = model.select_order(maxorder)  # older statsmodels API: dict of ICs
    return order['aic']
Example #23
    def start_params(self):
        params = np.zeros(self.k_params, dtype=np.float64)

        # A. Run a multivariate regression to get beta estimates
        endog = pd.DataFrame(self.endog.copy())
        endog = endog.interpolate()
        endog = endog.fillna(method='backfill').values
        exog = None
        if self.k_trend > 0 and self.k_exog > 0:
            exog = np.c_[self._trend_data, self.exog]
        elif self.k_trend > 0:
            exog = self._trend_data
        elif self.k_exog > 0:
            exog = self.exog

        # Although the Kalman filter can deal with missing values in endog,
        # conditional sum of squares cannot
        if np.any(np.isnan(endog)):
            mask = ~np.any(np.isnan(endog), axis=1)
            endog = endog[mask]
            if exog is not None:
                exog = exog[mask]

        # Regression and trend effects via OLS
        trend_params = np.zeros(0)
        exog_params = np.zeros(0)
        if self.k_trend > 0 or self.k_exog > 0:
            trendexog_params = np.linalg.pinv(exog).dot(endog)
            endog -= np.dot(exog, trendexog_params)
            if self.k_trend > 0:
                trend_params = trendexog_params[:self.k_trend].T
            if self.k_exog > 0:
                exog_params = trendexog_params[self.k_trend:].T

        # B. Run a VAR model on endog to get trend, AR parameters
        ar_params = []
        k_ar = self.k_ar if self.k_ar > 0 else 1
        mod_ar = var_model.VAR(endog)
        res_ar = mod_ar.fit(maxlags=k_ar, ic=None, trend='nc')
        if self.k_ar > 0:
            ar_params = np.array(res_ar.params).T.ravel()
        endog = res_ar.resid

        # Test for stationarity
        if self.k_ar > 0 and self.enforce_stationarity:
            coefficient_matrices = (ar_params.reshape(self.k_endog * self.k_ar,
                                                      self.k_endog).T).reshape(
                                                          self.k_endog,
                                                          self.k_endog,
                                                          self.k_ar).T

            stationary = is_invertible([1] + list(-coefficient_matrices))

            if not stationary:
                warn('Non-stationary starting autoregressive parameters'
                     ' found. Using zeros as starting parameters.')
                ar_params *= 0

        # C. Run a VAR model on the residuals to get MA parameters
        ma_params = []
        if self.k_ma > 0:
            mod_ma = var_model.VAR(endog)
            res_ma = mod_ma.fit(maxlags=self.k_ma, ic=None, trend='nc')
            ma_params = np.array(res_ma.params.T).ravel()

            # Test for invertibility
            if self.enforce_invertibility:
                coefficient_matrices = (ma_params.reshape(
                    self.k_endog * self.k_ma,
                    self.k_endog).T).reshape(self.k_endog, self.k_endog,
                                             self.k_ma).T

                invertible = is_invertible([1] + list(-coefficient_matrices))

                if not invertible:
                    warn('Non-invertible starting moving-average parameters'
                         ' found. Using zeros as starting parameters.')
                    ma_params *= 0

        # Transform trend / exog params from mean form to intercept form
        if self.k_ar > 0 and (self.k_trend > 0 or self.mle_regression):
            coefficient_matrices = (ar_params.reshape(self.k_endog * self.k_ar,
                                                      self.k_endog).T).reshape(
                                                          self.k_endog,
                                                          self.k_endog,
                                                          self.k_ar).T

            tmp = np.eye(self.k_endog) - np.sum(coefficient_matrices, axis=0)

            if self.k_trend > 0:
                trend_params = np.dot(tmp, trend_params)
            if self.mle_regression > 0:
                exog_params = np.dot(tmp, exog_params)

        # 1. Intercept terms
        if self.k_trend > 0:
            params[self._params_trend] = trend_params.ravel()

        # 2. AR terms
        if self.k_ar > 0:
            params[self._params_ar] = ar_params

        # 3. MA terms
        if self.k_ma > 0:
            params[self._params_ma] = ma_params

        # 4. Regression terms
        if self.mle_regression:
            params[self._params_regression] = exog_params.ravel()

        # 5. State covariance terms
        if self.error_cov_type == 'diagonal':
            params[self._params_state_cov] = res_ar.sigma_u.diagonal()
        elif self.error_cov_type == 'unstructured':
            cov_factor = np.linalg.cholesky(res_ar.sigma_u)
            params[self._params_state_cov] = (
                cov_factor[self._idx_lower_state_cov].ravel())

        # 6. Measurement error variance terms
        if self.measurement_error:
            if self.k_ma > 0:
                params[self._params_obs_cov] = res_ma.sigma_u.diagonal()
            else:
                params[self._params_obs_cov] = res_ar.sigma_u.diagonal()

        return params
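
# The mean-form to intercept-form step above implements the standard identity
# c = (I - A_1 - ... - A_p) @ mu for a stable VAR(p). As a compact sketch:
import numpy as np

def mean_to_intercept(coefs, mu):  # coefs: (p, k, k) lag matrices, mu: (k,) mean
    k = coefs.shape[1]
    return (np.eye(k) - coefs.sum(axis=0)) @ mu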
Example #24
def model_selection(method, data=None, n_HN=None, CV_name=None, CV_value=None):
    """Select model instance from scikit-learn library.

    Parameters
    ----------
    method : str
        model type, options: 'NB' (Naive-Bayes), 'SVM', 'NN' (neural net),
        'Tree', 'Forest', 'kNN', 'Logit', 'OLS', 'VAR'.
        Suffix '-rgr' marks a regression problem and '-clf' a classification
        problem; 'NB' only takes '-clf', and 'Logit', 'OLS' and 'VAR' take no suffix.

    data : numpy.array, optional (Default value = None)
        needs to be given if method=='VAR' (from 'statsmodels', not 'scikit-learn').

    n_HN : int, optional (Default value = None)
        number of neurons in hidden layer (only needed for neural networks)

    CV_name : str, optional (Default value = None, no cross-validation)
        name of the cross-validation parameter.
        Note: changing the defaults or the cross-validated parameter requires
        modifying the source code below.

    CV_value : value, optional (Default value = None)
        value of CV_name, if not None.

    Returns
    -------
    model : scikit-learn model instance (VAR from statsmodels)

    """

    # check if model choice is valid
    valid_methods = ['NN-rgr', 'NN-clf', 'Tree-rgr', 'Tree-clf', 'Forest-rgr', 'Forest-clf', \
                     'SVM-rgr', 'SVM-clf', 'kNN-rgr', 'kNN-clf', 'NB-clf', 'OLS', 'Logit', 'VAR']
    if method not in valid_methods:
        raise ValueError("Invalid method: '{0}' not supported.".format(method))

    # select model
    else:
        if method == 'NN-rgr':  # ONLY WORKING with scikit-learn >= 0.18
            # create and train network
            if CV_name == None:
                model = skl_nn.MLPRegressor(hidden_layer_sizes=(n_HN, n_HN),
                                            alpha=.001,
                                            activation='relu',
                                            solver='lbfgs')
            else:
                exec('model = skl_nn.MLPRegressor(' + CV_name + '=' + str(CV_value) + \
                     ',hidden_layer_sizes=((n_col-1),(n_col-1)),activation="relu",solver="lbfgs")')
        elif method == 'NN-clf':  # ONLY WORKING with scikit-learn >= 0.18
            # create and train network
            if CV_name == None:
                model = skl_nn.MLPClassifier(hidden_layer_sizes=n_HN,
                                             alpha=2.,
                                             activation='logistic',
                                             solver='lbfgs')
            else:
                exec('model = skl_nn.MLPClassifier(' + CV_name + '=' + str(CV_value) + \
                     ',hidden_layer_sizes=(n_col-1),activation="logistic",solver="lbfgs")')
        elif method == 'Tree-rgr':
            if CV_name == None:
                model = skl_tree.DecisionTreeRegressor(max_features='sqrt',
                                                       max_depth=5)
            else:
                exec('model = skl_tree.DecisionTreeRegressor(' + CV_name +
                     '=' + str(CV_value) + ',max_features="sqrt")')
        elif method == 'Tree-clf':
            if CV_name == None:
                model = skl_tree.DecisionTreeClassifier(max_depth=8)
            else:
                exec('model = skl_tree.DecisionTreeClassifier(' + CV_name +
                     '=' + str(CV_value) + ')')  # e.g. 'max_depth'
        elif method == 'Forest-rgr':
            if CV_name == None:
                model = skl_ens.RandomForestRegressor(
                    200, max_features='sqrt', max_depth=11)  # 7 best for CPI
            else:
                exec('model = skl_ens.RandomForestRegressor(200,' + CV_name +
                     '=' + str(CV_value) + ',max_depth=11)')
        elif method == 'Forest-clf':
            if CV_name == None:
                model = skl_ens.RandomForestClassifier(200, max_depth=9, \
                                                       criterion='entropy', max_features='sqrt')
            else:
                exec('model = skl_ens.RandomForestClassifier(200,' + CV_name +
                     '=' + str(CV_value) + ',criterion="entropy")')
        elif method == 'SVM-rgr':
            if CV_name == None:
                model = skl_svm.SVR(C=50, gamma=0.001, epsilon=0.2)
            else:
                exec(
                    'model = skl_svm.SVR(' + CV_name + '=' + str(CV_value) +
                    ',epsilon=0.2,gamma=0.001)')  # change for gamma/epsilon CV
        elif method == 'SVM-clf':
            if CV_name == None:
                model = skl_svm.SVC(C=1e3, gamma=1)
            else:
                exec('model = skl_svm.SVC(C=1e3,' + CV_name + '=' +
                     str(CV_value) + ')')
        elif method == 'kNN-rgr':
            if CV_name == None:
                model = skl_neigh.KNeighborsRegressor(n_neighbors=2, p=1)
            else:
                exec('model = skl_neigh.KNeighborsRegressor(' + CV_name + '=' +
                     str(CV_value) + ',p=1)')  # e.g. 'n_neighbors'
        elif method == 'kNN-clf':
            if CV_name == None:
                model = skl_neigh.KNeighborsClassifier(n_neighbors=5, p=1)
            else:
                exec('model = skl_neigh.KNeighborsClassifier(n_neighbors=3,' +
                     CV_name + '=' + str(CV_value) + ')')  # e.g. 'n_neighbors'
        elif method == 'NB-clf':
            if CV_name == None:
                model = skl_NB.GaussianNB()
            else:
                exec(
                    'model = skl_NB.MultinomialNB(' + CV_name + '=' +
                    str(CV_value) + ')'
                )  # e.g. 'alpha' smoothing parameter (0 for no smoothing).
        elif method == 'OLS':
            if CV_name == None:
                model = skl_lin.Ridge(alpha=10)
            else:
                exec('model = skl_lin.Ridge(' + CV_name + '=' + str(CV_value) +
                     ')')
        elif method == 'Logit':
            if CV_name == None:
                model = skl_lin.logistic.LogisticRegression(C=0.01)
            else:
                exec("model = skl_lin.logistic.LogisticRegression(" + CV_name +
                     "=" + str(CV_value) + ",solver='liblinear')")
        elif method == 'VAR':
            model = sm_vars.VAR(data)

    return model
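
# Hypothetical usage (assumes the module-level imports such as skl_ens and
# sm_vars that the function body references):
import numpy as np

clf = model_selection('Forest-clf')                        # unfitted sklearn estimator
var = model_selection('VAR', data=np.random.rand(100, 3))  # statsmodels VAR instance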