Example #1
    def knn(self):
        if self.model == 'SparsePCA':
            model_string = '*V_sPCA.npy'
        if self.model == 'ICA':
            model_string = '*_V_ICA.npy'
        if self.model == 'EnsemblePursuit_numpy':
            model_string = '*_V_ep_numpy.npy'
        if self.model == 'EnsemblePursuit_pytorch':
            model_string = '*_V_ep_pytorch.npy'
        if self.model == 'NMF':
            model_string = '*_V_NMF.npy'
        if self.model == 'PCA':
            model_string = '*_V_pca.npy'
        if self.model == 'LDA':
            model_string = '*_V_lda.npy'
        if self.model == 'NMF_regularization_experiments':
            model_string = '*_V_NMF_regularization_experiments.npy'
        if self.model == 'all':
            #self.save_path=self.data_path
            model_string = '*.mat'
        columns = ['Experiment', 'accuracy']
        acc_df = pd.DataFrame(columns=columns)
        print(self.save_path)
        for filename in glob.glob(os.path.join(self.save_path, model_string)):
            V = np.load(filename)
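            # recover the recording's .mat filename wedged between save_path and the model suffix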
            istim_path = filename[len(self.save_path):len(self.save_path) +
                                  len(self.mat_file_lst[0])]
            istim = io.loadmat(self.data_path +
                               istim_path)['stim']['istim'][0][0].astype(
                                   np.int32)

            istim -= 1  # get out of MATLAB convention
            istim = istim[:, 0]
            nimg = istim.max()  # these are blank stims (exclude them)
            V = V[istim < nimg, :]
            istim = istim[istim < nimg]
            x_train, x_test, y_train, y_test = test_train_split(V, istim)
            acc = evaluate_model_torch(x_train, x_test)
            if self.model != 'NMF_regularization_experiments':
                acc_df = acc_df.append(
                    {
                        'Experiment': filename[len(self.save_path):],
                        'accuracy': acc
                    },
                    ignore_index=True)
            if self.model == 'NMF_regularization_experiments':
                acc_df = acc_df.append(
                    {
                        'Experiment': filename[len(self.save_path):],
                        'accuracy': acc,
                        'alpha': filename[:-19][-1:]
                    },
                    ignore_index=True)
        if self.model == 'NMF_regularization_experiments':
            grouped = acc_df.groupby(['alpha']).mean()
            print(grouped)
            print(grouped.describe())
        pd.options.display.max_colwidth = 300
        print(acc_df)
        print(acc_df.describe())
        return acc_df
def getfeatures(folder, target_size, fraction=0.8, model='Resnet50'):
    # Only ResNet50 is wired up below, so the `model` argument is effectively ignored.
    # `fraction` is the train/test split ratio passed to test_train_split (0.8 is an assumed default).
    model = ResNet50(weights="imagenet", include_top=False)
    images = getFilesInDir(folder)
    images_train, images_test = test_train_split(images, fraction)
    tensors_train = []
    tensors_test = []
    for i, j in images_train.items():
        for k in j:
            tensors_train.append(img_to_tensor(k, target_size=(target_size)))
    for i, j in images_test.items():
        for k in j:
            tensors_test.append(img_to_tensor(k, target_size=(target_size)))
            # print()
    preprocessed_tensors_train = [preprocess_input(i) for i in tensors_train]
    preprocessed_tensors_test = [preprocess_input(i) for i in tensors_test]
    print("Total Training Tensors created:" + str(len(tensors_train)))
    print("Total Testing Tensors created:" + str(len(tensors_test)))
    labels_train = create_labels(images_train, output_classes=21)
    labels_test = create_labels(images_test, output_classes=21)
    print("Total Training Lables created:" + str(len(labels_train)))
    print("Total Testing Lables created:" + str(len(labels_test)))
    features_list_train = [
        model.predict(x) for x in preprocessed_tensors_train
    ]
    features_list_test = [model.predict(x) for x in preprocessed_tensors_test]
    return (features_list_train, labels_train, features_list_test, labels_test)
 def fit_ridge(self, V):
     images = io.loadmat(self.data_path +
                         'images/images_natimg2800_all.mat')['imgs']
     images = images.transpose((2, 0, 1))
     images = images.reshape((2800, 68 * 270))
     from utils import PCA
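     # project the flattened images onto principal components before the ridge fit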
     reduced_images = PCA(images)
     stim = io.loadmat(self.data_path +
                       self.mouse_filename)['stim']['istim'][0][0].astype(
                           np.int32)
     x_train, x_test, y_train, y_test = test_train_split(V, stim)
     y_train = y_train - 1  # get out of MATLAB convention
     reduced_images_ = reduced_images[y_train]
     for alpha in [5000]:
         assembly_array = []
         for assembly in range(0, self.nr_of_components):
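             # average this component's response over the train/test halves, then regress it on the reduced images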
             av_resp = (x_train[:, assembly].T + x_test[:, assembly].T) / 2
             reg = ridge_regression(reduced_images_, av_resp, alpha=alpha)
             assembly_array.append(reg)
         assembly_array = np.array(assembly_array)
         if self.save:
             if self.model == 'EnsemblePursuit_numpy':
                 file_string = self.save_path + self.mouse_filename + '_ep_numpy_' + str(
                     alpha) + 'reg.npy'
             if self.model == 'EnsemblePursuit_pytorch':
                 file_string = self.save_path + self.mouse_filename + '_ep_pytorch_' + str(
                     alpha) + 'reg.npy'
             np.save(file_string, assembly_array)
     return assembly_array
Example #4
 def knn(self):
     if self.model == 'SparsePCA':
         model_string = '*V_sPCA.npy'
     if self.model == 'EnsemblePursuit':
         model_string = '*_V_ep.npy'
     if self.model == 'NMF':
         model_string = '*_V_NMF.npy'
     columns = ['Experiment', 'accuracy']
     acc_df = pd.DataFrame(columns=columns)
     for filename in glob.glob(os.path.join(self.save_path, model_string)):
         V = np.load(filename)
         #print(self.data_path+'/'+filename[43:78]+'.mat')
         istim = sio.loadmat(self.data_path + '/' + filename[43:78] +
                             '.mat')['stim']['istim'][0][0].astype(np.int32)
         istim -= 1  # get out of MATLAB convention
         istim = istim[:, 0]
         nimg = istim.max()  # these are blank stims (exclude them)
         V = V[istim < nimg, :]
         istim = istim[istim < nimg]
         x_train, x_test, y_train, y_test = test_train_split(V, istim)
         acc = evaluate_model_torch(x_train, x_test)
         acc_df = acc_df.append(
             {
                 'Experiment': filename[43:],
                 'accuracy': acc
             },
             ignore_index=True)
     pd.options.display.max_colwidth = 300
     print(acc_df)
     print(acc_df.describe())
     return acc_df
Example #5
 def fit_ridge(self):
     images = io.loadmat(self.data_path +
                         'images/images_natimg2800_all.mat')['imgs']
     images = images.transpose((2, 0, 1))
     images = images.reshape((2800, 68 * 270))
     from utils import PCA
     reduced_images = PCA(images)
     if self.model == 'EnsemblePursuit_pytorch':
         model_string = '*V_ep_pytorch.npy'
     if self.model == 'SparsePCA':
         model_string = '*V_sPCA.npy'
     if self.model == 'ICA':
         model_string = '*_V_ICA.npy'
     if self.model == 'NMF':
         model_string = '*_V_NMF.npy'
     if self.model == 'PCA':
         model_string = '*V_pca.npy'
     if self.model == 'LDA':
         model_string = '*_V_lda.npy'
     if self.model == 'EnsemblePursuit_adaptive':
         model_string = '*_V_ep_adaptive.npy'
     for filename in glob.glob(os.path.join(self.save_path, model_string)):
         print(filename)
         istim_path = filename[len(self.save_path):len(self.save_path) +
                               len(self.mat_file_lst[0])]
         stim = io.loadmat(self.data_path +
                           istim_path)['stim']['istim'][0][0]
         #test train split
         components = np.load(filename)
         x_train, x_test, y_train, y_test = test_train_split(
             components, stim)
         y_train = y_train - 1
         reduced_images_ = reduced_images[y_train]
         for alpha in [5000]:
             assembly_array = []
             for assembly in range(0, self.nr_of_components):
                 av_resp = (x_train[:, assembly].T +
                            x_test[:, assembly].T) / 2
                 reg = ridge_regression(reduced_images_,
                                        av_resp,
                                        alpha=alpha)
                 assembly_array.append(reg)
             assembly_array = np.array(assembly_array)
             if self.model == 'EnsemblePursuit_pytorch':
                 file_string = self.save_path + istim_path + '_ep_pytorch_reg.npy'
             if self.model == 'SparsePCA':
                 file_string = self.save_path + istim_path + '_sPCA_reg.npy'
             if self.model == 'ICA':
                 file_string = self.save_path + istim_path + '_ica_reg.npy'
             if self.model == 'NMF':
                 file_string = self.save_path + istim_path + '_NMF_reg.npy'
             if self.model == 'PCA':
                 file_string = self.save_path + istim_path + '_pca_reg.npy'
             if self.model == 'LDA':
                 file_string = self.save_path + istim_path + '_lda_reg.npy'
             if self.model == 'EnsemblePursuit_adaptive':
                 file_string = self.save_path + istim_path + '_ep_adaptive_reg.npy'
             np.save(file_string, assembly_array)
Example #6
 def knn(self):
     if self.model == 'SparsePCA':
         model_string = '*V_sPCA.npy'
     if self.model == 'EnsemblePursuit':
         model_string = '*_V_ep.npy'
     if self.model == 'NMF':
         model_string = '*_V_NMF.npy'
     if self.model == 'PCA':
         model_string = '*_V_pca.npy'
     if self.model == 'LDA':
         model_string = '*_V_lda.npy'
     if self.model == 'all':
         #self.save_path=self.data_path
         model_string = '*.mat'
     columns = ['Experiment', 'accuracy']
     acc_df = pd.DataFrame(columns=columns)
     print(self.save_path)
     for filename in glob.glob(os.path.join(self.save_path, model_string)):
         if self.model == 'all':
             # raw .mat recordings: subtract spontaneous activity and z-score the responses
             data = io.loadmat(filename)
             resp = data['stim'][0]['resp'][0]
             spont = data['stim'][0]['spont'][0]
             X = subtract_spont(spont, resp)
             V = stats.zscore(X)
         else:
             print(filename)
             V = np.load(filename)
         print(V.shape)
         #print(self.data_path+'/'+filename[43:78]+'.mat')
         istim_path = filename[len(self.save_path):len(self.save_path) +
                               len(self.mat_file_lst[0])]
         print(istim_path)
         istim = sio.loadmat(self.data_path +
                             istim_path)['stim']['istim'][0][0].astype(np.int32)
         istim -= 1  # get out of MATLAB convention
         istim = istim[:, 0]
         nimg = istim.max()  # these are blank stims (exclude them)
         V = V[istim < nimg, :]
         istim = istim[istim < nimg]
         x_train, x_test, y_train, y_test = test_train_split(V, istim)
         acc = evaluate_model_torch(x_train, x_test)
         acc_df = acc_df.append(
             {
                 'Experiment': filename[len(self.save_path):],
                 'accuracy': acc
             },
             ignore_index=True)
    pd.options.display.max_colwidth = 300
    print(acc_df)
    print(acc_df.describe())
    return acc_df
 def knn(self, V):
     columns = ['Experiment', 'accuracy']
     acc_df = pd.DataFrame(columns=columns)
     istim = io.loadmat(self.data_path +
                        self.mouse_filename)['stim']['istim'][0][0].astype(
                            np.int32)
     istim -= 1  # get out of MATLAB convention
     istim = istim[:, 0]
     nimg = istim.max()  # these are blank stims (exclude them)
     V = V[istim < nimg, :]
     istim = istim[istim < nimg]
     x_train, x_test, y_train, y_test = test_train_split(V, istim)
     acc = evaluate_model_torch(x_train, x_test)
     acc_df = acc_df.append(
         {
             'Experiment': self.mouse_filename,
             'accuracy': acc
         },
         ignore_index=True)
     pd.options.display.max_colwidth = 300
     print(acc_df)
     return acc_df
Example #8
 def fit_ridge(self):
     images = sio.loadmat(self.data_path +
                          '/images/images_natimg2800_all.mat')['imgs']
     images = images.transpose((2, 0, 1))
     images = images.reshape((2800, 68 * 270))
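     # project the flattened images onto principal components before the ridge fit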
     reduced_images = PCA(images)
     if self.model == 'EnsemblePursuit':
         model_string = '*V_ep.npy'
     if self.model == 'SparsePCA':
         model_string = '*V_sPCA.npy'
     for filename in glob.glob(os.path.join(self.save_path, model_string)):
         print(filename)
         stim = sio.loadmat(self.data_path + '/' + filename[43:78] +
                            '.mat')['stim']['istim'][0][0]
         #test train split
         components = np.load(filename)
         x_train, x_test, y_train, y_test = test_train_split(
             components, stim)
         y_train = y_train - 1
         reduced_images_ = reduced_images[y_train]
         for alpha in [5000]:
             assembly_array = []
             for assembly in range(0, self.nr_of_components):
                 av_resp = (x_train[:, assembly].T +
                            x_test[:, assembly].T) / 2
                 reg = ridge_regression(reduced_images_,
                                        av_resp,
                                        alpha=alpha)
                 assembly_array.append(reg)
             assembly_array = np.array(assembly_array)
             if self.model == 'EnsemblePursuit':
                 file_string = filename[:-11] + '_' + str(
                     alpha) + '_ep_reg.npy'
             if self.model == 'SparsePCA':
                 file_string = filename[:-11] + '_' + str(
                     alpha) + '_sPCA_reg.npy'
             np.save(file_string, assembly_array)
Example #9
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    torch.backends.cudnn.benchmark = True
    print("CUDA is available")

else:
    device = torch.device("cpu")    
   

all_img_folder = ut.get_image_paths(
    conf.SOMITE_COUNTS,
    opt.data_dir
)

train_folder, val_folder = ut.test_train_split(
    opt.data_dir,
    all_img_folder,
    0.2,
    conf.N_CLASSES
)

# Datasets
train_data = SomiteStageDataset(
    img_folder=train_folder
)

val_data = SomiteStageDataset(
    img_folder=val_folder
)


# Generators
train_generator = data.DataLoader(
Example #10
def main():
    p = optparse.OptionParser()
    p.add_option('--load_data', action="store_true", default=False)
    p.add_option('--save_data', action="store_true", default=False)
    p.add_option('--load_model', action="store_true", default=False)
    p.add_option('--no_run_model', action="store_false", dest="run_model", default=True)
    p.add_option('--no_save_model', action="store_false", dest="save_model", default=True)
    p.add_option('--load_results', action="store_true", default=False)
    p.add_option('--no_save_results', action="store_false", dest="save_results", default=True)
    p.add_option('--no_plot_results', action="store_false", dest="plot_results", default=True)
    p.add_option('--model_name', default='shallow_RNN', type="string",
                 help='Options: shallow_RNN,shallow_LSTM,shallow_GRU,'
                      'deep_RNN, deep_LSTM, deep_GRU, seq2seq')
    p.add_option('--base_path', default="~/machine_learning/stock_sandbox/")
    p.add_option('--dataset', default='jigsaw', type="string", help='Options: jigsaw, synthetic, sp500')
    p.add_option('--n_samples', default=100, type="int")
    p.add_option('--n_ahead', default=50, type="int")
    p.add_option('--patience', default=5, type="int")
    p.add_option('--batch_size', default=20, type="int")
    p.add_option('--max_epochs', default=1000, type="int")
    ops, args = p.parse_args()

    if (not ops.load_results and not ops.run_model) and ops.save_results:
        raise ValueError("Cannot save what has not been loaded or run ")

    if not os.path.exists(os.path.expanduser(ops.base_path + 'results')):
        os.makedirs(os.path.expanduser(ops.base_path + 'results'))
    if not os.path.exists(os.path.expanduser(ops.base_path + 'data')):
        os.makedirs(os.path.expanduser(ops.base_path + 'data'))
    base_name = ops.dataset + '_' + ops.model_name
    data_fname = ops.base_path + 'data/' + ops.dataset + "_data.pkl"
    data_fname = os.path.expanduser(data_fname)
    arch_fname = ops.base_path + 'results/' + base_name + '_model_architecture.json'
    arch_fname = os.path.expanduser(arch_fname)
    weights_fname = ops.base_path + 'results/' + base_name + '_model_weights.h5'
    weights_fname = os.path.expanduser(weights_fname)
    plot_fname = ops.base_path + 'results/' + base_name + '_results.png'
    plot_fname = os.path.expanduser(plot_fname)
    results_fname = ops.base_path + 'results/' + ops.model_name + '_results.pkl'
    results_fname = os.path.expanduser(results_fname)


    #########################BEGIN CODE#######################################
    # tickers = ['AAPL','VZ','NKE','KMI','M','MS','WMT','DOW','MPC']
    tickers = None

    if not ops.load_results:

        if ops.load_data:
            print('Loading data...')
            data = pickle.load(open(data_fname, 'rb'))
            if tickers:
                data = data[tickers]  # keep only the requested ticker columns
        else:

            if ops.dataset == "sp500":
                ##### Real Stock Data
                print('Using sp500 data')
                data = load_s_and_p_data(start="2014-1-1", tickers=tickers)
            elif ops.dataset == "synthetic":
                ##### Synthetic data for testing purposes
                print('Using Synthetic data')
                values = 10000
                s = pd.Series(range(values))
                noise = pd.Series(np.random.randn(values))
                s = s / 1000  # + noise / 100
                d = {'one': s * s * 100 / values,
                     'two': np.sin(s * 10.),
                     'three': np.cos(s * 10),
                     'four': np.sin(s * s / 10) * np.sqrt(s)}
                data = pd.DataFrame(d)
            elif ops.dataset == "jigsaw":
                ##### Easy synthetic data for testing purposes
                print('Using jigsaw data')
                flow = (list(range(1, 10, 1)) + list(range(10, 1, -1))) * 1000
                pdata = pd.DataFrame({"a": flow, "b": flow})
                pdata.b = pdata.b.shift(9)
                data = pdata.iloc[10:] * random.random()  # some noise
            else:
                raise ValueError('Not a legal dataset name')

        if ops.save_data:
            print('Saving data...')
            pickle.dump(data, open(data_fname, 'wb+'))

        if ops.model_name == 'seq2seq':
            (X_train, y_train), (X_test, y_test) = test_train_split(data, splitting_method='seq2seq',
                                                                    n_samples=ops.n_samples, n_ahead=ops.n_ahead)
            print(X_train.shape, y_train.shape)
        else:
            (X_train, y_train), (X_test, y_test) = test_train_split(data, n_samples=ops.n_samples, n_ahead=ops.n_ahead)

        if not ops.load_model:
            print('compiling model')
            in_out_neurons = len(data.columns)

            if ops.model_name == "shallow_RNN":
                model = make_RNN(X_train.shape, [300], SimpleRNN, dropout=0)
            elif ops.model_name == "shallow_LSTM":
                model = make_RNN(X_train.shape, [300], LSTM, dropout=0)
            elif ops.model_name == "shallow_GRU":
                model = make_RNN(X_train.shape, [300], GRU, dropout=0)
            elif ops.model_name == "deep_RNN":
                model = make_RNN(X_train.shape, [300, 500, 200], SimpleRNN, dropout=.2)
            elif ops.model_name == "deep_LSTM":
                model = make_RNN(X_train.shape, [300, 500, 200], LSTM, dropout=.2)
            elif ops.model_name == "deep_GRU":
                model = make_RNN(X_train.shape, [300, 500, 200], GRU, dropout=.2)
            elif ops.model_name == "seq2seq":
                maxlen = 100  # length of input sequence and output sequence
                hidden_dim = 500  # memory size of seq2seq
                seq2seq = Seq2seq(input_length=X_train.shape[1], input_dim=X_train.shape[2], hidden_dim=hidden_dim,
                                  output_dim=X_train.shape[2], output_length=y_train.shape[1],
                                  batch_size=ops.batch_size, depth=4)

                model = Sequential()
                model.add(seq2seq)
                model.compile(loss="mean_squared_error", optimizer="rmsprop")
            else:
                raise ValueError('Not a legal model name')

            model.compile(loss="mean_squared_error", optimizer="rmsprop")
            print('Training model...')
            early_stopping = EarlyStopping(monitor='val_loss', patience=ops.patience, verbose=0)
            model.fit(X_train, y_train, batch_size=ops.batch_size, nb_epoch=ops.max_epochs,
                      validation_split=0.1, callbacks=[early_stopping])
        else:
            print('Loading model...')
            model = model_from_json(open(arch_fname).read())
            model.load_weights(weights_fname)

        if ops.save_model:
            print("Saving model...")
            json_string = model.to_json()
            open(arch_fname, 'w+').write(json_string)
            model.save_weights(weights_fname, overwrite=True)

        if ops.run_model:
            print('Running forecast...')
            forecasted = forecast(model, X_train[-1, :, :], n_ahead=len(y_test[0]))
            predicted = model.predict(X_test)
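            # RMSE over the test windows, averaged across the remaining output dimensions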
            rmse = np.sqrt(((predicted - y_test) ** 2).mean(axis=0)).mean()
            print("RMSE:", rmse)

        if ops.save_results:
            print('Saving results...')
            pickle.dump((predicted, forecasted, y_test), open(results_fname, 'wb+'))
    else:
        print('Loading results...')
        predicted, forecasted, y_test = pickle.load(open(results_fname, 'rb'))

    if ops.plot_results:
        print('Plotting results...')
        print(predicted.shape, y_test.shape, forecasted.shape)
        fig = plt.figure()
        for i in range(min(4, predicted.shape[2])):
            ax = fig.add_subplot(2, 2, i + 1)
            ax.plot(forecasted[:, i], color='r')
            ax.plot(predicted[0, :, i], color='g')
            ax.plot(y_test[0, :, i], color='b')
            if tickers:
                ax.set_title(tickers[i])

        fig.savefig(plot_fname)
Example #11
def main():

    parser = argparse.ArgumentParser(
        description=
        'A script for training model(s). It can select a specific part of the training data and feed it to a model for training. At the end it evaluates the trained models with proper scoring rules, generates rank histograms, calculates feature importance data, and produces plots over the entire dataset to visually show the performance of the trained model(s).'
    )

    parser.add_argument(
        '--model',
        dest='model',
        action='store',
        help=
        'The model to be trained. This can be \'bnn\', \'mdn\' or \'both\'.')

    parser.add_argument(
        '--config',
        dest='config',
        action='store',
        required=True,
        help=
        'The configuration file with setting for the architecture of the models'
    )

    parser.add_argument(
        '--station',
        dest='station',
        action='store',
        default="SBC",
        help='The target LUBW station the trained model aims to predict')

    parser.add_argument(
        '--predictor',
        dest='pred_value',
        action='store',
        default="P1",
        help='The value(s) that should be used as features (P1, P2 or P1P2)')

    parser.add_argument('--period',
                        dest='period',
                        action='store',
                        default="1D",
                        help='Integration period for the data (1D, 1H, 12H)')

    parser.add_argument(
        '--outvalue',
        dest='out_value',
        action='store',
        default="P1",
        help='Output air pollution value of the model(P1 or P2)')

    parser.add_argument('--take_lubw',
                        dest='take_lu_bw',
                        action='store_true',
                        default=False,
                        help='Should the LU BW station be taken as feature')

    parser.add_argument(
        '--random_split',
        dest='random_split',
        action='store_true',
        default=False,
        help='Should the data be split randomly for the test train split')

    parser.add_argument(
        '--dest',
        dest='dest',
        action='store',
        required=False,
        default="/home/arnaud/code/pollution/test_eval",
        help='destination for the evaluation and for the build models')

    parser.add_argument('--base-dir',
                        dest='base_dir',
                        action='store',
                        required=False,
                        default="/home/arnaud/code/pollution/env/data_frames",
                        help='The directory where the data frames reside')

    parser.add_argument(
        '--load-mdn',
        dest='load_mdn',
        action='store',
        required=False,
        default=None,
        help=
        'Load the MDB model from specific folder and do not train a new one')

    parser.add_argument(
        '--load-bnn',
        dest='load_bnn',
        action='store',
        required=False,
        default=None,
        help=
        'Load the BNN model from specific folder and do not train a new one')

    parser.add_argument('--emp',
                        dest='emp',
                        action='store_true',
                        default=False,
                        help='emp')

    args = parser.parse_args()

    station = args.station
    in_value = args.pred_value
    period = args.period
    out_value = args.out_value
    train_per = 0.75
    take_lu_bw = args.take_lu_bw
    random_split = args.random_split
    base_dir = args.base_dir
    dest = args.dest

    X, y, col_names, out_name = select_data(station,
                                            in_value,
                                            period,
                                            include_lu_bw=take_lu_bw,
                                            output_value=out_value,
                                            base_dir=base_dir)
    X_train, X_test, y_train, y_test = test_train_split(X,
                                                        y,
                                                        train_size=train_per,
                                                        random=random_split)
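    # reshape targets to column vectors (n_samples, 1)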
    y_train = y_train.reshape(y_train.shape[0], 1)
    y_test = y_test.reshape(y_test.shape[0], 1)

    config_file = open(args.config, "r")
    config = json.load(config_file)
    config_file.close()

    print("Period: " + period)
    print("Training samples: ", X_train.shape[0])
    print("Test samples: ", X_test.shape[0])
    print("Number of features: ", X_train.shape[1])
    print("Input features: " + in_value)
    print("Target station", station)
    print("Input features:", col_names)
    print("Outpute value", out_name)
    print("-------------")

    ev_samples_cnt = config["ev_samples_cnt"]

    mdn_iter = config["mdn"]["mdn_iter"]
    mdn_layers = config["mdn"]["mdn_layers"]
    mdn_mixture_cnt = config["mdn"]["mdn_mixture_cnt"]
    mdn_id = "mdn_l" + str(mdn_layers) + "_i" + str(mdn_iter) + "_mc" + str(
        mdn_mixture_cnt)

    bnn_samples = config["bnn"]["bnn_samples"]
    bnn_iter = config["bnn"]["bnn_iter"]
    bnn_layers = config["bnn"]["bnn_layers"]
    bnn_id = "bnn_l" + str(bnn_layers) + "_i" + str(bnn_iter) + "_s" + str(
        bnn_samples)

    desc = ""
    desc += "\nPeriod: " + str(period)
    desc += "\nTraining samples: " + str(X_train.shape[0])
    desc += "\nTest samples: " + str(X_test.shape[0])
    desc += "\nNumber of features: " + str(X_train.shape[1])
    desc += "\nTaking LU BW as feature: " + str(take_lu_bw)
    desc += "\nInput value: " + str(in_value)
    desc += "\nTarget station: " + str(station)
    desc += "\nInput features: " + str(col_names)
    desc += "\nOutpute value: " + str(out_name)
    desc += "\nTest-train split ratio: " + str(train_per)
    desc += "\n-------------\n"

    desc += "\nMDN Configuration: "
    desc += "\nIterations: " + str(mdn_iter)
    desc += "\nLayers: " + str(mdn_layers)
    desc += "\nMixtures Count: " + str(mdn_mixture_cnt)
    desc += "\n-------------\n"

    desc += "\nBNN Configuration: "
    desc += "\nIterations: " + str(bnn_iter)
    desc += "\nLayers: " + str(bnn_layers)
    desc += "\nSamples for vatiational inference: " + str(bnn_samples)
    desc += "\n-------------\n"

    desc += "\nEvaluation Configuration"
    desc += "\nSamples drawn from models for each observation: " + str(
        ev_samples_cnt)

    ev = Evaluator(dest, desc, out_value)
    ev.set_test_train_split(X_train, X_test, y_train, y_test)
    ev.set_names(col_names, out_name)

    os.makedirs(dest + "/bnn_train_plots")
    os.makedirs(dest + "/mdn_train_plots")

    def get_mdn():
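        # train a new MDN (with a per-iteration plotting callback) or load one via --load-mdn, then save it under dest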
        if args.load_mdn is None:
            mdn_model = Mdn("MDN Model",
                            X_train,
                            y_train,
                            inner_dims=mdn_layers,
                            num_mixtures=mdn_mixture_cnt)
            mdn_model.fit(num_iter=mdn_iter,
                          callback=lambda mod, j: plot_mdn(
                              X_train, X_test, y_train, y_test, dest +
                              "/mdn_train_plots", mod, j))
            mdn_model.save(dest + "/mdn_model")
            return mdn_model
        else:
            print("Loading MDN from file")
            mdn_model = Mdn("MDN Model",
                            X_train,
                            y_train,
                            inner_dims=mdn_layers,
                            num_mixtures=mdn_mixture_cnt,
                            model_file=args.load_mdn)
            mdn_model.save(dest + "/mdn_model")
            return mdn_model

    def get_bnn():
        if args.load_bnn is None:
            bnn_model = Bnn("BNN Model")
            bnn_model.build(X_train.shape[1],
                            1,
                            layers_defs=bnn_layers,
                            examples=X_train.shape[0])
            bnn_model.fit(X_train,
                          np.squeeze(y_train),
                          epochs=bnn_iter,
                          samples=bnn_samples,
                          callback=lambda mod, j: plot_bnn(
                              X_train, X_test, y_train, y_test, dest +
                              "/bnn_train_plots", mod, j))
            bnn_model.save(dest + "/bnn_model", "bnn_model")
            return bnn_model
        else:
            print("Loading BNN from file")
            bnn_model = Bnn("BNN Model")
            bnn_model.load(args.load_bnn, name="bnn_model")
            bnn_model.save(dest + "/bnn_model", "bnn_model")
            return bnn_model

    if args.model == "bnn":
        print("Fitting the BNN")
        bnn_model = get_bnn()
        ev.evaluate_bnn(bnn_model, bnn_id, samples=ev_samples_cnt)
    elif args.model == "mdn":
        print("Fitting the MDN")
        mdn_model = get_mdn()
        ev.evaluate_mdn(mdn_model, mdn_id, samples=ev_samples_cnt)
    else:
        print("Fitting the MDN")
        mdn_model = get_mdn()

        ev.evaluate_mdn(mdn_model, mdn_id, samples=ev_samples_cnt)

        tf.reset_default_graph()

        print("Fitting the BNN")
        bnn_model = get_bnn()
        ev.evaluate_bnn(bnn_model, bnn_id, samples=ev_samples_cnt)

    if args.emp:
        ev.evaluate_empirical(samples=ev_samples_cnt)