def processing_dir(dir_path1, dir_path2):
    """Compare every variable stored in *dir_path1* against every one in *dir_path2*.

    Each file in the first directory is loaded with ``data_utils.load_var``
    and compared (via ``compare_two_dfs``) to each file in the second
    directory; the comparison result is printed. Nothing is returned.
    """
    # NOTE(review): the original reused the name `element` for both the outer
    # and the inner loop, silently clobbering the outer loop variable, and
    # used a Python-2 print statement. Both fixed; behavior otherwise kept.
    for element1 in os.listdir(dir_path1):
        df1 = data_utils.load_var(join(dir_path1, element1), '1')
        for element2 in os.listdir(dir_path2):
            df2 = data_utils.load_var(join(dir_path2, element2), '2')
            print(compare_two_dfs(df1, df2))
def __run_cell(cell, filename, t_rng, use_single_bc_comp=False):
    """Re-run a cell simulation with the stimulus of a stored model output.

    Parameters
    ----------
    cell : simulation cell object; deep-copied, so the caller's instance is untouched
    filename : path to a pickled model output containing 'Stimulus', 't_rng', 'predur'
    t_rng : time range to simulate; if None, the stored output's 't_rng' is used
    use_single_bc_comp : if True, reduce coupling so a single BC compartment is used (cpl=2)

    Returns
    -------
    (rec_data, rec_time, rec_stim, original_model_output) — the first three are
    None when the simulation raised or was interrupted.
    """
    cell = deepcopy(cell)
    original_model_output = data_utils.load_var(filename)
    # Run with original stimulus.
    cell.set_stim(original_model_output['Stimulus'], stim_type='Light')
    if t_rng is None:
        t_rng = original_model_output['t_rng']
    cell.update_t_rng(t_rng)
    if use_single_bc_comp:
        cell.update_cpl(cpl=2, verbose=False)
    cell.predur = original_model_output['predur']
    print('Running with n_bc_comps =', cell.n_bc_comps)
    # Display the model layout returned by retsim initialization before running.
    im = cell.init_retsim(verbose=False)
    plt.figure(figsize=(8, 8))
    plt.imshow(im)
    plt.show()
    try:
        rec_data, rec_time, rec_stim = cell.run(
            rec_type='test', plot=False, verbose=True, reset_retsim_stim=True)
    except KeyboardInterrupt:
        # Allow aborting a long simulation without losing the loaded output.
        print("KeyboardInterrupt")
        rec_data, rec_time, rec_stim = None, None, None
    except Exception as e:
        print("Error in Simulation\n", e)
        rec_data, rec_time, rec_stim = None, None, None
    if rec_time is not None:
        # Shift recording times so they start at the simulated range's origin.
        rec_time += t_rng[0]
    return rec_data, rec_time, rec_stim, original_model_output
def check_parameter_files(cell, params, folder):
    ''' Test if cell parameters are the same as they were in a previous run.
    This is crucial if you load the data.

    Compares the current cell/optimizer parameter dicts against the pickled
    copies in *folder*; mismatches are printed and, after user confirmation,
    the stored file is overwritten with the current values. Finally the
    current parameter names are (re)saved.
    '''
    files_vs_dicts = {}
    files_vs_dicts['cell_params_default.pkl'] = cell.params_default
    files_vs_dicts['cell_params_unit.pkl'] = cell.params_unit
    files_vs_dicts['opt_p_range.pkl'] = params.p_range
    for file, param_dict in files_vs_dicts.items():
        src_file = os.path.join(folder, file)
        if os.path.isfile(src_file):
            loaded_dict = data_utils.load_var(src_file)
            for param_name, param_value in param_dict.items():
                if param_name not in loaded_dict.keys():
                    print(param_name, 'not in loaded_dict params')
                elif param_value != loaded_dict[param_name]:
                    print(param_name, ':', param_value, '!= ', loaded_dict[param_name])
                    # NOTE(review): source formatting was lost; the
                    # confirm-and-overwrite below may originally have been
                    # outside this elif (once per file) — verify against VCS.
                    input("Params in " + file + " are different. Press Enter to overwrite ... ")
                    data_utils.save_var(param_dict, src_file)
    # If p_range was fine, p_names is fine too.
    data_utils.save_var(params.p_names, os.path.join(folder, 'opt_p_names.pkl'))
def load_init_data(self, dirname):
    """Load pilot-run (initial) data.

    Restores the cell's init data from ``optim_data/<dirname>`` and reads the
    pickled initial recordings; if a loss object is attached, its target is
    refreshed with the current recording times.
    """
    base_dir = os.path.join('optim_data', dirname)
    self.cell.load_init_data(base_dir)
    rec_file = os.path.join(base_dir, 'init_rec_data.pkl')
    self.rec_data = data_utils.load_var(rec_file)
    assert self.rec_data is not None
    if self.loss is not None:
        self.loss.update_target(rec_time=self.get_rec_time())
def load_acc_sols_from_file(self):
    """Load accepted solutions from ``<base_folder>[/<subfoldername>]/acc_sols.pkl``.

    Handle with care: the loaded data is used as-is, with no checks.
    Falls back to an empty list (with a warning) when the file is absent.
    """
    folder = f'{self.base_folder}'
    if self.subfoldername is not None:
        folder += f'/{self.subfoldername}'
    filename = f"{folder}/acc_sols.pkl"
    if data_utils.file_exists(filename):
        self.acc_sols = data_utils.load_var(filename)
    else:
        # Fixed wording of the warning ("Not ... file found" -> "No ... file found").
        print('No acc sols file found!')
        self.acc_sols = []
def gen_or_load_samples(optim, opt_params, filename, load):
    """Either load previously generated samples from *filename*, or generate
    them by running the optimizer in parallel and save them to *filename*.

    Returns the list of model outputs.
    """
    if load:
        assert os.path.isfile(filename), 'File does not exist'
        model_output_list = data_utils.load_var(filename)
        # Sanity check: stored run must match the requested parameter set.
        assert len(model_output_list) == opt_params.shape[0], \
            'Loaded sample size differs from requested'
    else:
        optim.init_rec_data(allow_loading=False, force_loading=True, verbose=True)
        model_output_list = optim.run_parallel(opt_params_list=opt_params, verbose=True)
        data_utils.save_var(model_output_list, filename)
    return model_output_list
def load_samples(self, file=None, verbose=False):
    """Load samples from file.

    Reads the pickled optimization data from *file* (defaults to
    ``self.optim_data_file``). Returns None when the file is missing or empty.
    """
    target = self.optim_data_file if file is None else file
    if not os.path.exists(target):
        if verbose:
            print('File', target, 'does not exist!')
        return None
    if os.stat(target).st_size == 0:
        # Empty file: nothing was ever written.
        return None
    optim_data = data_utils.load_var(target)
    if verbose:
        print('Loaded file', target, '-> n_samples =', optim_data['loss']['total'].size)
    return optim_data
def load_tds_from_file(self, file, params):
    ''' Load training data to pass it to SNPE. Importance weights will be recomputed.

    Reads pickled samples from *file*, converts the per-sample loss dicts to
    network inputs and the stored parameters to optimization space.

    Returns
    -------
    (loaded_tds, n_loaded_samples) where loaded_tds is a tuple
    (tds_params, tds_loss, None); the None slot is the (recomputed later)
    importance weights.
    '''
    print(file)
    loaded_samples = data_utils.load_var(file)
    # Infer the sample count from whichever per-sample array is present.
    if 'wall-time' in loaded_samples.keys():
        n_loaded_samples = loaded_samples['wall-time'].size
    elif 'loss' in loaded_samples.keys():
        n_loaded_samples = loaded_samples['loss']['total'].size
    else:
        raise NotImplementedError
    print('Loaded', n_loaded_samples, 'samples from', file)
    loss_names = loaded_samples['loss'].keys()
    tds_loss = []
    for i in range(n_loaded_samples):
        # Rebuild the per-sample loss dict and map it to the network input.
        sample_loss_dict = {}
        for loss_name in loss_names:
            sample_loss_dict[loss_name] = loaded_samples['loss'][loss_name][i]
        tds_loss.append(self.to_network_input(sample_loss_dict))
    tds_loss = np.array(tds_loss)
    if tds_loss.ndim == 1:
        # Ensure a 2D (n_samples, 1) column even for scalar network inputs.
        tds_loss = np.atleast_2d(tds_loss).T
    # Stored params are either a dict of per-parameter arrays or a 2D array.
    if isinstance(loaded_samples['params'], dict):
        n_params = len(loaded_samples['params'])
    else:
        n_params = loaded_samples['params'].shape[1]
    assert n_params == params.p_N
    tds_params = np.zeros((n_loaded_samples, n_params))
    if isinstance(loaded_samples['params'], dict):
        # Dict form stores simulation-space values; map each to opt space.
        for idx, param in enumerate(params.p_names):
            tds_params[:,idx] = params.sim_param2opt_param(loaded_samples['params'][param], param)
    else:
        # Array form is assumed to already be in opt space — TODO confirm.
        tds_params = loaded_samples['params']
    loaded_tds = (tds_params, tds_loss, None)
    return loaded_tds, n_loaded_samples
def load_data(
        self,
        method,
        adaptive,
        step_param,
        pert_method,
        pert_param='auto',
        filename=None,
):
    """Load data without checking it.

    When *filename* is None it is derived from the remaining arguments via
    ``get_data_folder_and_filename`` (second element of the returned pair).
    Returns the unpickled data as-is.
    """
    if filename is None:
        folder_and_file = self.get_data_folder_and_filename(
            method=method,
            adaptive=adaptive,
            step_param=step_param,
            pert_method=pert_method,
            pert_param=pert_param,
        )
        filename = folder_and_file[1]
    return data_utils.load_var(filename)
def read_var(self, csv_path=None):
    """Read a csv file into a dataframe.

    Parameters
    ----------
    csv_path : {string type}
        Path to the csv file; falls back to this instance's 'path'
        parameter when omitted.

    Return
    ------
    df : {Pandas dataframe}
        The dataframe of the csv file with a sorted time index.
    """
    path = self.get_param('path') if csv_path is None else csv_path
    return data_utils.load_var(path=path, var_name=self.get_param('var_name'))
def load_SNPE_rounds(self):
    ''' Load stored rounds of SNPE.

    All round data lives as pickles inside ``self.snpe_folder``. The SNPE-B
    variant additionally stores kernel bandwidths and pseudo observations;
    for other variants those slots are returned as None.
    '''
    def _load(name):
        # One pickle per artifact, all in the SNPE folder.
        return data_utils.load_var(os.path.join(self.snpe_folder, name))

    inf_snpes = _load('inf_snpes.pkl')
    logs = _load('logs.pkl')
    tds = _load('tds.pkl')
    sample_distributions = _load('sample_distributions.pkl')
    n_samples = _load('n_samples.pkl')

    if self.snpe_type in ['b', 'B']:
        kernel_bandwidths = _load('kernel_bandwidths.pkl')
        pseudo_obs = _load('pseudo_obs.pkl')
    else:
        kernel_bandwidths = None
        pseudo_obs = None

    print('Loaded {:d} rounds'.format(len(n_samples)))
    return inf_snpes, logs, tds, sample_distributions, n_samples, kernel_bandwidths, pseudo_obs
# -*- coding: utf-8 -*- import data_utils import matplotlib.pyplot as plt # df1 = data_utils.load_var('2 Data/1 Received/Market data/Base/XAU_Curncy_LAST_PRICE.csv') # commo_gold # df1 = data_utils.load_var('2 Data/2 Calculs/18 06 Derived/I/STR_USD_1M.csv') # str_usd_1m # df1 = data_utils.load_var('usd_eur.csv') df1 = data_utils.load_var('usd_eur_nan.csv') plt.grid() # plt.plot(df1, "r") plt.plot(df1, 'r') plt.show() plt.figure()
# NOTE(review): this leading fragment references `dir_path1` and `element`
# before they are defined (it duplicates the body of `processing_dir`) and
# uses a Python-2 print statement — it looks like a stale paste; verify
# whether it should be removed.
df1 = data_utils.load_var(join(dir_path1, element), '1')
for element in os.listdir(dir_path2):
    df2 = data_utils.load_var(join(dir_path2, element), '2')
    print compare_two_dfs(df1, df2)

# Compare files that exist under the same name in both directories.
dir_path1 = 'I06'
dir_path2 = 'I04'
i = 0  # number of same-named files whose contents differ
j = 0  # number of same-named file pairs found
for element1 in os.listdir(dir_path1):
    for element2 in os.listdir(dir_path2):
        if element1 == element2:
            j += 1
            csv_path = dir_path1 + '/' + element1
            df_base = data_utils.load_var(csv_path, 'x')
            # df_base.columns = [x.lower() for x in df_base.columns]
            csv_path = dir_path2 + '/' + element2
            df_latest = data_utils.load_var(csv_path, 'y')
            dfs = [df_latest, df_base]
            if df_base.equals(df_latest):
                print(element1)
            else:
                i += 1
                print(element1, element2)
                print(compare_two_dfs(df_base, df_latest))
print(i)
print(j)
# Post-process the last loaded dataframe (only affects the final iteration).
df_latest.sort_index(ascending=True, inplace=True)
df_latest.columns = [x.lower() for x in df_latest.columns]
'j': ['ABPD4', 'LP2', 'PY1'], } n3_panel2syngs = { # [nS] 'a': np.array([10, 100, 10, 3, 30, 1, 3]), 'b': np.array([3, 0, 0, 30, 3, 3, 0]), 'c': np.array([100, 0, 30, 1, 0, 3, 0]), 'd': np.array([3, 100, 10, 1, 10, 3, 10]), 'e': np.array([30, 30, 10, 3, 30, 1, 30]), 'f': np.array([3, 100, 10, 1, 10, 3, 10]), # f-j are the same 'g': np.array([3, 100, 10, 1, 10, 3, 10]), 'h': np.array([3, 100, 10, 1, 10, 3, 10]), 'i': np.array([3, 100, 10, 1, 10, 3, 10]), 'j': np.array([3, 100, 10, 1, 10, 3, 10]), } n3_isslow_list = [0, 1, 0, 1, 0, 0, 0] __filename = f'{Path(__file__).parent.absolute()}/stg_neuron2y0.pkl' try: neuron2y0 = data_utils.load_var(__filename) except: print(f'Could not initialize {__filename}.') __filename = f'{Path(__file__).parent.absolute()}/stg_n3_panel2y0.pkl' try: n3_panel2y0 = data_utils.load_var(__filename) except: print(f'Could not initialize {__filename}.')
# Script preamble: imports, logger setup, and loading of one stored series.
from __future__ import division
import numpy as np
import data_utils
import pandas as pd
from dateutil import parser
#import control_var as cv
from matplotlib import pyplot
import control_utils

# Libraries
import logging
from var_logger import setup_logging

setup_logging()
logger = logging.getLogger(__name__)
logger.debug('Logger for class ')
logger.setLevel('DEBUG')

# print df
# df.plot()
# pyplot.show()
df = data_utils.load_var('STR_USD_3M_DACE_1_20_100.csv', 'GOV_JPN_1Y_Z250D')
# -*- coding: utf-8 -*- import data_utils print data_utils.load_var( '2 Data/2 Calculs/18 06 Derived/I/FUT_SP500_C1_RET1D.csv') # rollingreturn print data_utils.load_var( '2 Data/2 Calculs/18 06 Derived/I/FUT_BUND_RET1ROLL.csv') # futuresroll print data_utils.load_var( '2 Data/2 Calculs/18 06 Derived/I/FUT_NKY_RET1_STD50.csv') # vol path = '2 Data/2 Calculs/18 06 Derived/I/FUT_BUND_RET1ROLL.csv' # futuresroll print data_utils.load_var(path)
# coding: utf-8 from __future__ import division import data_utils import matplotlib.pyplot as plt import pandas as pd import numpy as np from os.path import basename, splitext path = 'GOV_JPN_1Y_Z250D.csv' var_name = splitext(path)[0] # read csv file df = data_utils.load_var(path, var_name) df.plot() plt.show() def zero_cross(arr): neg_pos = ((arr[:-1] * arr[1:]) < 0).sum() # zcr = (1/T)*sum((s(t)*(st-1) < 0)) zcr = neg_pos / len(arr) return zcr # print zero_cross() print zero_cross(df.values)
def load_init_data(self, dirname):
    """Load initial (pilot-run) data for every cell plus the shared recordings.

    Each cell reads its own subfolder 'optim_data/<dirname>/<bp_type>'; the
    joint initial recordings come from 'init_rec_data.pkl'.
    """
    base = 'optim_data/' + dirname
    for cell in self.cells:
        cell.load_init_data(base + '/' + cell.bp_type)
    self.rec_data = data_utils.load_var(base + '/init_rec_data.pkl')
    assert self.rec_data is not None
def load_random_state(self):
    ''' Load numpy random state from folder.

    Restores the global numpy RNG from 'random_state.pkl' in the
    general folder, making subsequent draws reproducible.
    '''
    state_file = os.path.join(self.general_folder, 'random_state.pkl')
    saved_state = data_utils.load_var(state_file)
    np.random.set_state(saved_state)