def __init__(self, ax, fig, user, par=None):
    if par is None:
        par = ['0']
    self.fig = fig
    self.ax = ax
    # read the fuel configuration (*.ful) file from the path defined in the user settings
    self.ful = rcsv('{}\\{}.ful'.format(*user[2:4]), sep=',')
    self.parameters = par

def asignar_sun(dataframe, CVE_MUN='CVE_MUN', vars=None):
    # columns to keep from the SUN catalogue (avoids a mutable default argument)
    if vars is None:
        vars = ['CVE_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN']
    sun = rcsv(
        r'D:\PCCS\01_Analysis\01_DataAnalysis\00_Parametros\sun_main.csv',
        dtype={'CVE_SUN': str, 'CVE_ENT': str, 'CVE_MUN': str, 'CVE_LOC': str},
        encoding='UTF-8',
    )
    # zero-pad the geostatistical keys
    sun['CVE_SUN'] = sun['CVE_SUN'].apply('{:0>3}'.format)
    sun['CVE_ENT'] = sun['CVE_ENT'].apply('{:0>2}'.format)
    sun['CVE_MUN'] = sun['CVE_MUN'].apply('{:0>5}'.format)
    print('Variable catalogue. Default vars = {}'.format(vars))
    print(list(sun))
    if 'CVE_MUN' not in vars:
        vars.append('CVE_MUN')
    # standardise the name of the geostatistical key column
    dataframe.rename(columns={CVE_MUN: 'CVE_MUN'}, inplace=True)
    sun = sun[vars]
    dataframe = pd.merge(dataframe, sun, on='CVE_MUN')
    return dataframe

def alfa_t2(self, name, property=None):
    ffile = rcsv('{}.ful'.format(name), sep=',')
    # draw a fire site at random (Monte Carlo) and read its fuel configuration
    fire_site = self.mc_rand(ffile)
    config = ffile.iloc[fire_site]
    fuel_xes = (config.XA, config.XB)
    fuel_yes = (config.YA, config.YB)
    fuel_zes = (config.ZA, config.ZB)
    hrrpua = triangular(config.hrrpua_min, config.hrrpua_max, mode=config.hrrpua_mode) * 1000    # [kW/m2]
    if not property:
        alpha = triangular(config.alpha_min, config.alpha_max, mode=config.alpha_mode)    # [kW/s2]
    elif property == 'store':
        alpha = hrrpua * random.lognormal(-9.72, 0.97)    # [kW/s2]
    # fire area derived from the maximum HRR; the commented variant also caps it at the compartment area
    # area = min(config.hrr_max / hrrpua * 1000, self.a_max)    # [m2]
    area = config.hrr_max / hrrpua * 1000    # [m2]
    print('alpha:{}, hrrpua:{}'.format(alpha, hrrpua))

    # build the t-squared HRR curve as [time_min, HRR_MW] pairs
    hrr = []
    for t_frag in range(0, 120):
        t = self.t_end * t_frag / 119
        hrr.extend([round(i, 4) for i in [t / 60, alpha / 1000 * (t ** 2)]])
        # cap at the fully developed HRR (area * hrrpua is in kW, the curve values are in MW)
        if hrr[-1] > area * hrrpua / 1000:
            hrr[-1] = area * hrrpua / 1000
    return hrr, area, fuel_zes, fuel_xes, fuel_yes, hrrpua, alpha

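# Worked example of the growth phase above (illustrative numbers only, not taken from any
# .ful file): with a medium growth rate alpha = 0.012 kW/s2 the curve reaches
# 0.012 * 600**2 = 4320 kW = 4.32 MW ten minutes into the fire, unless the cap
# area * hrrpua is hit first.
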
def sprink_noeff(self, name, property=None):
    ffile = rcsv('{}.ful'.format(name), sep=',')
    # draw a fire site at random (Monte Carlo) and read its fuel configuration
    fire_site, ases = self.mc_rand(ffile)
    config = ffile.iloc[fire_site]
    fuel_xes = (config.XA, config.XB)
    fuel_yes = (config.YA, config.YB)
    fuel_zes = (config.ZA, config.ZB)
    hrrpua = triangular(config.hrrpua_min, config.hrrpua_max, mode=config.hrrpua_mode) * 1000    # [kW/m2]
    if not property:
        alpha = triangular(config.alpha_min, config.alpha_max, mode=config.alpha_mode)    # [kW/s2]
    elif property == 'store':
        alpha = hrrpua * random.lognormal(-9.72, 0.97)    # [kW/s2]
    # HRR at sprinkler activation; the fire is assumed not to grow beyond this value
    # q_0 = min(alpha * config.t_sprink ** 2, self.a_max * hrrpua)    # [kW]
    q_0 = alpha * config.t_sprink ** 2    # [kW]
    area = q_0 / hrrpua    # [m2]
    print('alpha:{}, hrrpua:{}'.format(alpha, hrrpua))

    # build the HRR curve as [time_min, HRR_MW] pairs: alpha [kW/s2] * t**2 [s2] / 1000 = [MW]
    hrr = []
    for t_frag in range(0, 120):
        t = self.t_end * t_frag / 120
        if t >= config.t_sprink:
            # after sprinkler activation the HRR is held constant
            hrr.extend([round(i, 4) for i in [t / 60, alpha / 1000 * (config.t_sprink ** 2)]])
        else:
            hrr.extend([round(i, 4) for i in [t / 60, alpha / 1000 * (t ** 2)]])
    return hrr, area, fuel_zes, fuel_xes, fuel_yes, hrrpua, alpha

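# A minimal, self-contained sketch of the capped t-squared growth model that alfa_t2 and
# sprink_noeff above both follow (assumed form: Q(t) = alpha * t**2, frozen at sprinkler
# activation and capped at the fully developed HRR). The function name, its arguments and
# the example numbers are illustrative and not part of the original module.
def t_squared_hrr(alpha_kw_s2, t_end_s, q_cap_kw, t_sprink_s=None, n_points=120):
    """Return [time_min, HRR_MW] pairs for a t-squared fire, optionally frozen at sprinkler activation."""
    curve = []
    for i in range(n_points):
        t = t_end_s * i / (n_points - 1)                          # time [s]
        t_eff = t if t_sprink_s is None else min(t, t_sprink_s)   # growth stops once sprinklers operate
        q_kw = min(alpha_kw_s2 * t_eff ** 2, q_cap_kw)            # cap at the fully developed HRR
        curve.extend([round(t / 60, 4), round(q_kw / 1000, 4)])   # [min], [MW]
    return curve

# e.g. t_squared_hrr(0.047, 1800, 20000, t_sprink_s=180) plateaus at 0.047 * 180**2 = 1522.8 kW, i.e. about 1.52 MW
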
def asignar_sun(dataframe, CVE_MUN='CVE_MUN', variables=None):
    # columns to keep from the SUN catalogue (avoids a mutable default argument)
    if variables is None:
        variables = ['CVE_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN']
    # load the Main Subsystem file of the SUN (National Urban System)
    sun = rcsv(
        r'D:\PCCS\01_Dmine\00_Generales\sun_main.csv',
        dtype={'CVE_SUN': str, 'CVE_ENT': str, 'CVE_MUN': str, 'CVE_LOC': str, 'CVE_SUNMUN': str},
        encoding='UTF-8',
    )
    # zero-pad the geostatistical keys
    sun['CVE_SUN'] = sun['CVE_SUN'].apply('{:0>3}'.format)
    sun['CVE_ENT'] = sun['CVE_ENT'].apply('{:0>2}'.format)
    sun['CVE_MUN'] = sun['CVE_MUN'].apply('{:0>5}'.format)
    # print('Variable catalogue. Default variables = {}'.format(variables))
    # print(list(sun))
    if 'CVE_MUN' not in variables:
        variables.append('CVE_MUN')
    # standardise the name of the geostatistical key column
    dataframe.rename(columns={CVE_MUN: 'CVE_MUN'}, inplace=True)
    # drop municipalities that appear duplicated in the dataset because they are subdivided into localities
    sun.drop_duplicates('CVE_SUNMUN', keep='first', inplace=True)
    sun = sun[variables]
    # inner merge drops records that are not part of the Main Subsystem
    dataframe = pd.merge(dataframe, sun, on='CVE_MUN')
    return dataframe

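# Illustrative usage sketch, not part of the original module: the key column name and the
# values below are hypothetical, and sun_main.csv must exist at the hard-coded path above.
def _ejemplo_asignar_sun():
    datos = pd.DataFrame({'clave_mun': ['01001', '09002'],    # hypothetical CVE_MUN keys
                          'valor': [1.0, 2.0]})               # hypothetical payload column
    # renames 'clave_mun' to 'CVE_MUN' and attaches CVE_SUN, NOM_SUN and TIPO_SUN
    return asignar_sun(datos, CVE_MUN='clave_mun')
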
def __init__(self, res_path, t_crit, rset, probs):
    chdir(res_path)
    self.results = rcsv('stoch_rest.csv', sep=',')
    self.t_crit = t_crit
    self.rset = rset
    self.p_coll = probs[0]
    self.p_evac = probs[1]

def mean(self):
    data_list = []
    for t in range(self.time[0], self.time[1], 10):
        for o in self.orientation:
            # split the CSV file for the specified bounds
            with open('f2a_{}_{}.csv'.format(t, o)) as temp:
                files = temp.read().split('Patch ')
            for f in files[1:]:
                self.split_csv(f)
            # merge the split CSV files using Y as the key
            data_list.append(rcsv('f2a_{}_{}.csv'.format(t, o), sep=','))
            # only the first orientation and time step are processed so far
            break
        break

def generate_sim(data_path):
    t = sec()
    chdir(config['results_path'])
    data_set = rcsv(data_path)
    for i, r in data_set.iterrows():
        # a NaN profile (r['profile'] != r['profile']) means no elements above the fire base
        if r['profile'] != r['profile']:
            with open('{0}\\{0}.err'.format(r['ID']), 'w') as err:
                mess = '[WARNING] There are no elements above the fire base in scenario {}'.format(r['ID'])
                err.write('{}\nMax element temperature in the scenario is equal to the ambient temperature'.format(mess))
            print(out(outpth, mess))
            continue
        chdir(str(r['ID']))
        MultiT2D(config['time_end']).prepare(r)
        chdir('..')
    return '[OK] {} simulation files created ({} s)'.format(len(data_set.index), round(sec() - t, 2))

def __init__(self, t_end, title, fire_type, fuelconfig):
    self.t_end = t_end            # simulation duration time
    self.title = title            # simulation title
    self.f_type = fire_type       # type of fire
    self.fire_coords = []         # to export to Single class
    print(out(outpth, 'Reading fuel configuration files...'), end='\r')
    t = sec()
    if fuelconfig == 'stp':
        self.fuel = fires.Fuel(title).read_fuel()       # import fuel from STEP and FUL config files
    elif fuelconfig == 'obj':
        self.fuel = fires.FuelOBJ(title).read_fuel()    # import fuel from OBJ and FUL config files
    else:
        self.fuel = rcsv('{}.ful'.format(title))
    print(out(outpth, '[OK] Fuel configuration imported ({} s)'.format(round(sec() - t, 2))))

def save(self, rset, t_crit, errors):
    rset = int(rset)
    t_crit = int(t_crit)
    data = rcsv('stoch_rest.csv', sep=',')
    num_nocoll = len(data.time_crit[data.time_crit == 0])
    n_iter = len(data.t_max)
    save_list = ['v{}\n\nResults from {} iterations\n'.format(self.ver, n_iter)]
    err = [1, 1]    # actual uncertainty of the calculation

    # calculate and write the probability of exceeding the critical temperature and its uncertainty
    try:
        p_coll = len(data.t_max[data.t_max < int(t_crit)]) / len(data.t_max)
        save_list.append('P(collapse) = {}\n'.format(1 - p_coll))
    except ZeroDivisionError:
        save_list.append('unable to calculate P(ASET<RSET) and RMSE\n')
        p_coll = 0
    err[0], save_list = self.uncertainity(save_list, p_coll, n_iter)

    # calculate and write the ASET < RSET probability and its uncertainty
    try:
        p_evac = (len(data.time_crit[data.time_crit <= int(rset)]) - num_nocoll) / (len(data.time_crit) - num_nocoll)
        save_list.append('P(ASET < RSET) = {}\n'.format(p_evac))
    except ZeroDivisionError:
        save_list.append('unable to calculate P(ASET<RSET) and RMSE\n')
        p_evac = 0
    err[1], save_list = self.uncertainity(save_list, p_evac, n_iter)

    save_list.append('{} OZone errors occurred'.format(errors))
    with open('results.txt', 'w') as file:
        file.writelines(save_list)

    # draw charts
    print('temp_crit={}\nRSET={}'.format(t_crit, rset))
    Charting(self.r_p, t_crit, rset, (p_coll, p_evac)).draw()

    # stop the calculations only when both uncertainties are low enough
    return 0 < err[0] < 0.001 and 0 < err[1] < 0.001

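# The uncertainity() helper is not included in this snippet; as a hedged sketch, the 0.001
# stopping threshold above is consistent with the standard error of a binomial proportion,
# RMSE = sqrt(p * (1 - p) / n). The helper name and formula here are assumptions.
def binomial_rmse(p, n_iter):
    """Standard error of an estimated probability p after n_iter Monte Carlo iterations."""
    if n_iter <= 0:
        return 1.0    # no data yet: maximal uncertainty
    return (p * (1 - p) / n_iter) ** 0.5

# e.g. binomial_rmse(0.05, 10000) is about 0.0022, so more iterations would still be needed to reach 0.001
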
def newzealand1(self, name):
    fuel_height = (0.5, 18.5)
    fuel_xes = (0.5, 9.5)
    fuel_yes = (0.5, 19.5)
    hrr_max = 50
    config = rcsv('{}.ful'.format(name), sep=',')
    print(float(config.alpha_mode))
    alpha = triangular(*config.alpha_min, *config.alpha_max, mode=float(config.alpha_mode))
    hrrpua = triangular(*config.hrrpua_min, *config.hrrpua_max, mode=float(config.hrrpua_mode))
    area = hrr_max / hrrpua
    print('alpha:{}, hrrpua:{}'.format(round(alpha, 4), round(hrrpua, 4)))
    hrr = []
    for i in range(0, int(self.t_end / 120)):
        hrr.extend([i / 60, round(alpha / 1000 * (i ** 2), 4)])
        if hrr[-1] > hrr_max:
            hrr[-1] = hrr_max
    return hrr, area, fuel_height, fuel_xes, fuel_yes

def _read_csv(cls, input_file):
    df_data = rcsv(input_file)
    # '学号' is the student ID column; keep it as a string and use it as the index
    df_data['学号'] = df_data['学号'].astype('str')
    df_data.set_index('学号', drop=False, inplace=True)
    dicts = []
    for index, row in df_data.iterrows():
        # '实验结论为空' flags rows whose experiment conclusion is empty; skip those
        if not row['实验结论为空']:
            sentence = {
                'id': row['学号'],                                # student ID
                'isTrain': row['isTrain'],
                'content': row['content'],
                'a': row['结论跑题'],                              # conclusion is off-topic
                'b': row['提到变频器多频率,改变频率多个值之类的'],     # mentions several inverter frequencies / changing the frequency to multiple values
                'c': row['变频器一定,流量值稳定'],                   # inverter setting fixed, flow value stable
                'd': row['频率越大,流量值越大'],                     # the higher the frequency, the higher the flow value
                'e': row['流量计能反映瞬时流量值,数据准确'],           # the flow meter reflects the instantaneous flow value and the data are accurate
            }
            dicts.append(sentence)
    return dicts

def newzealand2(self, name):
    fuel_height = (0.32, 34.1)
    fuel_xes = (0.3, 23.1)
    fuel_yes = (10.3, 101.7)
    hrr_max = 50
    H = fuel_height[1] - fuel_height[0]
    A_max = (fuel_xes[1] - fuel_xes[0]) ** 2 * 3.1415 / 4
    config = rcsv('{}.ful'.format(name), sep=',')
    alpha = triangular(*config.alpha_min, *config.alpha_max, mode=float(config.alpha_mode))
    area = triangular(0, A_max)
    print('alpha:{}, radius: {}'.format(alpha, (area / 3.1415) ** 0.5))
    hrr = []
    for i in range(0, int(self.t_end / 120)):
        hrr.extend([i / 60, round(H * alpha * (i ** 3) / 1000, 4)])
        if hrr[-1] > hrr_max:
            hrr[-1] = hrr_max
    return hrr, area, fuel_height, fuel_xes, fuel_yes

def row_maker(self, name):
    # convert the CSV file to an applicable form
    with open(name) as file:
        lines = file.readlines()
    if lines[0][:5] == 'Patch':
        lines.pop(0)
        lines.pop(1)
        # prepend a running index to every data row
        for i in range(len(lines[1:])):
            lines[i + 1] = ','.join([str(i), lines[i + 1]])
        with open(name, 'w') as file:
            file.writelines(lines)
    csv = rcsv(name)
    row = []
    csv.columns = csv.columns.str.strip().str.replace(' ', '_')
    # keep only non-string (numeric) values from the last column
    for v in csv.iloc[:, -1]:
        if not isinstance(v, str):
            row.append(v)
    return row

def split_mean(self, file):
    data = {}
    # dump the raw patch text to a temporary CSV, strip its header line and reload it
    with open('temp.csv', 'w') as temp:
        temp.write(file)
    with open('temp.csv', 'r') as temp:
        f = temp.readlines()
    patch_no = f[0].split(' ')[0]
    with open('temp.csv', 'w') as temp:
        temp.writelines(f[1:])
    file = rcsv('temp.csv')
    file.drop(0, inplace=True)
    print(file.columns)

    # split according to Z
    file.drop([' X', 'Z'], axis='columns', inplace=True)

    # save in the data dictionary as title: array (pandas DataFrame)
    # title = 'Patch{} X={} Z={}'.format(patch_no, x, z)
    # data[title] = array
    return data

    # calculate and write the ASET < RSET probability and its uncertainty
    try:
        p_evacfailed = (len(data.time_crit[data.time_crit <= int(rset)]) - num_nocoll) / \
                       (len(data.time_crit) - num_nocoll)
        save_list.append('P(ASET < RSET) = {}\n'.format(p_evacfailed))
    except ZeroDivisionError:
        save_list.append('unable to calculate P(ASET<RSET) and RMSE\n')
        p_evacfailed = 0
    err[1], save_list = uncertainty(save_list, p_evacfailed, n_iter)

    with open('results.txt', 'w') as file:
        file.writelines(save_list)
    print('[OK] Results summary written to TXT file')

    # draw charts
    Charting(data, t_crit, rset, (p_collapse, p_evacfailed)).draw()

    # stop the calculations only when both uncertainties are low enough
    return 0 < err[0] < 0.001 and 0 < err[1] < 0.001


if __name__ == '__main__':
    user = user_config(argv[1])
    summary(
        rcsv('{}\\{}_results.csv'.format(user['results_path'], user['case_title'])),
        temp_crit(user['miu']),
        user['RSET'],
    )