def test_legend(self):
    configuration = get_configuration()

    configuration['legend']['columns'] = 1
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_1column_legend.pdf'))

    configuration['legend']['columns'] = 2
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_2columns_legend.pdf'))

    configuration['legend']['columns'] = 3
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_3columns_legend.pdf'))

    configuration['legend']['columns'] = 2
    configuration['files']['MC_sample1.root']['legend-order'] = 1
    configuration['files']['MC_sample2.root']['legend-order'] = 0
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file(
            'default_configuration_2columns_samplesordering_legend.pdf'))

    configuration = get_configuration()
    configuration['legend']['columns'] = 2
    configuration['legend']['position'][2] = 0.75
    configuration['legend']['entries'] = [
        {'label': 'Extra MC legend entry', 'type': 'mc', 'order': 1000},
        {'label': 'Extra signal legend entry', 'type': 'signal', 'order': 1000},
    ]
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_extra_legend_entries.pdf'))
def convert_code_to_token(code):
    query_string = {
        'client_id': get_configuration()['appId'],
        'redirect_uri': 'http://imbored.davidstrauss.net/bounce/',
        'client_secret': get_configuration()['secret'],
        'code': code
    }
    encoded = urllib.urlencode(query_string)
    url = "https://graph.facebook.com/oauth/access_token?{0}".format(encoded)
    deferred = http_request.run(url)

    def cbResponse(response):
        log.msg('Parsing response into a token.')
        parts = urlparse.parse_qs(response.body)
        from pprint import pprint
        pprint(parts)
        token = parts['access_token'][0]
        expires = parts['expires'][0]  # Not yet used.
        log.msg('Parsed response into token {0}.'.format(token))
        return token

    def cbErrResponse(error):
        log.msg(error)
        return None

    deferred.addCallback(cbResponse)
    deferred.addErrback(cbErrResponse)
    return deferred
def single_pv_set(year, key, module_name, inverter_name, orientation):
    c = config.get_configuration()
    weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
    weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
    latlon = pd.read_csv(os.path.join(c.paths['geometry'],
                                      c.files['grid_centroid']),
                         index_col='gid').loc[key]
    location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
    weather = f.adapt_weather_to_pvlib(weather, location)

    sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
    sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')

    smodule = {
        'module_parameters': sandia_modules[module_name],
        'inverter_parameters': sapm_inverters[inverter_name],
        'surface_azimuth': orientation['azimuth'],
        'surface_tilt': orientation['tilt'],
        'albedo': 0.2}

    p_peak = (smodule['module_parameters'].Impo *
              smodule['module_parameters'].Vmpo)

    mc = f.feedin_pvlib_modelchain(location, smodule, weather)
    ac = mc.ac  # .clip(0).fillna(0).div(p_peak)
    dc = mc.dc.p_mp  # .clip(0).fillna(0).div(p_peak)
    print('ac:', ac.sum())
    print('dc:', dc.sum())
def main():
    # Setup and get current configuration
    config = get_configuration()

    # Print parameters
    print_configuration()

    # Initialize class - preprocessing
    preprocess = Preprocessing(config=config)

    # Perform preprocessing
    train_input, train_length_input, train_labels, test_input, \
        test_length_input = preprocess.prepare_data()

    # Initialize class and select mode and model - embeddings
    if config.mode != "infer":
        if config.emb_model == "glove":
            model_emb = GloVeModel(config=config,
                                   dict_vocab=preprocess.dict_vocab)
        else:
            model_emb = Word2VecModel(config=config,
                                      dict_vocab=preprocess.dict_vocab)

        # Fit corpus
        model_emb.fit_to_corpus()

        # Train embeddings
        model_emb.train()

    # Train model
    RNN_Model(config, preprocess.dict_vocab_reverse, train_input,
              train_length_input, train_labels, test_input,
              test_length_input)
def __init__(self):
    database_credentials = configuration.get_configuration('database')
    self.conn = mariadb.connect(user=database_credentials["user"],
                                password=database_credentials["password"],
                                host=database_credentials["host"],
                                port=int(database_credentials["port"]),
                                database=database_credentials["database"])
    self.conn.auto_reconnect = True
def get_full_load_hours():
    """Calculate full load hours per weather key and year from the solar and
    wind feed-in files and write the result to 'full_load_hours.csv'."""
    c = config.get_configuration()
    feedinpath = os.path.join(c.paths['feedin'], '{type}', c.pattern['feedin'])

    my_idx = pd.MultiIndex(levels=[[], []], labels=[[], []],
                           names=['year', 'key'])
    df = pd.DataFrame(index=my_idx, columns=['wind'])

    years = list()
    for vtype in ['solar', 'wind']:
        for year in range(1970, 2020):
            if os.path.isfile(feedinpath.format(year=year,
                                                type=vtype.lower())):
                years.append(year)
    years = list(set(years))

    # Opening one file to get the keys of the weather fields and the columns
    # of the solar file (the columns represent the sets).
    file = pd.HDFStore(feedinpath.format(year=years[0], type='solar'))
    keys = file.keys()
    columns = list(file[keys[0]].columns)
    for col in columns:
        df[col] = ''
    file.close()

    for key in keys:
        df.loc[(0, int(key[2:])), :] = 0
    df.loc[(0, 0), :] = 0

    for year in years:
        df.loc[(year, 0), :] = 0
        logging.info("Processing: {0}".format(year))
        solar = pd.HDFStore(feedinpath.format(year=year, type='solar'))
        wind = pd.HDFStore(feedinpath.format(year=year, type='wind'))
        for key in keys:
            skey = int(key[2:])
            df.loc[(year, skey), 'wind'] = wind[key].sum()
            df.loc[(0, skey), 'wind'] += df.loc[(year, skey), 'wind']
            df.loc[(year, 0), 'wind'] += df.loc[(year, skey), 'wind']
            df.loc[(0, 0), 'wind'] += df.loc[(year, skey), 'wind']
            df.loc[(year, skey), columns] = solar[key].sum()
            df.loc[(0, skey), columns] += df.loc[(year, skey), columns]
            df.loc[(year, 0), columns] += df.loc[(year, skey), columns]
            df.loc[(0, 0), columns] += df.loc[(year, skey), columns]
        solar.close()
        wind.close()
        df.loc[(year, 0), :] = (df.loc[(year, 0), :] / len(keys))

    for key in keys:
        df.loc[(0, int(key[2:])), :] = (df.loc[(0, int(key[2:])), :] /
                                        len(years))
    df.loc[(0, 0), :] = df.loc[(0, 0), :] / (len(years) * len(keys))
    df.sort_index(inplace=True)
    df.to_csv(os.path.join(c.paths['analysis'], 'full_load_hours.csv'))
def test_default_ratio(self):
    configuration = get_configuration()
    configuration['plots']['histo1']['show-ratio'] = True
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_ratio.pdf'))
def test_lines(self):
    configuration = get_configuration()
    configuration['configuration']['line-color'] = "#1685f2"
    configuration['configuration']['line-width'] = 3
    configuration['configuration']['line-type'] = 4
    configuration['plots']['histo1']['vertical-lines'] = [3, 7]
    configuration['plots']['histo1']['horizontal-lines'] = [80]
    configuration['plots']['histo1']['lines'] = [
        [[3, 20], [8, 90]],
        [[5.5, float('nan')], [5.5, float('nan')]]
    ]
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_lines.pdf'))

    configuration = get_configuration()
    configuration['configuration']['line-color'] = "#1685f2"
    configuration['configuration']['line-width'] = 3
    configuration['configuration']['line-type'] = 4
    configuration['plots']['histo1']['vertical-lines'] = [
        {'line-color': '#ae4fea', 'line-width': 10, 'value': 3}, 7]
    configuration['plots']['histo1']['horizontal-lines'] = [80]
    configuration['plots']['histo1']['lines'] = [
        [[3, 20], [8, 90]],
        [[5.5, float('nan')], [5.5, float('nan')]]
    ]
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_lines_inline_style.pdf'))
def parse(inputFile, outputDir):

    # Getting configuration and DB.
    db_handler = Japanese_DB_handler()
    # config_data = _parseConf()
    config_data = configuration.get_configuration()
    if not config_data:
        log.error("couldn't get configuration data")
        return
    copyfile(inputFile, _generateFileName(config_data.input_files_bk, 'input'))

    f = db_handler.base_format
    existing_kanjis = db_handler.list(f.vocab, f.vocab.word)
    potentialErrors = []
    newEntriesList = []

    # Parsing input file.
    with open(inputFile, 'r') as fin:
        for row in csv.reader(fin, delimiter=' '):
            # Useful to just get half of the list, but questions are not
            # necessarily before answers, so we forced Japanese as row[0].
            word = row[0]
            meaning = row[1]
            prononciation = row[2] if row[2] else ''
            exemple = ''
            if word not in existing_kanjis:
                newEntriesList.append(['', '', word, prononciation,
                                       meaning, exemple])
            else:
                log.error('already exists : ' + word)

    nb_of_files = len(newEntriesList) // 100
    if len(newEntriesList) % 100 != 0:
        nb_of_files += 1

    outputDir += '/'
    for nb in range(1, nb_of_files + 1, 1):
        fileName = _generateFileName(outputDir, "int", str(nb))
        with open(fileName, 'w') as fout:
            writer = csv.writer(fout, delimiter=' ')
            writer.writerow(['categorie', 'tag', 'word', 'prononciation',
                             'meaning', 'exemple'])
            for entry in newEntriesList[100 * (nb - 1):100 * nb]:
                writer.writerow(entry)

    fileName = _generateFileName(outputDir, "int", '_pottentialErrors')
    with open(fileName, 'w') as fout:
        writer = csv.writer(fout, delimiter=' ')
        for error in potentialErrors:
            writer.writerow(error)
            log.error(error)
    return
def main():
    config = cfg.get_configuration()
    sw = init(config)
    _debug(sw.getGroups())
    _debug(sw.getFriends())
    grp_id = 19058988  # test group
    _debug(sw.getExpenses(group_id=grp_id))
    today = dt.date.today()
    share_expense_with_group_members(sw, "test_date", 10, grp_id, today)
def test_blinded(self):
    configuration = get_configuration()
    configuration['plots']['histo1']['blinded-range'] = [3, 5.2]
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_blinded_range.pdf'))

    configuration = get_configuration()
    configuration['plots']['histo1']['blinded-range'] = [3, 5.2]
    configuration['plots']['histo1']['log-y'] = True
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1_logy.pdf'),
        get_golden_file('default_configuration_blinded_range_logy.pdf'))
def analyse_pv_orientation(year, key, module_name):
    c = config.get_configuration()
    weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
    weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
    latlon = pd.read_csv(os.path.join(c.paths['geometry'],
                                      c.files['grid_centroid']),
                         index_col='gid').loc[key]
    location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
    weather = f.adapt_weather_to_pvlib(weather, location)

    sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
    sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
    invertername = 'SMA_America__SB5000US_11_208V__CEC_2007_'

    azimuth = range(0, 361, 10)
    tilt = range(0, 91, 10)

    # df_ts = pd.DataFrame()
    df_dc = pd.DataFrame()
    df_ac = pd.DataFrame()
    df_sun = pd.DataFrame()
    length = len(azimuth) * len(tilt)
    # from matplotlib import pyplot as plt
    for az in azimuth:
        for tlt in tilt:
            name = 'az{0}_tlt{1}'.format(az, tlt)
            logging.info("{0}, {1}".format(name, length))
            length -= 1
            smodule = {
                'module_parameters': sandia_modules[module_name],
                'inverter_parameters': sapm_inverters[invertername],
                'surface_azimuth': az,
                'surface_tilt': tlt,
                'albedo': 0.2}
            p_peak = (smodule['module_parameters'].Impo *
                      smodule['module_parameters'].Vmpo)
            mc = f.feedin_pvlib_modelchain(location, smodule, weather)
            df_dc.loc[az, tlt] = mc.dc.p_mp.clip(0).div(p_peak).sum()
            df_ac.loc[az, tlt] = mc.ac.clip(0).div(p_peak).sum()
            # print(mc.total_irrad.columns)
            # print(mc.total_irrad['poa_global'].fillna(0).div(p_peak).sum())
            df_sun.loc[az, tlt] = (
                mc.total_irrad['poa_global'].div(p_peak).sum())

    # df_ts.to_csv(os.path.join(paths['analysis'], 'orientation_feedin.csv'))
    df_sun.to_csv(os.path.join(c.paths['analysis'], 'sun.csv'))
    df_dc.to_csv(os.path.join(c.paths['analysis'], 'orientation_feedin_dc.csv'))
    df_ac.to_csv(os.path.join(c.paths['analysis'], 'orientation_feedin_ac.csv'))
def analyse_optimal_orientation_file():
    c = config.get_configuration()
    df = pd.read_csv(os.path.join(c.paths['analysis'],
                                  'optimal_orientation_multi.csv'),
                     index_col=[0, 1, 2], header=[0, 1])
    df.sort_index(axis=0, inplace=True)
    df.sort_index(axis=1, inplace=True)
    df['avg', 'azimuth'] = df.loc[:, (slice(None), 'azimuth')].sum(1).div(3)
    df['avg', 'tilt'] = df.loc[:, (slice(None), 'tilt')].sum(1).div(3)
    print(df.index)
    print(df['avg'].groupby('year').mean())
def something():
    c = config.get_configuration()
    cap = pd.read_csv(os.path.join(c.paths['analysis'], 'pv_data.csv'),
                      header=1, index_col='year')
    print(cap.columns)
    cap['inst_mean'] = cap.inst - (cap.inst - cap.inst.shift(1)) / 2
    cap['diff'] = cap.inst - cap.inst.shift(1)
    cap['VLSt'] = (cap.inst_mean / cap.erzeug) * 1000
    cap['factor'] = cap['VLSt'] / cap['mean']
    print(cap)
    print(cap.sum() / 5)
def __init__(self):
    self.log = getLogger()
    self.log.msg('Starting up server.')

    self.web = None
    self.log.msg('Started web server.')

    self.aggregator = Aggregator()
    self.log.msg('Started Aggregator.')

    self.network = None

    self.log.msg('Gathering configuration')
    self.config = get_configuration()
    self.log.msg('Gathered configuration')

    self.reactor = None
def main(_):
    assert FLAGS.model_type in _MODEL_LIST, 'Invalid model specified.'
    if not tf.gfile.Exists(FLAGS.train_log_dir):
        tf.gfile.MakeDirs(FLAGS.train_log_dir)
    config = configuration.get_configuration()
    config.batch_size = FLAGS.batch_size
    training_config = configuration.TrainingConfig()

    g = tf.Graph()
    with g.as_default():
        # If ps_tasks is zero, the local device is used. When using multiple
        # (non-local) replicas, the ReplicaDeviceSetter distributes the
        # variables across the different devices.
        if FLAGS.model_type == 'multi':
            vae = convolutional_multi_vae.ConvolutionalMultiVae(
                config, mode='train', split_name='train')
        elif FLAGS.model_type == 'single' or FLAGS.model_type == 'kronecker':
            raise NotImplementedError("%s not implemented" % (FLAGS.model_type))
        vae.build_model()

        optimizer = training_config.optimizer(training_config.learning_rate)

        tf.losses.add_loss(vae.loss)
        total_loss = tf.losses.get_total_loss()

        # Set up training.
        train_op = slim.learning.create_train_op(total_loss, optimizer,
                                                 check_numerics=False)
        saver = vae.setup_saver()

        if config.loss_type == 'fwkl':
            init_fn = vae.get_forward_kl_init_fn(FLAGS.fwkl_init_dir)
        else:
            init_fn = None

        # Run training.
        slim.learning.train(train_op=train_op,
                            init_fn=init_fn,
                            logdir=FLAGS.train_log_dir,
                            graph=g,
                            number_of_steps=FLAGS.max_number_of_steps,
                            save_summaries_secs=FLAGS.save_summaries_secs,
                            save_interval_secs=FLAGS.save_interval_secs,
                            saver=saver)
def test_eras(self):
    configuration = get_configuration()
    configuration['files']['MC_sample1.root']['era'] = "1"
    configuration['files']['MC_sample1.root']['cross-section'] *= 1.5
    configuration['files']['data.root']['era'] = "1"
    configuration['files']['MC_sample2.root']['era'] = "2"
    configuration['files']['MC_sample2.root']['cross-section'] *= 3.
    configuration['configuration']['eras'] = ["1", "2"]
    configuration['configuration']['luminosity'] = {"1": 0.67, "2": 0.33}
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_eras.pdf'))
def main():
    # Setup and get current configuration
    config = get_configuration()

    # Print parameters
    print_configuration()

    # Perform preprocessing
    preprocess = Preprocessing(config=config)
    train_input_encoder, train_input_decoder, \
        test_input_encoder, test_input_decoder = preprocess.prepare_data()

    # Initialize model class - train or infer: select mode
    Seq2seq(config, train_input_encoder, train_input_decoder,
            test_input_encoder, test_input_decoder,
            preprocess.dict_vocab_reverse, mode=None)
def bounce_for_authentication(request):
    session = request.getSession()
    auth = IFacebookAuth(session)
    auth.state = str(uuid.uuid4())
    query_string = {
        'client_id': get_configuration()['appId'],
        'redirect_uri': 'http://imbored.davidstrauss.net/bounce/',
        'scope': ','.join(['user_about_me', 'user_birthday', 'email',
                           'friends_about_me', 'friends_birthday',
                           'friends_location', 'user_location']),
        'state': auth.state
    }
    encoded = urllib.urlencode(query_string)
    uri = 'https://www.facebook.com/dialog/oauth?{0}'.format(encoded)
    request.redirect(uri)
def test_multi_stacks(self):
    configuration = get_configuration()

    # Remove data
    del configuration['files']['data.root']

    configuration['files']['MC_sample1.root']['stack-index'] = 0
    configuration['files']['MC_sample2.root']['stack-index'] = 1
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_multi_stacks.pdf'))

    configuration['files']['MC_sample1.root']['stack-index'] = 1
    configuration['files']['MC_sample2.root']['stack-index'] = 0
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file(
            'default_configuration_multi_stacks_reverse_ordering.pdf'))

    configuration['files']['MC_sample1.root']['stack-index'] = 0
    configuration['files']['MC_sample2.root']['stack-index'] = 1
    configuration['files']['MC_sample1.root']['fill-type'] = 0
    configuration['files']['MC_sample1.root']['line-type'] = 1
    configuration['files']['MC_sample1.root']['line-width'] = 2
    configuration['files']['MC_sample1.root']['legend-style'] = 'l'
    configuration['files']['MC_sample2.root']['fill-type'] = 0
    configuration['files']['MC_sample2.root']['line-type'] = 1
    configuration['files']['MC_sample2.root']['line-width'] = 2
    configuration['files']['MC_sample2.root']['legend-style'] = 'l'
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file(
            'default_configuration_multi_stacks_line_type.pdf'))
def analyse_pv_types(year, key, orientation):
    c = config.get_configuration()
    weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
    weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
    latlon = pd.read_csv(os.path.join(c.paths['geometry'],
                                      c.files['grid_centroid']),
                         index_col='gid').loc[key]
    location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
    weather = f.adapt_weather_to_pvlib(weather, location)

    sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
    sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')
    invertername = 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'

    # Debug short-circuit: list the matching module names and stop, so the
    # feed-in loop below is not reached while this block is active.
    for modu in sandia_modules.keys():
        if 'BP_Solar' in modu:
            print(modu)
    exit(0)

    df_ts_ac = pd.DataFrame()
    df = pd.DataFrame()
    length = len(sandia_modules.keys())
    for smod in sandia_modules.keys():
        name = smod  # .replace('__', '_')
        logging.info("{0}, {1}".format(name, length))
        length -= 1
        smodule = {
            'module_parameters': sandia_modules[smod],
            'inverter_parameters': sapm_inverters[invertername],
            'surface_azimuth': orientation['azimuth'],
            'surface_tilt': orientation['tilt'],
            'albedo': 0.2}
        p_peak = (smodule['module_parameters'].Impo *
                  smodule['module_parameters'].Vmpo)
        mc = f.feedin_pvlib_modelchain(location, smodule, weather)
        df_ts_ac[name] = mc.ac.clip(0).fillna(0).div(p_peak)
        df.loc[name, 'ac'] = df_ts_ac[name][:8760].sum()
        df.loc[name, 'dc_norm'] = mc.dc.p_mp.clip(0).div(p_peak).sum()
        df.loc[name, 'dc'] = mc.dc.p_mp.clip(0).sum()

    df.to_csv(os.path.join(c.paths['analysis'], 'module_feedin.csv'))
    df_ts_ac.to_csv(
        os.path.join(c.paths['analysis'], 'module_feedin_ac_ts.csv'))
def main() -> None:
    load_dotenv()
    config = get_configuration(sys.argv[1:])
    logger, error_tracker = _configure_logging(config.log_level)
    logger.info("Starting iNaturalist project data extractor.")
    logger.info(f"Configuration: {config}")

    file_path = _run(config)
    if config.input_file:
        merge_bulk_and_api_files(config, file_path)

    logger.info("Finished with data extraction.")
    if error_tracker.fired:
        print("There was an error, please carefully review the log details above.")
        sys.exit(1)
def get_maximum_value(filename, pathname=None, icol=None):
    if pathname is None:
        c = config.get_configuration()
        pathname = c.paths['analysis']
    if icol is None:
        icol = [0]
    table = pd.read_csv(os.path.join(pathname, filename), index_col=icol)

    idx = None
    column = None
    if isinstance(table, pd.Series):
        max_value = table.max()
        idx = table[table == max_value].index[0]
    elif isinstance(table, pd.DataFrame):
        max_value = table.max().max()
        for col in table:
            try:
                idx = table[col][table[col] == max_value].index[0]
                column = col
            except IndexError:
                pass
    print(column, idx)
def analyse_inverter(year, key, module_name, orientation):
    c = config.get_configuration()
    weatherpath = os.path.join(c.paths['weather'], c.pattern['weather'])
    weather = pd.read_hdf(weatherpath.format(year=year), 'A' + str(key))
    latlon = pd.read_csv(os.path.join(c.paths['geometry'],
                                      c.files['grid_centroid']),
                         index_col='gid').loc[key]
    location = {'latitude': latlon['st_y'], 'longitude': latlon['st_x']}
    weather = f.adapt_weather_to_pvlib(weather, location)

    sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
    sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')

    inv = pd.DataFrame()
    failed = pd.Series()
    length = len(sapm_inverters.keys())
    for sinv in sapm_inverters.keys():
        name = sinv  # .replace('__', '_')
        logging.info("{0}, {1}".format(name, length))
        length -= 1
        smodule = {
            'module_parameters': sandia_modules[module_name],
            'inverter_parameters': sapm_inverters[sinv],
            'surface_azimuth': orientation['azimuth'],
            'surface_tilt': orientation['tilt'],
            'albedo': 0.2}
        p_peak = (smodule['module_parameters'].Impo *
                  smodule['module_parameters'].Vmpo)
        try:
            mc = f.feedin_pvlib_modelchain(location, smodule, weather)
            inv.loc[name, 'ac'] = mc.ac.clip(0).fillna(0).div(p_peak).sum()
            inv.loc[name, 'dc'] = (
                mc.dc.p_mp.clip(0).fillna(0).div(p_peak).sum())
        except ValueError:
            logging.info("Inverter {0} failed.".format(name))
            failed.loc[name] = 'failed'

    inv.to_csv(
        os.path.join(c.paths['analysis'], 'sapm_inverters_feedin_full2.csv'))
    failed.to_csv(
        os.path.join(c.paths['analysis'], 'sapm_inverters_failed.csv'))
""" Postfix fabric commands. :copyright: (c) 2014 by Alexander Skiba <*****@*****.**> :licence: MIT :bugreports: [email protected] """ import configuration as config from fabric.api import task, run from os.path import splitext, basename # module configuration ######################################################## MODULE = splitext(basename(__file__))[0] CONFIGURATION = config.get_configuration(MODULE) # tasks ####################################################################### @task def renew_aliases(): """(POSTFIX): Read in the aliases file.""" command = "newaliases" run(command)
            self.data = r.data
        else:
            print(f"Error when requesting {url}")

    def find(self, regex):
        if not regex:
            return None
        source = self.data.decode('utf8')
        matches = re.findall(regex, source)
        return matches

    def save(self, folder, filename):
        os.makedirs(folder, exist_ok=True)
        filename = filename.replace(" ", "_")
        filename = filename.replace(":", "-")
        with open(os.path.join(folder, filename), 'w', encoding="utf") as f:
            f.write(self.data.decode('utf8'))


if __name__ == "__main__":
    c = cfg.get_configuration()
    s = Scraper()
    for i in c.scraper.items:
        if not i.enabled:
            continue
        print(i.name)
        s.refresh(i.url)
        print(s.find(i.sold_out_regex))
        print(s.find(i.price_regex))
        s.save(c.notifier.output_path, f"test_{i.name}.html")
# -*- coding: utf-8 -*-
from flask import request, Flask, jsonify
from flask_api import status
from DataAugmentation import DataAugmentation
import configuration
import re
import content_parser

app = Flask(__name__)
config = configuration.get_configuration()
system = DataAugmentation(coreNLPDirectory=config['CORE_NLP_DIR'],
                          port=config['CORE_NLP_PORT'],
                          language=config['CORE_NLP_LANG'],
                          glawiBz2FilePath=config['GLAWI_BZ2'],
                          glawiPklFilePath=config['GLAWI_PKL'],
                          lexiconsDirectory=config['LEXICON_DIR'],
                          spacyModelPath=config['SPACY_MODEL'])
password = config['DATA_AUGMENTATION_PASSWORD']


@app.route("/dafromtext", methods=['POST'])
def data_augmentation_from_text():
    if request.method == 'POST':
        sentence = request.form['text']
        limit = request.form.get('limit')
        if sentence == None:
import logging

import constant
import discord
import configuration

from discord.ext import commands
from datetime import datetime
from discord_slash import SlashCommand

DISCORD_COMMANDS_FILES = [
    'msqbot.discord_api.commands.commands_meter',
    'msqbot.discord_api.commands.commands_movie_time'
]

credentials = configuration.get_configuration('discord')
intents = discord.Intents.all()
bot = commands.Bot(command_prefix=None, intents=intents)
slash = SlashCommand(bot, sync_commands=True, sync_on_cog_reload=True)


def create_log_file():
    logfile = 'log_{}.log'.format(str(datetime.now().strftime("%m%d%Y%H%M%S")))
    logging.basicConfig(
        level=logging.DEBUG,
        handlers=[logging.FileHandler(constant.LOG_FILE + logfile, 'w', 'utf-8')],
        format='%(asctime)s:%(levelname)s:%(name)s: %(message)s')
    logger = logging.getLogger('discord')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename='discord.log', encoding='utf-8',
                                  mode='w')
    handler.setFormatter(
        logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
    logger.addHandler(handler)
def test_systematics(self):
    configuration = get_configuration()
    configuration['configuration']['luminosity-error'] = 0.4
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_lumi_error_0p4.pdf'))

    configuration = get_configuration()
    configuration['configuration']['luminosity-error'] = 0.
    configuration['systematics'] = []
    configuration['systematics'] += [{'syst1': 1.4}]
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_one_syst_0p4.pdf'))

    configuration = get_configuration()
    configuration['configuration']['luminosity-error'] = 0.
    configuration['systematics'] = []
    configuration['systematics'] += [{'syst1': 1.2}]
    configuration['systematics'] += [{'syst2': {'type': 'ln', 'prior': 1.2}}]
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_two_systs_0p28.pdf'))

    configuration = get_configuration()
    configuration['configuration']['luminosity-error'] = 0.
    configuration['systematics'] = ['alpha', 'beta']
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_two_systs_shape.pdf'))

    configuration = get_configuration()
    configuration['configuration']['luminosity-error'] = 0.
    configuration['systematics'] = ['i-do-not-exist']
    self.run_plotit(configuration)
    self.compare_images(
        os.path.join(self.output_folder.name, 'histo1.pdf'),
        get_golden_file('default_configuration_syst_not_found.pdf'))
"--dataset", dest="dataset", help="the name of the azure blob", default=None) return parser.parse_args() params = parse_arguments() if (not os.path.isfile(params.input_file_name)): print("File: {} does not exist".format(params.input_file_name)) exit() params.dataset = params.dataset if params.dataset != None else params.input_file_name # initialize dev configuration environment config = configuration.get_configuration("dev") # initlaize the data manager dm = DataManager(config) # upoad the file dm.save_data(params.data_source_type, None, params.input_file_name, params.dataset) print("The file: {} was uploaded to the blob: {}".format( params.input_file_name, params.dataset)) print("The storage account is: {} {}".format(config.storage_account, config.data_container_name))
    for fuel in tpp.index.get_level_values(0).unique():
        for year in src.index:
            try:
                df = tpp.loc[fuel, year]
                idx = df.efficiency.notnull()
                w_avg = np.average(df[idx].efficiency,
                                   weights=df[idx].capacity)
                limit = repp.loc[year, (fuel, 'energy')] / w_avg
                src.loc[year, (fuel, 'limit')] = limit
            except KeyError:
                src.loc[year, (fuel, 'limit')] = float('inf')
    return src


def prepare_commodity_sources(c):
    logging.info("Commodity Sources.")
    commodity_sources = initialise_commodity_sources()
    commodity_sources = prices_from_bmwi_energiedaten(c, commodity_sources)
    commodity_sources = emissions_from_znes(c, commodity_sources)
    commodity_sources = prices_2014_from_znes(c, commodity_sources)
    commodity_sources = set_limit_by_energy_production(c, commodity_sources)
    commodity_sources.sort_index(1, inplace=True)
    commodity_sources.to_csv(os.path.join(c.paths['commodity'],
                                          c.files['commodity_sources']))


if __name__ == "__main__":
    logger.define_logging()
    cfg = config.get_configuration()
    logging.info("Commodity Sources.")
    prepare_commodity_sources(cfg)
import logging
import os
import sys
import csv

from japanese.Japanese_DB_handler import Japanese_DB_handler
import configuration as configuration

logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)

output_dir = sys.argv[1]
if not os.path.exists(output_dir):
    log.error('directory not found : ' + str(output_dir))
    sys.exit(1)

jpDB = Japanese_DB_handler()
f = jpDB.base_format
config_data = configuration.get_configuration()
if not config_data:
    log.error("couldn't get configuration data")
    sys.exit(1)

cat_dir = {}
tag_dir = {}
cat_list = jpDB.list_categorie_by_usage()
tag_list = jpDB.list_tag_by_usage()

for cat, _ in cat_list:
    if not cat:
        continue
    cat_dir[cat] = [
        data[0]