def config_imports(logger):
    """Load and return the application config via ``config_reader.get_config()``.

    Any failure is logged with its traceback and then re-raised so the
    caller decides how to handle a broken configuration.

    :param logger: a logging.Logger-like object with an ``exception`` method.
    :returns: whatever ``config_reader.get_config()`` returns.
    :raises Exception: re-raises the original error from the config read.
    """
    try:
        config = config_reader.get_config()
        return config
    except Exception:
        logger.exception('ERROR:: Some issue in reading the Config...check config_reader.py script in bin Folder....')
        # Bare `raise` keeps the original traceback; `raise e` would reset
        # the traceback origin to this line.
        raise
def extract(input_file, output_file):
    """Run the full extraction pipeline: raw text -> Stanford dependencies
    -> formatted dataset -> NER predictions written to ``output_file``.

    :param input_file: path to the input corpus; if its filename contains
        TAGGED_MARK as a dot-separated component, labels are stripped first.
    :param output_file: path where predictions are written.

    NOTE(review): all model/classpath locations are hard-coded absolute
    paths under /home/dima — this only runs on that machine as written.
    """
    # Ensure the scratch directory for intermediate dataset files exists.
    if not os.path.isdir(TMP_DATASET_DIR):
        os.mkdir(TMP_DATASET_DIR)
    # A tagged input file is first converted to raw text; the pipeline then
    # continues from the raw copy instead of the original.
    if TAGGED_MARK in input_file.split('/')[-1].split('.'):
        print('Extracting raw text...')
        remove_labels(input_file, TMP_RAW_FILE)
        input_file = TMP_RAW_FILE
    print("Extracting dependencies...")
    # Extract dependencies
    extract_dependencies_via_stanford(
        classpath='/home/dima/CoreNLP/target/classes',
        embeddings='/home/dima/models/ArModel100.txt',
        dependencies_model='/home/dima/models/nndep.rus.modelAr100HS400.txt.gz',
        pos_model='/home/dima/models/russian-ud-pos.tagger',
        input=input_file)
    print("Formatting dependencies...")
    # Format dependencies
    # The Stanford step writes `<basename>.out`; reshape it into train.txt.
    structure_stanford_results.structure_stanford_output(
        f'{input_file.split("/")[-1]}.out',
        f'{TMP_DATASET_DIR}/train.txt')
    # test/dev are plain copies of train — presumably because the consumer
    # requires all three subsets to exist; verify against the model loader.
    copyfile(f'{TMP_DATASET_DIR}/train.txt', f'{TMP_DATASET_DIR}/test.txt')
    copyfile(f'{TMP_DATASET_DIR}/train.txt', f'{TMP_DATASET_DIR}/dev.txt')
    print("Making predictions...")
    # Make predictions
    predict(config=get_config(DATASET),
            model_path='/home/dima/models/ner/big',
            training_dataset=DATASET,
            prediction_dataset=TMP_DATASET_DIR,
            prediction_subset='dev',
            output_path=output_file)
def index(zoom=None, lat=None, lng=None):
    """Render the protocols page.

    Any view parameter left as None falls back to the configured map
    defaults (start_lat / start_lng / start_zoom).

    :param zoom: initial map zoom level, or None for the configured default.
    :param lat: initial map latitude, or None for the configured default.
    :param lng: initial map longitude, or None for the configured default.
    :returns: rendered 'Protocols.html' response.
    """
    # Read the config once — the original read it a second time just for
    # the access key, which was redundant work.
    config = config_reader.get_config()
    if lat is None or lng is None or zoom is None:
        lat = config['start_lat']
        lng = config['start_lng']
        zoom = config['start_zoom']
    context = {
        'lat': lat,
        'lng': lng,
        'zoom': zoom,
        'access_key': config['accessKey'],
    }
    return render_template('Protocols.html', protocols=getProtocols(), **context)
def index(zoom=None, lat=None, lng=None):
    """Render the dashboard when an emergency is active, else a
    no-emergency placeholder page.

    Any view parameter left as None falls back to the configured map
    defaults (start_lat / start_lng / start_zoom).

    :returns: rendered 'Dashboard.html' or 'NoEmergency.html' response.
    """
    # Read the config once — the original read it a second time just for
    # the access key, which was redundant work.
    config = config_reader.get_config()
    if lat is None or lng is None or zoom is None:
        lat = config['start_lat']
        lng = config['start_lng']
        zoom = config['start_zoom']
    context = {
        'lat': lat,
        'lng': lng,
        'zoom': zoom,
        'access_key': config['accessKey'],
    }
    jsonDoc = isActiveEmergency()
    print(jsonDoc)
    if jsonDoc:
        return render_template('Dashboard.html', **context)
    else:
        return render_template("NoEmergency.html")
def get_queue_client(config_file_name, url_key, name_key):
    """Build and connect an AMQP queue client from a config file.

    :param config_file_name: path passed to ``config_reader.get_config``.
    :param url_key: config key holding the queue URL.
    :param name_key: config key holding the queue name.
    :returns: a connected AMQPClient, or None when the config is unreadable
        (preserving the original best-effort contract for callers that
        check for None).
    :raises RuntimeError: if the client fails to connect.
    """
    config = config_reader.get_config(config_file_name)
    if config is None:
        print('get_queue_client: config invalid!!!')
        return None
    url, name = config[url_key], config[name_key]
    queue_client = AMQPClient(url, name)
    queue_client.connect()
    # The original used `assert`, which is stripped under `python -O`;
    # check explicitly so a failed connection can never pass silently.
    if not queue_client.is_connected():
        raise RuntimeError('get_queue_client: failed to connect queue client')
    return queue_client
def init():
    """Test setup: load the dedupe queue URL from config and wipe the
    test collection so each run starts from an empty database.

    Exits the process (status 1) if the config file cannot be read.
    """
    global QUEUE_URL
    try:
        config = config_reader.get_config('../config/config.json')
    except Exception as e:
        # Config is mandatory for the test — bail out rather than continue
        # with an undefined queue URL.
        print(e)
        sys.exit(1)
    # print(config)
    QUEUE_URL = config["dedupe_task_queue_url"]
    # remove all documents in the db
    test_collection = mongodb_client.get_db(DB_NAME).get_collection(
        COLLECTION_NAME)
    # NOTE(review): Collection.remove() is deprecated in modern PyMongo —
    # delete_many({}) is the replacement; confirm the installed driver
    # version before changing.
    result = test_collection.remove()
    print('depepuer test: db cleaning result', result)
def setup_new_protocol():
    """Render the protocols page with map defaults and every known
    building zone available as a drawing guide."""
    # form: list of dictionaries containing building id, points, etc as guide
    all_buildings = GeoFeatures.getAllBuildings()
    cfg = config_reader.get_config()
    context = {
        'lat': cfg['start_lat'],
        'lng': cfg['start_lng'],
        'zoom': cfg['start_zoom'],
        'access_key': cfg['accessKey'],
        'zones': all_buildings,
    }
    return render_template('Protocols.html', protocols=getProtocols(), **context)
if 'building_id' in info: building_id = info['building_id'] global mrcnn if mrcnn is not None: building_id = mrcnn.delete_mask(lat, lng, zoom, building_id) json_post = {"rects_to_delete": {"ids": [building_id]}} return json_post return 'mrcnn has not been made' # run the app. if __name__ == "__main__": config = config_reader.get_config() REQUIRED_KEYS = [ 'imageryURL', 'accessKey', 'start_lat', 'start_lng', 'start_zoom' ] for k in REQUIRED_KEYS: if k not in config: print("[ERROR], REQUIRED KEY {} not in config".format(k)) else: application.config[k] = config[ k] # adds to the application config for access in templates # Get the imagery URL and access key imagery_url = config["imageryURL"] access_key = config["accessKey"]
import datetime
import hashlib
import os
import sys

import redis

# for unregular import
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'utils'))

import news_client
from cloud_amqp_client import AMQPClient
from config_reader import get_config

# TODO is this the best way to address it?
# Config lives two levels up from this module, alongside utils/.
config = get_config(
    os.path.join(os.path.dirname(__file__), '..', 'config', 'config.json'))

# Connection details for Redis and the scrape-task queue, all sourced
# from the external config file.
REDIS_HOST = config['redis_host']
REDIS_PORT = config['redis_port']
SCRAPE_NEWS_TASK_QUEUE_URL = config['scrape_task_queue_url']
SCRAPE_NEWS_TASK_QUEUE_NAME = config['scrape_task_queue_name']

# News source identifiers — presumably NewsAPI-style source slugs;
# verify against news_client before extending.
NEWS_SOURCES = [
    'cnn', 'bbc-sport', 'the-new-york-times', 'bloomberg', 'buzzfeed',
    'nbc-news'
]

SLEEP_TIME_IN_SECONDS = 60                  # pause between polling cycles
NEWS_TIME_OUT_IN_SECONDS = 3600 * 24 * 3    # 3 days: dedupe window for seen news
import uvloop
import asyncio

# Install uvloop as the asyncio event-loop policy before Sanic is imported.
# NOTE(review): set_event_loop_policy() returns None, so `event_loop` is
# always None — the name is misleading; confirm nothing imports it
# expecting an actual loop object.
event_loop = asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

from sanic import Sanic
from config_reader import get_config
from app.log_config import get_log_config

config = get_config()


def configure():
    """Push file config into the Sanic app and mount the v1 API blueprint."""
    sanic_app.config.update(config)
    # Imported here, after sanic_app exists, presumably to avoid a circular
    # import with the app package — confirm before moving to the top.
    from app.api.v1 import api_v1
    sanic_app.blueprint(api_v1, url_prefix='/api/v1')


sanic_app = Sanic(log_config=get_log_config())
configure()
def init(): global fleet_needs_resupply, current_fleetcomp, quest_item, expedition_item, combat_item, pvp_item, fleetcomp_switcher, default_quest_mode, sleep_cycle, settings settings, sleep_cycle = config_reader.get_config(settings, sleep_cycle) get_util_config() log_success("Config successfully loaded!") log_success("Starting kancolle_auto!") log_msg("Finding window!") focus_window() log_msg("Defining module items!") if settings['quests_enabled']: # Define quest item if quest module is enabled quest_item = quest_module.Quests(global_regions['game'], settings) log_success("Quest module started") if settings['expeditions_enabled']: # Define expedition list if expeditions module is enabled expedition_item = expedition_module.Expedition(global_regions['game'], settings) log_success("Expedition module started") if settings['pvp_enabled']: # Define PvP item if pvp module is enabled pvp_item = combat_module.PvP(global_regions['game'], settings) log_success("Combat module started (PvP mode)") if settings['combat_enabled']: # Define combat item if combat module is enabled combat_item = combat_module.Combat(global_regions['game'], settings) default_quest_mode = 'sortie' log_success("Combat module started (Sortie mode)") fleetcomp_switcher = combat_module.FleetcompSwitcher(global_regions['game'], settings) go_home(True) if settings['scheduled_sleep_enabled']: # If just starting script, set a sleep start time now_time = datetime.datetime.now() if now_time.hour * 100 + now_time.minute > int(settings['scheduled_sleep_start']): # If the schedule sleep start time for the day has passed, set it for the next day reset_next_sleep_time(True) else: # Otherwise, set it for later in the day reset_next_sleep_time() if settings['scheduled_stop_enabled'] and settings['scheduled_stop_mode'] == 'time': # If ScheduledStop is enabled and its mode is 'time', set the stop time on script start settings['scheduled_stop_time'] = datetime.datetime.now() + 
datetime.timedelta(hours=settings['scheduled_stop_count']) if settings['quests_enabled']: # Run through quests defined in quests item quest_action(default_quest_mode, True) if settings['expeditions_enabled']: # Run expeditions defined in expedition item go_home() expedition_item.go_expedition() expedition_action('all') if settings['pvp_enabled']: reset_next_pvp_time() now_time = datetime.datetime.now() if not 3 <= jst_convert(now_time).hour < 5: # Run PvP, but not between the time when PvP resets but quests do not! pvp_action() if settings['combat_enabled']: if settings['quests_enabled'] and settings['pvp_enabled']: # Run through quests defined in quests item quest_action('sortie', True) # Run sortie defined in combat item sortie_action() if settings['quests_enabled']: # Expedition or Combat event occured. Loop 'increases' quest_item.schedule_loop += 1 temp_need_to_check = quest_item.need_to_check() log_msg("Quest check loop count at %s; need to check is %s with %s quests being tracked" % (quest_item.schedule_loop, temp_need_to_check, quest_item.active_quests)) log_msg("Next quest check after %s sortie(s) / %s pvp(s) / %s expedition(s)" % ( quest_item.schedule_sorties[0] - quest_item.done_sorties if len(quest_item.schedule_sorties) > 0 else 0, quest_item.schedule_pvp[0] - quest_item.done_pvp if len(quest_item.schedule_pvp) > 0 else 0, quest_item.schedule_expeditions[0] - quest_item.done_expeditions if len(quest_item.schedule_expeditions) > 0 else 0 )) if temp_need_to_check: go_home() quest_action(default_quest_mode) temp_need_to_check = False # Disable need to check after checking display_timers()
var_list=variables) train_op = tf.group([train_op, ema_op]) return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default='../../data/conll2003ru') args = parser.parse_args() params = get_config(args.data) with Path('results/params.json').open('w') as f: json.dump(params, f, indent=4, sort_keys=True) def fwords(name): return str(Path(args.data, '{}.words.txt'.format(name))) def ftags(name): return str(Path(args.data, '{}.tags.txt'.format(name))) # Estimator, train and evaluate train_inpf = functools.partial(input_fn, fwords('train'), ftags('train'), params,
def get_imd():
    """Return an ImageryDownloader authorised with the configured access key.

    Intended usage: do ``imd = get_imd()`` in all functions.
    """
    access_key = config_reader.get_config()['accessKey']
    return imagery.ImageryDownloader(access_key)
from mod_main import main
from config_reader import get_config

if __name__ == '__main__':
    # Read the configuration (loaded for its side effects; the return
    # value is unused here).
    get_config()
    # Logger setup
    # Main processing
    main()
def get_program_config():
    """Thin wrapper around ``config_reader.get_config()``.

    Intended usage: do ``config = get_program_config()``.
    """
    program_config = config_reader.get_config()
    return program_config
import argparse

from config_reader import get_config, get_valid_dataset_names
from utils.data_loaders import get_data_loading_function
from wrappers.model_factory import make_model


def train(config, models_path, training_dataset):
    """Train the TENER model on ``training_dataset`` and save it under
    ``models_path``.

    :param config: dataset config dict; ``config['model_ids']['tener']``
        selects the model variant.
    :param models_path: directory where the trained model is written.
    :param training_dataset: dataset identifier passed to the model factory.
    """
    make_model(model_id=config['model_ids']['tener'],
               dataset=training_dataset,
               config=config).train_model(models_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--training_dataset', type=str, default='en-ontonotes')
    parser.add_argument('--models_folder', type=str,
                        default='/home/dima/models/ner')
    args = parser.parse_args()

    # The config is keyed by dataset name, so it is derived from the
    # chosen training dataset.
    config = get_config(args.training_dataset)
    train(config=config, models_path=args.models_folder,
          training_dataset=args.training_dataset)
# Django settings for idlebook project. import os import django.conf.global_settings as DEFAULT_SETTINGS import config_reader config = config_reader.get_config('settings.py') DEBUG = config['debug'] TEMPLATE_DEBUG = DEBUG ADMINS = ( (config['admin_name'], config['admin_email']), ) MANAGERS = ADMINS PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__)) SITE_URL = config['site_url'] DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. 'NAME': config['databases_name'], # Or path to database file if using sqlite3. 'USER': config['databases_user'], # Not used with sqlite3. 'PASSWORD': config['databases_password'], # Not used with sqlite3. 'HOST': config['databases_host'], # Set to empty string for localhost. Not used with sqlite3. 'PORT': config['databases_port'], # Set to empty string for default. Not used with sqlite3. } }
def init(): global fleet_needs_resupply, current_fleetcomp, quest_item, expedition_item, combat_item, pvp_item, fleetcomp_switcher, default_quest_mode, sleep_cycle, settings settings, sleep_cycle = config_reader.get_config(settings, sleep_cycle) get_util_config() log_success("Config successfully loaded!") log_success("Starting kancolle_auto!") log_msg("Finding window!") focus_window() log_msg("Defining module items!") if settings['quests_enabled']: # Define quest item if quest module is enabled quest_item = quest_module.Quests(global_regions['game'], settings) log_success("Quest module started") if settings['expeditions_enabled']: # Define expedition list if expeditions module is enabled expedition_item = expedition_module.Expedition(global_regions['game'], settings) log_success("Expedition module started") if settings['pvp_enabled']: # Define PvP item if pvp module is enabled pvp_item = combat_module.PvP(global_regions['game'], settings) log_success("Combat module started (PvP mode)") if settings['combat_enabled']: # Define combat item if combat module is enabled combat_item = combat_module.Combat(global_regions['game'], settings) default_quest_mode = 'sortie' log_success("Combat module started (Sortie mode)") if settings['pvp_enabled'] and settings['combat_enabled']: if settings['pvp_fleetcomp'] == 0 or settings['combat_fleetcomp'] == 0: # If either of the fleetcomp values are set to 0, do not define the fleet comp # switcher module pass elif settings['pvp_fleetcomp'] != settings['combat_fleetcomp']: # Define fleet comp switcher module if both pvp and combat modules are enabled # and they have different fleet comps assigned fleetcomp_switcher = combat_module.FleetcompSwitcher(global_regions['game'], settings) # Go home go_home(True) if settings['scheduled_sleep_enabled']: # If just starting script, set a sleep start time now_time = datetime.datetime.now() if now_time.hour * 100 + now_time.minute > int(settings['scheduled_sleep_start']): # If the schedule sleep 
start time for the day has passed, set it for the next day reset_next_sleep_time(True) else: # Otherwise, set it for later in the day reset_next_sleep_time() if settings['scheduled_stop_enabled'] and settings['scheduled_stop_mode'] == 'time': # If ScheduledStop is enabled and its mode is 'time', set the stop time on script start settings['scheduled_stop_time'] = datetime.datetime.now() + datetime.timedelta(hours=settings['scheduled_stop_count']) if settings['quests_enabled']: # Run through quests defined in quests item quest_action(default_quest_mode, True) if settings['expeditions_enabled']: # Run expeditions defined in expedition item go_home() expedition_item.go_expedition() expedition_action('all') if settings['pvp_enabled']: reset_next_pvp_time() now_time = datetime.datetime.now() if not 3 <= jst_convert(now_time).hour < 5: # Run PvP, but not between the time when PvP resets but quests do not! pvp_action() if settings['combat_enabled']: if settings['quests_enabled'] and settings['pvp_enabled']: # Run through quests defined in quests item quest_action('sortie', True) # Run sortie defined in combat item sortie_action() if settings['quests_enabled']: # Expedition or Combat event occured. 
Loop 'increases' quest_item.schedule_loop += 1 temp_need_to_check = quest_item.need_to_check() log_msg("Quest check loop count at %s; need to check is %s with %s quests being tracked" % (quest_item.schedule_loop, temp_need_to_check, quest_item.active_quests)) log_msg("Next quest check after %s sortie(s) / %s pvp(s) / %s expedition(s)" % ( quest_item.schedule_sorties[0] - quest_item.done_sorties if len(quest_item.schedule_sorties) > 0 else 0, quest_item.schedule_pvp[0] - quest_item.done_pvp if len(quest_item.schedule_pvp) > 0 else 0, quest_item.schedule_expeditions[0] - quest_item.done_expeditions if len(quest_item.schedule_expeditions) > 0 else 0 )) if temp_need_to_check: go_home() quest_action(default_quest_mode) temp_need_to_check = False # Disable need to check after checking display_timers()
import os import sys import redis import news_fetcher sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'utils')) import news_client from cloud_amqp_client import AMQPClient import queue_cleaner from config_reader import get_config config = get_config('../config/config.json') SCRAPE_QUEUE_URL = config["scrape_task_queue_url"] DEDUPE_QUEUE_URL = config["dedupe_task_queue_url"] SCRAPE_NEWS_TASK_QUEUE_NAME = config["scrape_task_queue_name"] DEDUPE_NEWS_TASK_QUEUE_NAME = config["dedupe_task_queue_name"] TEST_SCRAPE_TASK = [ 'not a dict', { 'url': 'some-other-source.com', 'source': 'not cnn', }, { 'title': 'Uber pulls self-driving cars after first fatal crash of autonomous vehicle', 'url': 'http://money.cnn.com/2018/03/19/technology/uber-autonomous-car-fatal-crash/index.html', 'source': 'cnn', 'publishedAt': '2018-03-18T20:36:47Z' }, {
def predict(config, model_path, prediction_dataset, prediction_subset,
            output_path):
    """Load a trained TENER model and export its predictions for one
    subset of a dataset.

    :param config: dataset config dict; ``config['model_ids']['tener']``
        selects the model variant.
    :param model_path: path of the saved model to load.
    :param prediction_dataset: dataset identifier to predict on.
    :param prediction_subset: subset name (e.g. 'test') within the dataset.
    :param output_path: file where predictions are written.
    """
    make_model(model_id=config['model_ids']['tener'],
               dataset=prediction_dataset,
               config=config).load(model_path).export_predictions(
        prediction_dataset, prediction_subset, output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--prediction_dataset', type=str,
                        default='tmp/conll2003ru-predicted')
    parser.add_argument('--model_file', type=str,
                        default='/home/dima/models/ner/bio')
    parser.add_argument('--output_file', type=str, default='predictions.txt')
    parser.add_argument('--subset', type=str, default='test')
    args = parser.parse_args()

    # Config is keyed by dataset name, so it is derived from the dataset
    # being predicted on.
    config = get_config(args.prediction_dataset)
    predict(config=config,
            model_path=args.model_file,
            prediction_dataset=args.prediction_dataset,
            prediction_subset=args.subset,
            output_path=args.output_file)