def route__do_lookup():
    begin_location_name = request.forms.get("begin_location")
    end_location_name = request.forms.get("end_location")
    begin_location_data = calculation.get_place_data(begin_location_name)
    end_location_data = calculation.get_place_data(end_location_name)
    # Build a link to OSM's foot-routing directions between the two places
    field = calculation.form_url(
        "http://openstreetmap.org/directions",
        {
            "engine": "fossgis_osrm_foot",
            "route": "{0},{1};{2},{3}".format(
                begin_location_data["location"].latitude,
                begin_location_data["location"].longitude,
                end_location_data["location"].latitude,
                end_location_data["location"].longitude,
            ),
        })
    route = calculation.get_route(begin_location_data["location"],
                                  end_location_data["location"])
    path_covid_results = calculation.get_path_covid_results(route)
    with open(utils.get_project_dir() / "website" / "templates" / "table.html") as table_template_file:
        table_template = SimpleTemplate(table_template_file.read())
    print(path_covid_results["travel"]["places_with_most_covid"])
    table_template_rendered = table_template.render({
        "field": field,
        "tpc": path_covid_results["travel"]["total_people_contact"],
        "wtpc": path_covid_results["travel"]["weighted_total_people_contact"],
        "mcc": path_covid_results["travel"]["places_with_most_covid"],
    })
    return table_template_rendered
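<!-- A minimal table.html sketch for context: the real template is not part of
     this snippet, so the markup below is an assumption built only from the
     four names the handler passes to render() (Bottle SimpleTemplate syntax). -->
<p><a href="{{field}}">Open this route in OpenStreetMap</a></p>
<table>
  <tr><th>Total people contact</th><td>{{tpc}}</td></tr>
  <tr><th>Weighted total people contact</th><td>{{wtpc}}</td></tr>
  <tr><th>Places with most COVID</th><td>{{mcc}}</td></tr>
</table>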
def provision():
    """
    Provisions the server with PostgreSQL and an Nginx reverse proxy.

    Currently only supports Debian (e.g. Ubuntu) targets.
    """
    require.deb.packages([
        'libpq-dev',
        'python-dev',
        'postgresql',
        'postgresql-contrib',
    ])

    # Set up a PostgreSQL server with a default user
    require.postgres.server()
    require.postgres.user(name=env.user, password=env.password, superuser=True)
    require.postgres.database(name='%(user)s_db' % env, owner=env.user)

    # Set up an Nginx proxied site
    require.nginx.server()
    require.nginx.proxied_site(
        '_',  # empty server name
        port=80,
        proxy_url='http://127.0.0.1:8080',
        docroot='%s/www' % utils.get_project_dir(),
    )
    require.nginx.disabled('default')

    require.python.virtualenv('virtual_environment')
def __init__(self, zoom_path=None) -> None:
    # Read config.ini
    self.project_dir = utils.get_project_dir()
    config = ConfigParser()
    config.read(self.project_dir + "/config.ini")
    self.data_csv_path = config["zoom_joiner"]["data_path"]
    self.WEB_DRIVER_PATH = config["zoom_joiner"]["firefox_web_driver_path"]
    self.FIREFOX_PATH = config["zoom_joiner"]["firefox_path"]
    self.ZOOM_PATH = config["zoom_joiner"]["zoom_path"]

    # Read the meeting schedule
    self.data = pd.read_csv(self.data_csv_path)
    self.WEEKDAYS = {
        "monday": 1,
        "tuesday": 2,
        "wednesday": 3,
        "thursday": 4,
        "friday": 5,
        "saturday": 6,
        "sunday": 7,
    }

    # Selenium options
    self.options = webdriver.FirefoxOptions()
    self.options.add_argument("--test-type")
    self.options.binary_location = self.FIREFOX_PATH  # path to the browser binary

    self.driver = None
    self.recording = False
    self.debug = False
def __init__(self, file=None):
    self.project_dir = utils.get_project_dir()
    if file is None:
        self.conf_file = os.path.sep.join(
            [self.project_dir, 'docs', 'ss-python.conf'])
    else:
        self.conf_file = file
    self._conf = utils.load_file(self.conf_file)
def migrate():
    """
    Run your migrations, equivalent to fab <target> manage.migrate
    """
    with cd(utils.get_project_dir()), fabtools.python.virtualenv(utils.get_venv_dir()):
        fabtools.python.install_requirements('requirements/requirements-prod.txt')
        run('./manage.py migrate')
def __init__(self, output_filename) -> None:
    # TODO add duration handling:
    # https://stackoverflow.com/questions/6896490/how-to-set-a-videos-duration-in-ffmpeg

    # Read config.ini
    config = ConfigParser()
    config.read(utils.get_project_dir() + '/config.ini')
    self.OUT_DIR = config['screen_recorder']['output_dir']
    self.duration = config['screen_recorder']['default_duration']
    self.AUDIO_DEVICE = config['screen_recorder']['audio_device']

    self.file_name = output_filename
    self.output_file_path = self.OUT_DIR + "/" + self.file_name + ".mkv"
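# A minimal config.ini sketch covering the zoom_joiner and screen_recorder
# readers above: the section and key names come from the code, while every
# value is a placeholder assumption.
[zoom_joiner]
data_path = /home/user/zoom_meetings.csv
firefox_web_driver_path = /usr/local/bin/geckodriver
firefox_path = /usr/bin/firefox
zoom_path = /usr/bin/zoom

[screen_recorder]
output_dir = /home/user/recordings
default_duration = 3600
audio_device = default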
def gunicorn():
    """
    Runs gunicorn under supervisord. Does not watch for code changes.
    """
    with cd(utils.get_project_dir()), fabtools.python.virtualenv(utils.get_venv_dir()):
        fabtools.python.install_requirements('requirements/requirements-prod.txt')
        stop()
        require.supervisor.process(
            'django_app',
            command='%(venv)s/bin/gunicorn -b 127.0.0.1:8080 '
                    '--chdir %(project_dir)s %(project_name)s.wsgi' % {
                        'venv': utils.get_venv_dir(),
                        'project_dir': utils.get_project_dir(),
                        'project_name': env.project_name,
                    },
            user=env.user,
            stopasgroup=True,  # added for symmetry with manage.py runserver
        )
def runserver():
    """
    Runs Django's builtin runserver
    """
    with cd(utils.get_project_dir()), fabtools.python.virtualenv(utils.get_venv_dir()):
        fabtools.python.install_requirements('requirements/requirements-prod.txt')
        stop()
        require.supervisor.process(
            'django_app',
            command='%(venv)s/bin/python %(project)s/manage.py runserver 8080' % {
                'venv': utils.get_venv_dir(),
                'project': utils.get_project_dir(),
            },
            user=env.user,
            stopasgroup=True,  # manage.py doesn't clean up its child processes
        )
def start_self(self):
    pid_file = self.config.get('pid_file', utils.get_default_pid_file())
    if os.path.exists(pid_file):
        print('ss-python is already running...')
    else:
        project_dir = utils.get_project_dir()
        sh = os.path.sep.join([project_dir, 'bin', 'ss-python.sh'])
        cmd = 'sh ' + sh
        exe = subprocess.Popen(cmd, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
        print(exe.stdout.readlines())
def __call__(self, args):
    # This command takes no arguments
    if args:
        return None
    if not is_mbed_dir():
        error("Run this command from an mbed project directory.")
        return False
    rname, rdata = MbedProject.read_repo_info()
    indent = max(len(repo_name(k["url"])) for k in rdata)
    indent2 = max(len(k["url"]) for k in rdata)
    info("Base directory: %s" % get_project_dir())
    info("List of repositories for project %s:\n" % rname)
    # Doubled %% escape literal percent signs, so e.g. indent=5, indent2=20
    # yields the format "%5s: %-20s (cloned in %s)"
    fmt = "%%%ds: %%-%ds (cloned in %%s)" % (indent, indent2)
    for r in rdata:
        dirname = "the base directory" if r["dir"] == "." else r["dir"]
        info(fmt % (repo_name(r["url"]), r["url"], dirname))
    return True
def get_county(location):
    with open(utils.get_project_dir() / "state_file.txt", "r") as state_file:
        state_long_names_to_names = json.loads(state_file.read())
    url_base = "http://www.openstreetmap.org/geocoder/search_osm_nominatim_reverse"
    url = form_url(
        url_base,
        {
            "lat": location.latitude,
            "lon": location.longitude,
            "zoom": 17,
            "minlon": -122,
            "minlat": 47,
            "maxlon": -121,
            "maxlat": 48,
        })
    print(url)
    response = get_web_resource(url)
    print(response)

    # Pull the county name out of the first <a data-name="..."> tag in the response
    class StreamingHTMLParser(html.parser.HTMLParser):
        def __init__(self):
            html.parser.HTMLParser.__init__(self)
            self.data = None

        def handle_starttag(self, tag_name, attributes):
            attributes_d = dict(attributes)
            if tag_name == "a" and "data-name" in attributes_d:
                self.data = attributes_d["data-name"]

    parser = StreamingHTMLParser()
    parser.feed(response)
    parser.close()
    match = re.search(r"(\w* County), ([^,]*)", parser.data)
    county_name = match.group(1)
    state_long_name = match.group(2)
    state_name = state_long_names_to_names[state_long_name]
    return (county_name, state_name)
def to_yaml_path(yaml_name):
    yaml_dir = os.path.join(get_project_dir(), "yamls")
    return os.path.join(yaml_dir, yaml_name)
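# Usage sketch for to_yaml_path, assuming get_project_dir() returns the
# hypothetical path "/home/user/proj":
to_yaml_path("experiment.yaml")  # -> "/home/user/proj/yamls/experiment.yaml"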
def route__file(filepath):
    return static_file(filepath, root=utils.get_project_dir() / "website" / "static")
def setUp(self):
    proj_dir = utils.get_project_dir()
    self.source_file = os.path.join(proj_dir, 'tests', 'resources', TEST_FILE)
def full_path(p):
    return normpath(join(get_project_dir(), p))
def rel_path(p):
    return relpath(normpath(p), get_project_dir()).replace('\\', '/')
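# Round-trip sketch for full_path/rel_path; the paths are hypothetical and
# assume a POSIX system where get_project_dir() returns "/home/user/proj":
full_path("src/main.c")                 # -> "/home/user/proj/src/main.c"
rel_path("/home/user/proj/src/main.c")  # -> "src/main.c" (backslashes normalized to "/")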
def load_experiment_and_model_params(experiment_name, model_name) -> MyParams:
    loaded_params = MyParams(
        yaml_file_to_config_name={
            to_yaml_path("experiment.yaml"): experiment_name,
            to_yaml_path("models.yaml"): model_name,
        },
        value_magician={
            "model": lambda p: {
                "SFDEN": SFDEN,
                "SFEWC": SFEWC,
                "SFHPS": SFHPS,
                "SFLCL": SFLCL,
            }[p.model],
            "checkpoint_dir": lambda p: os.path.join(
                get_project_dir(), p.checkpoint_dir, p.model, p.mtype,
            ),
            "fig_dir": lambda p: os.path.join(
                get_project_dir(), p.fig_dir,
                "{}_{}".format(p.model, p.expr_type), p.mtype,
            ),
            "mask_type": lambda p: {
                "ADAPTIVE": MaskType.ADAPTIVE,
                "HARD": MaskType.HARD,
            }[p.mask_type],
        },
        attribute_list_for_hash=[
            "lr", "batch_size", "l1_lambda", "l2_lambda", "n_tasks",
            "importance_criteria", "need_coreset", "online_importance",
            "use_cges", "use_set_based_mask", "max_iter", "gl_lambda",
            "regular_lambda", "ex_k", "loss_thr", "spl_thr", "ewc_lambda",
            "mask_type", "mask_alpha", "mask_not_alpha", "dtype", "mtype",
            "use_batch_normalization",
        ])
    gpu_num_list = blind_other_gpus(loaded_params.num_gpus_total,
                                    loaded_params.num_gpus_to_use)
    loaded_params.add_hparam("gpu_num_list", gpu_num_list)
    check_params(loaded_params)
    loaded_params.pprint()
    return loaded_params
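# Usage sketch: the experiment name is hypothetical; "SFDEN" is one of the
# model keys the loader maps above.
params = load_experiment_and_model_params("my_experiment", "SFDEN")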
import itertools
import os
import warnings
from datetime import datetime, timedelta
from pathlib import Path

import numpy as np
import pandas as pd
from tqdm import tqdm

from builds.pairplotlib import PairPlotLib
from builds.statistics import ADF, Half_Life, Hurst
from models.spread_builder import SpreadBuilder
from SETTINGS import (METHODS, FROM_, PLOT_FROM_, ROLLING, WINDOW,
                      ADDITIONAL_FEATURES, SHEET_COLUMNS, SHEET_HEIGHT)

warnings.filterwarnings("ignore")

IN_DIR = os.path.join(get_project_dir(), 'data', 'peer_groups')


class PeerGroup:

    def __init__(self):
        # Two separate lists: the original chained assignment
        # (self._analysis = self._summary = []) aliased both names to one list
        self._analysis = []
        self._summary = []

    def set_peer_group(self, peer_group, **kwargs):
        lst = peer_group[['LONG ASSET SYMBOL', 'LONG ASSET NAME',
                          'SHORT ASSET SYMBOL', 'SHORT ASSET NAME']].values.tolist()
        # Regroup each row into [[long symbol, long name], [short symbol, short name]]
        lst = [[[x[0], x[1]], [x[2], x[3]]] for x in lst]
        self._group = lst

    def analyze(self, filter_ks=True):
        for pair in tqdm(self._group, total=len(self._group),
                         desc=" [*] Analyzing Pairs..."):
            y_asset = bloomberg2asset(bloomberg_symbol=pair[0][0],
                                      name=pair[0][1], filter_ks=filter_ks)
            x_asset = bloomberg2asset(bloomberg_symbol=pair[1][0],
                                      name=pair[1][1], filter_ks=filter_ks)
def __init__(self, where=None):
    self.where = where or get_project_dir()
def wrapper():
    with cd(utils.get_project_dir()), fabtools.python.virtualenv(utils.get_venv_dir()):
        run('./manage.py %s' % command_name)
def get_api_key():
    with open(utils.get_project_dir() / "google_maps_api_key.txt") as key_file:
        return key_file.read().strip()
def summarize(self, outpath=None):
    # Build the default path at call time; as a default argument it would be
    # evaluated once at import time, freezing the timestamp
    if outpath is None:
        outpath = os.path.join(get_project_dir(), 'output', 'peer_analysis',
                               datetime.now().strftime('%Y%m%d%H%M%S'))
    Path(outpath).mkdir(parents=True, exist_ok=True)
    xlsx_manager = XlsxMgr(path=outpath, fname='summary.xlsx')
    for i, method in enumerate(METHODS):
        xlsx_manager.add_sheet(method)
        xlsx_manager.set_sheet(method)

        # Initialize the header row; each SHEET_COLUMNS value holds
        # (title, width, format name, comment)
        for col, items in enumerate(SHEET_COLUMNS.items()):
            key, value = items[0], items[1]
            xlsx_manager.sheet.set_column(col, col, float(value[1]),
                                          xlsx_manager.format_dic[value[2]])
            xlsx_manager.write(0, col, value[0],
                               cell_format=xlsx_manager.format_dic['header'])
            xlsx_manager.sheet.write_comment(row=0, col=col, comment=value[3])

        matrix = np.empty(shape=(len(self._analysis), len(SHEET_COLUMNS)), dtype=object)
        for row, pair in tqdm(enumerate(self._analysis), total=len(self._analysis),
                              desc=" {} Method ... ({}/{})".format(method, i + 1, len(METHODS))):
            ppl = PairPlotLib(pair, from_=PLOT_FROM_)
            xlsx_manager.sheet.set_row(row + 1, SHEET_HEIGHT)
            for col, items in enumerate(SHEET_COLUMNS.items()):
                key, format_ = items[0], xlsx_manager.format_dic[items[1][2]]
                if key == 0:  # row number
                    value = row + 1
                elif key == 1:
                    value = '{} {} Equity'.format(pair.y_asset.symbol, pair.y_asset.exchange)
                elif key == 2:
                    value = '{} {} Equity'.format(pair.x_asset.symbol, pair.x_asset.exchange)
                elif key == 3:
                    value = pair.y_asset.name
                elif key == 4:
                    value = pair.x_asset.name
                elif key == 5:
                    value = pair.spread[method][-1] if len(pair.spread[method]) > 0 else None
                elif 6 <= key <= 10:
                    # Chart columns: render a figure, save it, then embed the image
                    value = ' '
                    if key == 6:
                        y = pair.data['y']
                        x = pair.data['x']
                        fig, _ = ppl.price(x=x, y=y, title='Abs Price')
                    elif key == 7:
                        fig, _ = ppl.spread(method=method)
                    elif key == 8:
                        fig, _ = ppl.spread_box(method=method)
                    elif key == 9:
                        fig, _ = ppl.spread_dist(method=method)
                    elif key == 10:
                        fig, _ = ppl.linear_regression(method='TLS')
                    img_name = '{}_{}_{}_{}.png'.format(
                        pair.x_asset.symbol, pair.y_asset.symbol, method, key)
                    fig.savefig(xlsx_manager.get_path(path=outpath, file_name=img_name, is_img=True),
                                bbox_inches="tight")
                    xlsx_manager.insert_img(row + 1, col, file_name=img_name,
                                            scale={'y_offset': 15, 'x_offset': 15})
                elif key == 11:
                    value = pair.data['y'][-1]
                elif key == 12:
                    value = pair.data['x'][-1]
                elif key == 13:
                    value = pair.data['rolling_corr'][-1] if len(pair.data['rolling_corr']) > 0 else None
                elif key == 14:
                    value = pair.data[method + '_p_value']
                elif key == 15:
                    value = pair.data[method + '_hurst']
                elif key == 16:
                    value = pair.data[method + '_half_life']
                elif key == 17:
                    value = pair.y_asset.data['AVG_TRADING_VALUE']
                elif key == 18:
                    value = pair.x_asset.data['AVG_TRADING_VALUE']
                elif key == 19:
                    value = pair.y_asset.data["DIVIDEND_YIELD"]
                elif key == 20:
                    value = pair.x_asset.data["DIVIDEND_YIELD"]
                else:
                    raise Exception
                xlsx_manager.write(row + 1, col, value, cell_format=format_)
                matrix[row, col] = value
        xlsx_manager.sheet.autofilter(0, 0, 0, len(SHEET_COLUMNS) - 1)
    xlsx_manager.close()
    return
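# End-to-end usage sketch for PeerGroup, assuming a CSV in IN_DIR with the four
# columns that set_peer_group() selects; the file name is hypothetical.
peer_group_df = pd.read_csv(os.path.join(IN_DIR, "peer_group.csv"))
pg = PeerGroup()
pg.set_peer_group(peer_group_df)
pg.analyze()
pg.summarize()  # writes summary.xlsx and chart images under output/peer_analysis/<timestamp>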
COLLECTION_LOG = 'data_collection'

logger = logging.getLogger(COLLECTION_LOG)
log_file_handle = logging.FileHandler('collection.log')
logger.addHandler(log_file_handle)


class DataCollector:

    def __init__(self, source):
        if not source:
            raise websteel_exc.NoSourceSpecified('Please specify a valid data source')
        self.source = source
        self.logger = logging.getLogger(COLLECTION_LOG)


class SpreadSheetDataCollector(DataCollector):

    def __init__(self, source):
        super(SpreadSheetDataCollector, self).__init__(source)

    def parse_source_file(self):
        book = xlrd.open_workbook(self.source)
        sheet = book.sheet_by_index(0)
        # Collect every data row, keyed by row index (row 0 is the header)
        sheet_data = {}
        for row_index in range(1, sheet.nrows):
            sheet_data[row_index] = sheet.row(row_index)
        return sheet_data


if __name__ == '__main__':
    x = utils.get_project_dir()
    print(x)