import sys

from utility import read_config


def main():
    # Use the config path from the command line, falling back to a default.
    try:
        path = sys.argv[1]
    except IndexError:
        path = "server_config.json"
    config = read_config(path)
    server = Server(config)
    server.loop()
import sys

from utility import read_config


def main():
    # Use the config path from the command line, falling back to a default.
    try:
        path = sys.argv[1]
    except IndexError:
        path = "client_config.json"
    config = read_config(path)
    client = Client(config)
    client.loop()
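# Both entry points above assume a read_config(path) helper that parses a JSON
# configuration file into a dict. A minimal sketch of such a helper, assuming
# plain JSON on disk (an illustration, not the project's actual
# utility.read_config implementation):
import json


def read_config(path):
    """Load the JSON config file at `path` and return it as a dict."""
    with open(path) as f:
        return json.load(f)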
from utility import open_url, read_config

open_url(read_config('base_url'))
import base64
import logging
import os
import traceback
from datetime import datetime

import pytz
from google.api_core import retry
from google.cloud import bigquery
from google.cloud import firestore
from google.cloud import pubsub_v1
from google.cloud import storage

from utility import read_config

config = read_config()
PROJECT_ID = config['devshell_project_id']
BQ_DATASET = config['dataset']
BQ_TABLE = 'dc'
ERROR_TOPIC = 'projects/%s/topics/%s' % (PROJECT_ID, config['streaming_error_topic'])
SUCCESS_TOPIC = 'projects/%s/topics/%s' % (PROJECT_ID, config['streaming_success_topic'])

DB = firestore.Client()
CS = storage.Client()
PS = pubsub_v1.PublisherClient()
BQ = bigquery.Client()


def move_file(data, context):
    '''This function is triggered by a Cloud Pub/Sub message.'''
    message = base64.b64decode(data['data']).decode('utf-8')
    logging.info("move_file received the message {}".format(message))
    file_name = data['attributes']['file_name']
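# The module above calls read_config() with no arguments and indexes the result
# like a dict. A sketch of such a zero-argument variant, assuming a config.json
# stored next to the module (both the filename and its location are
# illustrative assumptions, not confirmed by the source):
import json
import os


def read_config():
    """Load the module's default JSON configuration into a dict."""
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, 'config.json')) as f:
        return json.load(f)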
from selenium import webdriver

from utility import read_config

driver = webdriver.Chrome(read_config('driver'))
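# Here (and in the open_url example earlier) read_config takes a single key and
# returns the matching value directly. A plausible sketch of that variant,
# again assuming the settings live in a JSON file (config.json is an
# illustrative name, not confirmed by the source):
import json


def read_config(key):
    """Return the value stored under `key` in the JSON settings file."""
    with open('config.json') as f:
        return json.load(f)[key]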
import time

import globals as gl
from utility import (read_config, clear_previous_results,
                     check_pathway_update_history, load_last_csv, create_zip)
from draw import draw_json_run
from analysis import run_analysis

# --------------- INITIAL START TIME --------------
start_time = time.time()

# -------------- INITIAL MAIN --------------
print("----- INITIAL SHELL PARAMETERS -----")
read_config()

if gl.mode_input == 0:
    print("----- CLEAN PREVIOUS RESULTS -----")
    clear_previous_results()
    starting_depth = 1
else:
    print("----- LOAD LAST RESULTS (CSV) SAVED -----")
    starting_depth = load_last_csv()

print("----- CHECK UPDATED PATHWAYS -----")
check_pathway_update_history('https://www.genome.jp/kegg/docs/upd_map.html')

print("----- START ANALYSIS -----")
run_analysis(starting_depth)
print("----- END ANALYSIS -----")

print("----- START GENERATE OUTPUT -----")
draw_json_run()
print("----- END GENERATE OUTPUT -----")
import argparse
import json
import os

import utility

# CURRENT_DIR is expected to be defined earlier in the original module
# (typically the directory containing this script).

parser = argparse.ArgumentParser()
parser.add_argument('-r', '--report', type=str, required=True,
                    help="Name of the report to generate.")
args = parser.parse_args()

REPORT_DIR = os.path.join(CURRENT_DIR, 'reports', args.report)
TARGET_DIR = os.path.join(CURRENT_DIR, 'results')
TARGET_REPORT = os.path.join(TARGET_DIR, '%s.html' % args.report)
TEMP_DIR = os.path.join(CURRENT_DIR, 'temp')
GLOBAL_MAPPINGS_PATH = os.path.join(CURRENT_DIR, 'global.conf')

assert os.path.exists(REPORT_DIR)

MAPPINGS_PATH, REPORT_TEMPLATE_PATH, SETTINGS_PATH = map(
    lambda n: os.path.join(REPORT_DIR, n),
    ['mappings.json', 'report_template.html', 'settings.conf'])
assert all(map(os.path.exists, [MAPPINGS_PATH, REPORT_TEMPLATE_PATH, SETTINGS_PATH]))

with open(MAPPINGS_PATH, 'rb') as f:
    REPORT_MAPPING = json.load(f)

CONNECTION_STR = utility.read_config(SETTINGS_PATH, 'database', 'connection_string')
REPORT_NAME = utility.read_config(SETTINGS_PATH, 'global', 'report_name')
FROM_EMAIL = utility.read_config(GLOBAL_MAPPINGS_PATH, 'email', 'from')
TO_EMAIL = utility.read_config(GLOBAL_MAPPINGS_PATH, 'email', 'to')
SMTP = utility.read_config(GLOBAL_MAPPINGS_PATH, 'email', 'smtp')
USER = utility.read_config(GLOBAL_MAPPINGS_PATH, 'email', 'username')
PWD = utility.read_config(GLOBAL_MAPPINGS_PATH, 'email', 'password')


def generate_x_data(chart_data):
    # Replace the category descriptor with the actual category values pulled
    # from the report data (report_data is a global defined elsewhere).
    x = chart_data["xAxis"]["categories"]["data"]
    x_col = chart_data["xAxis"]["categories"]["name"]
    chart_data["xAxis"]["categories"] = [xItem.__dict__[x_col] for xItem in report_data[x]]


def generate_y_data(chart_data):
    series = []
    for series_info in chart_data["series"]:
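# The report generator calls read_config(path, section, key), which points to
# an INI-style file. A sketch of that variant built on the standard library's
# configparser (an assumption about how utility.read_config is implemented,
# matching only the call signature used above):
import configparser


def read_config(path, section, key):
    """Return the value of `key` within `section` of the INI file at `path`."""
    parser = configparser.ConfigParser()
    parser.read(path)
    return parser.get(section, key)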