def test_run_engine(self):
    argv_copy = argv.copy()
    argv[1:] = [
        '--engine', 'mock_engine.yaml',
        '--dir', self.workspace,
        '--targets', 'deliverables', '9900',
        '--deps',
        '--conf', 'mock_params.ini',
    ]
    try:
        run_engine(logs=False)
    finally:
        argv[1:] = argv_copy[1:]
    name = None
    for item in listdir(self.workspace):
        if '9900' in item:
            name = item
            break
    self.assertIsNotNone(name)
    with open(join(self.workspace, name)) as f:
        roll_call = f.readline().strip().split(' ')
    self.assertListEqual(
        roll_call,
        ['foo', 'baz_alternative', 'quux', 'report_and_plots', 'summary'])
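# The test above temporarily rewrites sys.argv and restores it in a finally
# block. A minimal sketch of the same save/restore pattern as a reusable
# context manager; the name patched_argv is illustrative and not part of the
# original test suite.
from contextlib import contextmanager
from sys import argv


@contextmanager
def patched_argv(*args):
    saved = argv.copy()
    argv[1:] = list(args)
    try:
        yield argv
    finally:
        argv[1:] = saved[1:]

# Usage (run_engine as in the test above):
# with patched_argv('--engine', 'mock_engine.yaml', '--conf', 'mock_params.ini'):
#     run_engine(logs=False)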
# If the file is run directly
# noinspection SpellCheckingInspection
if __name__ == '__main__':
    from sys import argv

    # Initialize FaceDetection and set up the list of mail recipients
    nano = NanoFaceDetection(admines=[
        ('Chiến', '*****@*****.**'),
    ])
    # if len(argv) == 1:
    #     exit(nano.run(test=True, webcam=True))

    # If an argument was provided
    if len(argv) == 2:
        # Get the argument
        command = argv.copy().pop()
        # If the command is reset
        if command == 'reset':
            exit(nano.clean_classifier())
        # If the command is test
        if command == 'test':
            exit(nano.run(test=True))
        # If the command is webcam
        # noinspection SpellCheckingInspection
        if command == 'webcam':
            exit(nano.run(webcam=True))
        # If it is a web link
        if command.startswith('http://') or command.startswith('https://'):
            from requests import get
            # Download the file
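# The snippet above stops right before the download step. A minimal sketch of
# what that step could look like with requests.get; the filename handling is
# an assumption, not taken from the original project.
from requests import get


def download(url: str) -> str:
    response = get(url, timeout=30)
    response.raise_for_status()
    filename = url.rstrip('/').rsplit('/', 1)[-1] or 'download.bin'
    with open(filename, 'wb') as handle:
        handle.write(response.content)
    return filename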
#!/usr/bin/env python3
from django.core.management import execute_from_command_line as execute
from os import environ, remove
from os.path import join, dirname, abspath, isfile
from sys import argv

FIXTURES = ['auth', 'teams', 'hosts', 'services', 'checks', 'injects']
PROJECT_PATH = dirname(abspath(__file__))
FIXTURES_PATH = join(PROJECT_PATH, 'fixtures')
DATABASE = join(PROJECT_PATH, 'heartbeat', 'db.sqlite3')


def init():
    if isfile(DATABASE):
        remove(DATABASE)
    execute(['manage.py', 'migrate'])
    for fixture in FIXTURES:
        execute([
            'manage.py', 'loaddata',
            join(FIXTURES_PATH, '{}.json'.format(fixture))
        ])


if __name__ == "__main__":
    args = argv.copy()
    environ.setdefault("DJANGO_SETTINGS_MODULE", "heartbeat.settings")
    if len(args) > 1 and args[1] == 'init':
        init()
    else:
        execute(args)
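# The init() helper above drives each step through execute_from_command_line
# with argv-style lists. A sketch of the same steps using Django's
# call_command, which takes the command name and its arguments directly; it
# assumes the FIXTURES, FIXTURES_PATH and DATABASE constants defined above.
from django.core.management import call_command


def init_with_call_command():
    if isfile(DATABASE):
        remove(DATABASE)
    call_command('migrate')
    for fixture in FIXTURES:
        call_command('loaddata', join(FIXTURES_PATH, '{}.json'.format(fixture)))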
import datetime
import time

import pandas as pd
from pyspark.sql import Row, SparkSession
from pyspark.sql.functions import *
from sys import argv

spark = SparkSession.builder.appName('SparkLogProcessor') \
    .enableHiveSupport().getOrCreate()
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "true")
sc = spark.sparkContext
sc.setLogLevel('ERROR')

# argv is expected to hold exactly three items: the script name, the log file
# location, and the time to wait before exiting
module, fileloc, wait_before_exit = argv.copy()
print(
    f'module is {module} and fileloc is {fileloc} and wait_before_exit is {wait_before_exit}'
)

logFileRDD = sc.textFile(fileloc)
print("No of logs ", logFileRDD.count())
logFileRDD.take(2)

# make provisions for processing the logs
# use a regex to split the line into the nine groups and map it to a sql row of nine columns
month_map = {
    'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5,
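# The comment above calls for a regex that splits each line into nine groups
# and maps it to a Row. A sketch of that step, assuming the lines follow the
# Apache common log format; the pattern and column names are assumptions, not
# taken from the original job.
import re

LOG_PATTERN = r'^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'


def parse_log_line(line):
    match = re.search(LOG_PATTERN, line)
    if match is None:
        return None
    return Row(host=match.group(1),
               client_id=match.group(2),
               user_id=match.group(3),
               timestamp=match.group(4),
               method=match.group(5),
               endpoint=match.group(6),
               protocol=match.group(7),
               status=int(match.group(8)),
               content_size=0 if match.group(9) == '-' else int(match.group(9)))

# parsed_rows = logFileRDD.map(parse_log_line).filter(lambda row: row is not None)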
    sep = param.index("=")
    key = param[:sep]
    param_dict[key] = correct_type(source_type, key, param[sep + 1:])
    return (param_dict, source_type)


# TODO provide separate mechanism for quitting the program
def running_interface(connector: Connector):
    try:
        while True:
            print("Running...")
            sleep(3)
    except KeyboardInterrupt:
        connector.stop()
        raise


# TODO write simple command line UI for testing code
if __name__ == "__main__":
    parsed = parse_args(argv.copy())
    if parsed:
        params, source_factory = parsed
        try:
            source = source_factory.create(**params)
            connector = Connector.factory(source, 24)
            connector.start()
            running_interface(connector)
        except TypeError as e:
            print_params(source_factory)
            raise e
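# One possible answer to the TODO about a separate quit mechanism: drive the
# loop from a threading.Event instead of relying on KeyboardInterrupt alone.
# The stop_event parameter is an assumption, not part of the original
# interface.
from threading import Event
from time import sleep


def running_interface_with_event(connector, stop_event: Event):
    try:
        while not stop_event.is_set():
            print("Running...")
            sleep(3)
    finally:
        connector.stop()

# Another thread (or a signal handler) can then request shutdown with
# stop_event.set().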
'''
This script will perform basic processing of .ms8.txt files produced by the
MED-Associates legacy Davis Rig lickometer.

USAGE: graphAll.py [path_to_files, use_first_block, no_normalization, group_by_TUBE]

Reads in raw files and executes gen_summary_dataframe, graph_cumulative_licks,
individual_graph_by_group, and summarize_licking_curve.
'''

##### PREAMBLE #####
from sys import argv

# Safely remove the script name from the list of arguments.
user_args = argv.copy()
user_args.pop(0)

# If the user has provided arguments, process them.
if len(user_args) != 0:
    # Check for arguments pertaining to graphing preferences
    if 'use_first_block' in user_args:
        drop_first_block = False
        user_args.remove('use_first_block')
        print('Including first presentation block in averages.')
    else:
        drop_first_block = True
        print('Excluding first presentation block from averages.')

    if 'no_normalization' in user_args:
        norm_by_water = False
        user_args.remove('no_normalization')
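# The flag handling above pops known strings out of user_args by hand. A
# sketch of the same preferences expressed with argparse, assuming only the
# flag names shown in the USAGE string; the module-level variables would then
# come from the parsed namespace rather than the manual checks above.
import argparse

parser = argparse.ArgumentParser(
    description='Basic processing of .ms8.txt Davis Rig lickometer files.')
parser.add_argument('path_to_files', nargs='?', default='.')
parser.add_argument('--use_first_block', action='store_true',
                    help='Include the first presentation block in averages.')
parser.add_argument('--no_normalization', action='store_true',
                    help='Skip normalization (sets norm_by_water to False).')
parser.add_argument('--group_by_TUBE', action='store_true',
                    help='Group results by tube.')
# args = parser.parse_args()
# drop_first_block = not args.use_first_block
# norm_by_water = not args.no_normalization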