def main(yaml_file, root_paths, model_id):
    '''
    Args:
    -----
        root_paths: list of strings with the root file paths
    Returns:
    --------
        n_train_events: total number of events for training, across all root files
        n_test_events: total number of events for testing, across all root files
        n_validate_events: total number of events for validating, across all root files
    Alternatively, you can return the paths to hdf5 files being created, for logging
    '''

    # -- logging
    configure_logging()
    logger = logging.getLogger("parallel_generate_data_DL1")
    logger.debug('Files to process: {}'.format(root_paths))

    # -- open and process files in parallel
    from joblib import Parallel, delayed
    n_events = Parallel(n_jobs=-1, verbose=5, backend="multiprocessing") \
        (delayed(process)(i, filepath, yaml_file, model_id) for i, filepath in enumerate(root_paths))

    # -- add up events in the list of results to get the total number of events per type 
    n_train_events = sum(counts[0] for counts in n_events)
    n_test_events = sum(counts[1] for counts in n_events)
    n_validate_events = sum(counts[2] for counts in n_events)
    logger.info(
        'There are {} training events, {} testing events, and {} validating events'.format(
            n_train_events, n_test_events, n_validate_events
        )
    )
    return n_train_events, n_test_events, n_validate_events
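Note: the examples on this page each call some project-specific configure_logging helper that is not reproduced here. For orientation only, a minimal sketch of such a helper (the name, signature, and defaults are assumptions, not code from any project above):

import logging
import sys


def configure_logging(name=None, level=logging.INFO):
    """Minimal sketch: attach a timestamped stream handler to the root logger.
    The real helpers above may also add file handlers, verbosity flags, or request context."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(handler)
    return logging.getLogger(name)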
Example #2
def main():
    utils.configure_logging()
    parser = argparse.ArgumentParser()

    # top-level parser, generic args used by all commands
    subparsers = parser.add_subparsers(help='Subcommand help')

    # init command
    parser_init = subparsers.add_parser('init', help='Initialise the cluster')
    parser_init.add_argument('--bar', help='init arg')
    parser_init.set_defaults(func=init)

    # promote command
    parser_promote = subparsers.add_parser('promote', help='Promote a slave')
    parser_promote.add_argument('--baz', help='promote arg')
    parser_promote.set_defaults(func=promote)

    default_args = [
        {'name': '--settings', 'help': 'Path to settings file'},
    ]

    _add_default_args([parser_init, parser_promote], default_args)

    # Parse the args, and pass them to the function for the chosen subcommand
    args = parser.parse_args()
    args.func(args)
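The _add_default_args helper used above is not shown in the snippet. A plausible minimal implementation, purely as an assumption, simply attaches each shared argument to every subparser:

def _add_default_args(parsers, default_args):
    # Assumed helper: add shared options (e.g. --settings) to each subparser.
    for sub_parser in parsers:
        for arg in default_args:
            kwargs = {key: value for key, value in arg.items() if key != 'name'}
            sub_parser.add_argument(arg['name'], **kwargs)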
Example #3
File: inferlib.py Project: goolic/infer
    def __init__(self, args, javac_args):

        self.args = args
        if self.args.analyzer not in MODES:
            help_exit('Unknown analysis mode "{0}"'.format(self.args.analyzer))

        utils.configure_logging(self.args.debug)

        self.javac = jwlib.CompilerCall(javac_args)

        if not self.javac.args.version:
            if javac_args is None:
                help_exit("No javac command detected")

            if self.args.infer_out is None:
                help_exit("Expect Infer results directory")

            if self.args.buck:
                self.args.infer_out = os.path.join(self.javac.args.classes_out, utils.BUCK_INFER_OUT)
                self.args.infer_out = os.path.abspath(self.args.infer_out)

            try:
                os.mkdir(self.args.infer_out)
            except OSError as e:
                if not os.path.isdir(self.args.infer_out):
                    raise e

            self.stats = {"int": {}, "float": {}}
            self.timing = {}
Example #4
def main(config, test_run_file):
    """Main routine - assess which tests should be run, and create
    output file"""
    
    session = get_xapi_session(config)

    # Release current logger before running logrotate.
    utils.release_logging()

    # Run log rotate before ACK produces any log.
    for host_ref in session.xenapi.host.get_all():
        res = session.xenapi.host.call_plugin(host_ref, 
                                    'autocertkit',
                                    'run_ack_logrotate', 
                                    {})
    utils.configure_logging('auto-cert-kit')

    pre_flight_checks(session, config)

    config['xs_version'] = utils.get_xenserver_version(session)
    config['xcp_version'] = utils.get_xcp_version(session)

    generate_test_config(session, config, test_run_file)
    # Logout of XAPI session anyway - the test runner will create a new session
    # if needed. (We might only be generating).
    session.logout()

    if 'generate' in config:
        #Generate config file only
        utils.log.info("Test file generated")
        return "OK"

    #Kick off the testrunner
    utils.log.info("Starting Test Runner from ACK CLI.")
    test_file, output = test_runner.run_tests_from_file(test_run_file)
Example #5
def main(return_tagdir=False):
    utils.parse_options()
    utils.configure_logging()
    releaser = Releaser()
    releaser.run()
    tagdir = releaser.data.get('tagdir')
    if tagdir:
        logger.info("Reminder: tag checkout is in %s", tagdir)
Example #6
File: __init__.py Project: Juanvvc/scfs
def init_default_conf():	
	""" Sets up a default configuration for DFS: uses $HOME/.dfs as
	the config dir, reads the default configuration from
	$HOME/.dfs/dfsrc, and sets up the logging system to use the file
	dfs.log in the configuration directory """
	
	global default_config_dir, default_config, default_log_file, default_config_file, dht
	
	# Set the path of the default config directory
	default_config_dir=os.path.expanduser('~%s.dfs'%os.path.sep)
	# Create the default config directory if it does not exist
	if not os.path.exists(default_config_dir): os.mkdir(default_config_dir)
	default_config_file=default_config_dir+os.path.sep+'dfsrc'
	default_log_file=default_config_dir+os.path.sep+'dfs.log'
	# Load the default config file
	if not os.path.exists(default_config_file): open(default_config_file,'w').close()
	default_config=utils.Config()
	default_config.load(open(default_config_file,'r'))
	
	# Configures the logging system
	utils.configure_logging(level=logging.INFO,
		format='%(asctime)s %(name)s %(levelname)s %(message)s',
		datefmt='%H:%M:%S',
		filename=default_log_file,
		filemode='w')	
	
	logging.info('Default configuration: %s'%default_config_file)
	
	# sets default configuration, if not set
	changed=False
	if not default_config.get('DHT:datadir'):
		default_config.set('DHT:datadir',default_config_dir+os.path.sep+'dhtdata')
		changed=True
	if not default_config.get('Main:uid'):
		default_config.set('Main:uid',utils.random_string(16))
		changed=True
	if not default_config.get('Main:nick'):
		default_config.set('Main:nick',utils.random_nick())
		changed=True
	if not default_config.get('Keys:kf'):
		logging.warning('There is no file key')
	if not default_config.get('Keys:kd'):
		logging.warning('There is no description key')
	if changed:
		default_config.save(open(default_config_file,'w'))
		
	# Default DHT: a local DHT
	dht=DHT.LocalDHT(default_config)
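The snippet assumes a utils.Config object with load/get/set/save methods keyed by 'Section:option' strings. A rough sketch of that interface (an assumption for illustration, not the project's code) could wrap configparser:

import configparser


class Config:
    """Assumed interface: keys are 'Section:option' strings."""

    def __init__(self):
        self._parser = configparser.ConfigParser()

    def load(self, fileobj):
        self._parser.read_file(fileobj)

    def get(self, key, default=None):
        section, option = key.split(':', 1)
        return self._parser.get(section, option, fallback=default)

    def set(self, key, value):
        section, option = key.split(':', 1)
        if not self._parser.has_section(section):
            self._parser.add_section(section)
        self._parser.set(section, option, str(value))

    def save(self, fileobj):
        self._parser.write(fileobj)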
Example #7
def main():
    client = InfluxDBClient(host=args.influxdb_host, ssl=args.ssl, verify_ssl=False, port=8086, database=args.database)
    logger = configure_logging('parse_mms_metrics')

    extracted_metrics = extract_metrics_from_mms_dump(args.input_file)


    json_points = []
    for tagset, metrics_for_all_timestamps in extracted_metrics.items():
        for timestamp, metrics_for_one_timestamp in metrics_for_all_timestamps.items():
            json_points.append({
                "timestamp": timestamp,
                "measurement": "cloudmanager_data",
                "tags": {
                    "project": tagset[0], # Magic number - not great
                    "hostname": tagset[1]
                },
                "fields": metrics_for_one_timestamp
            })
            if len(json_points) >= args.batch_size:
                print(len(json_points))
                write_points(logger, client, json_points, "N/A")
                json_points = []

    write_points(logger, client, json_points, "N/A")
Example #8
def main():
    logger = configure_logging('parse_serverstatus')
    client = InfluxDBClient(host=args.influxdb_host, ssl=args.ssl, verify_ssl=False, port=8086, database=args.database)
    with open(args.input_file, 'r') as f:
        for line_number, chunk in enumerate(grouper(f, args.batch_size)):
            # print(line_number)
            json_points = []
            for line in chunk:
                # zip_longest will backfill any missing values with None, so we need to handle this, otherwise we'll miss the last batch
                if line:
                    try:
                        server_status_json = json.loads(line)
                        # print((line_number + 0) * _BATCH_SIZE)
                        # print((line_number + 1) * _BATCH_SIZE)
                        common_metric_data = get_metrics("serverstatus", server_status_json, common_metrics, line_number)
                        json_points.append(create_point(*common_metric_data))
                        wiredtiger_metric_data = get_metrics("serverstatus_wiredtiger", server_status_json, wiredtiger_metrics, line_number)
                        json_points.append(create_point(*wiredtiger_metric_data))
                        # for metric_data in get_metrics(server_status_json, common_metrics, line_number):
                        #     import ipdb; ipdb.set_trace()
                        #     print(json_points)
                        #     json_points.append(create_point(*metric_data))
                        # # for metric in get_metrics(server_status_json, wiredtiger_metrics, line_number):
                        #     json_points.append(create_point(*metric))
                        # for metric in get_metrics(server_status_json, mmapv1_metrics, line_number):
                        #     json_points.append(create_point(*metric))
                    except ValueError:
                        logger.error("Line {} does not appear to be valid JSON - \"{}\"".format(line_number, line.strip()))
            write_points(logger, client, json_points, line_number)
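The grouper helper that batches the input file is not shown; the comment about zip_longest backfilling missing values with None suggests it is the standard itertools recipe, sketched here as an assumption:

from itertools import zip_longest


def grouper(iterable, n, fillvalue=None):
    # Collect data into fixed-length chunks, padding the last chunk with fillvalue:
    # grouper('ABCDEFG', 3) -> ('A','B','C') ('D','E','F') ('G', None, None)
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)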
Example #9
    def __init__(self, args, javac_args):

        self.args = args
        if self.args.analyzer not in MODES:
            help_exit(
                'Unknown analysis mode \"{0}\"'.format(self.args.analyzer)
            )

        utils.configure_logging(self.args.debug)

        self.javac = jwlib.CompilerCall(javac_args)

        if not self.javac.args.version:
            if javac_args is None:
                help_exit('No javac command detected')

            if self.args.infer_out is None:
                help_exit('Expect Infer results directory')

            if self.args.buck:
                self.args.infer_out = os.path.join(
                    self.javac.args.classes_out,
                    utils.BUCK_INFER_OUT)
                self.args.infer_out = os.path.abspath(self.args.infer_out)

            try:
                os.mkdir(self.args.infer_out)
            except OSError as e:
                if not os.path.isdir(self.args.infer_out):
                    raise e

            self.stats = {'int': {}}
            self.timing = {}

        if self.args.specs_dirs:
            # Each dir passed in input is prepended by '-lib'.
            # Convert each path to absolute because when running from
            # cluster Makefiles (multicore mode) InferAnalyze creates the wrong
            # absolute path from within the multicore folder
            self.args.specs_dirs = [item
                                    for argument in
                                    (['-lib', os.path.abspath(path)] for path in
                                     self.args.specs_dirs)
                                    for item in argument]
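For illustration, the nested comprehension above just interleaves '-lib' with each absolute path; with hypothetical input it behaves like this:

import os

specs_dirs = ['specs/models', '/tmp/extra-specs']  # hypothetical input
flattened = [item
             for argument in (['-lib', os.path.abspath(p)] for p in specs_dirs)
             for item in argument]
# e.g. ['-lib', '/current/working/dir/specs/models', '-lib', '/tmp/extra-specs']
# (the first path depends on the current working directory)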
Example #10
File: inferlib.py Project: hckhanh/infer
    def __init__(self, args, javac_args):

        self.args = args
        if self.args.analyzer not in MODES:
            help_exit(
                'Unknown analysis mode \"{0}\"'.format(self.args.analyzer)
            )

        utils.configure_logging(self.args.debug)

        self.javac = jwlib.CompilerCall(javac_args)

        if not self.javac.args.version:
            if javac_args is None:
                help_exit('No javac command detected')

            if self.args.infer_out is None:
                help_exit('Expect Infer results directory')

            if self.args.buck:
                self.args.infer_out = os.path.join(
                    self.javac.args.classes_out,
                    utils.BUCK_INFER_OUT)
                self.args.infer_out = os.path.abspath(self.args.infer_out)

            try:
                os.mkdir(self.args.infer_out)
            except OSError as e:
                if not os.path.isdir(self.args.infer_out):
                    raise e

            self.stats = {'int': {}}
            self.timing = {}

        if self.args.specs_dirs:
            # Each dir passed in input is prepended by '-lib'.
            # Convert each path to absolute because when running from
            # cluster Makefiles (multicore mode) InferAnalyze creates the wrong
            # absolute path from within the multicore folder
            self.args.specs_dirs = [item
                                    for argument in
                                    (['-lib', os.path.abspath(path)] for path in
                                     self.args.specs_dirs)
                                    for item in argument]
Example #11
def main(event, context):
    stage = os.environ['STAGE']
    body = json.loads(event.get('body', {}))

    if is_true(body.get('debug', '')):
        configure_logging(level="DEBUG")

    logging.debug(f'refund_failed_txn() stage:{stage}')
    logging.debug(f'event: {event}')
    logging.debug(f'context: {context}')

    logging.debug(f'body: {body}')
    receipt     = body.get('receipt', '')
    #bundle_id   = body.get('bundle_id', '')
    account_id  = body.get('account_id', '')
    product_id  = body.get('product_id', None)
    verify_receipt = True
    # todo: add optional existing account

    if os.environ['STAGE'] == 'dev':
        verify_receipt = is_true(body.get('verify_receipt', 'True'))

    if os.environ['STAGE'] != 'dev':
        if body.get('verify_receipt'):
            return response(444,{'msg':'invalid_dev_param'})

    msg, receipt_hash, total_usd = payments_apple.handle_receipt(receipt, product_id, stage, verify_receipt)

    if ((account_id is None) or (account_id == '')):
        account_id = receipt_hash

    if (msg == "success"):
        logging.debug(f'conditional writing receipt with hash: {receipt_hash}')
        try:
            w3_generic.dynamodb_cwrite1(os.environ['RECEIPT_TABLE_NAME'], 'receipt', receipt_hash )
        except Exception as e:
            logging.info(f'writing receipt exception: {str(e)} ')
            return response(403,{'msg':f'Receipt {receipt_hash} already redeemed'})
        w3_generic.credit_account_balance(account_id, total_usd)
        return response(200,{'msg':msg,'account_id':account_id,'total_usd':total_usd})
    else:
        return response(402,{'msg':msg})
Example #12
def main():
    args = get_args()
    utils.configure_logging(verbose=args.verbose, error=args.error, debug=args.debug)
    session = utils.get_session(args.config)

    # Time parameters:
    average = datetime.timedelta(seconds=args.average)

    # Run query
    start, end = get_time_range(args.date)
    rows = get_data(session, start, end, average)

    # Clean up
    rows = (transform(row) for row in rows)

    # Sort chronologically
    rows = sort(rows, key='timestamp')

    # Save output file
    write_csv(args.output, rows=rows)
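The sort helper called above takes a column name rather than a key function; a minimal sketch of what it presumably does (an assumption, not the project's code):

def sort(rows, key):
    # Assumed helper: sort an iterable of dict-like rows by the named column.
    return sorted(rows, key=lambda row: row[key])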
Example #13
def main():
    utils.parse_options()
    utils.configure_logging()
    logger.info('Starting prerelease.')
    original_dir = os.getcwd()
    # prerelease
    prereleaser = prerelease.Prereleaser()
    prereleaser.run()
    logger.info('Starting release.')
    # release
    releaser = release.Releaser(vcs=prereleaser.vcs)
    releaser.run()
    tagdir = releaser.data.get('tagdir')
    logger.info('Starting postrelease.')
    # postrelease
    postreleaser = postrelease.Postreleaser(vcs=releaser.vcs)
    postreleaser.run()
    os.chdir(original_dir)
    logger.info('Finished full release.')
    if tagdir:
        logger.info("Reminder: tag checkout is in %s", tagdir)
Example #14
def main(options):
    configure_logging()

    Entrez.email = options.email

    logging.info(
        f"Beginning abstracts gathering for mesh terms located in file {options.mesh_terms_path}"
    )

    with open(options.mesh_terms_path, "r") as mesh_file:
        mesh_terms = mesh_file.read().splitlines()

    logging.info(
        f"The following mesh terms were retrieved {', '.join(mesh_terms)}")

    os.makedirs(options.texts_dir, exist_ok=True)

    for mesh_term in tqdm(mesh_terms, desc="Processing mesh terms..."):
        process_mesh_term(mesh_term, options.texts_dir)

    logging.info("Done")
Example #15
def main(event, context):
    stage = os.environ['STAGE']
    body = json.loads(event.get('body', {}))

    if is_true(body.get('debug', '')):
        configure_logging(level="DEBUG")

    logging.debug(f'recycle() stage:{stage}')
    logging.debug(f'event: {event}')
    logging.debug(f'context: {context}')
    logging.debug(f'body: {body}')
    funder = toChecksumAddress(address=body.get('funder', ''))
    signer = toChecksumAddress(address=body.get('signer', ''))
    password = body.get('password', '')

    if password != get_secret(key=os.environ['RECYCLE_KEY']):
        return incorrect_password()

    pac_funder = get_secret(key=os.environ['PAC_FUNDER_PUBKEY_SECRET'])

    if funder != pac_funder:
        return invalid_funder(funder, pac_funder)

    funder_keys = keys(funder)
    if signer == '' or signer not in funder_keys:
        return invalid_signer(signer)

    amount, escrow, unlock = look(funder, signer)

    amount_threshold = float("inf")
    escrow_threshold = float("inf")

    if amount > amount_threshold:
        return amount_too_high(amount, amount_threshold)

    if escrow > escrow_threshold:
        return escrow_too_high(escrow, escrow_threshold)

    store_account(funder, signer, unlock)
    return account_queued_response()
Example #16
def main(event, context):
    stage = os.environ['STAGE']
    body = json.loads(event.get('body', {}))

    if is_true(body.get('debug', '')):
        configure_logging(level="DEBUG")

    logging.debug(f'refund_failed_txn() stage:{stage}')
    logging.debug(f'event: {event}')
    logging.debug(f'context: {context}')
    logging.debug(f'body: {body}')

    W3WSock = body.get('W3WSock', '')
    txnhash = body.get('txnhash', '')
    receiptHash = body.get('receiptHash', '')

    msg = refund_failed_txn(W3WSock, txnhash, receiptHash)

    if (msg == "success"):
        return response_success(txnhash)
    else:
        return response_error(msg)
Example #17
def main(args, test_plan):
    log_level = logging.DEBUG if args.verbose else logging.INFO
    configure_logging(log_level)
    logging.info('Cloud Weather Report started.')
    results = []
    bundle = test_plan.get('bundle')
    args.bundle = test_plan.get('bundle_file')
    html_filename, json_filename = get_filenames(bundle)
    last_successful_status = None

    for env_name in args.controller:
        env = connect_juju_client(env_name, logging=logging)
        if not env:
            logging.error("Jujuclient could not connect to {} ".format(
                env_name))
            continue
        env_info = env.info()
        provider_name = get_provider_name(env_info["ProviderType"])
        logging.info('Running test on {}.'.format(provider_name))
        test_results, status = run_bundle_test(
            args=args, env_name=env_name, test_plan=test_plan, env=env)
        if status is None and test_results is None:
            continue
        last_successful_status = status
        benchmark_results = []
        if status is not None and test_plan.get('benchmark'):
            benchmark_results = run_benchmark(
                test_plan, bundle, json_filename, provider_name, env)
        results.append({
            "provider_name": provider_name,
            "test_results": json.loads(test_results) if test_results else None,
            "action_results": benchmark_results,
            "info": env_info})

    generate_report(
        bundle=bundle, results=results, options=args,
        status=last_successful_status, html_filename=html_filename,
        json_filename=json_filename)
    return html_filename
Example #18
File: pyepm.py Project: Georgi87/pyepm
def main():
    config = c.get_default_config()
    parser = ArgumentParser(version=__version__)

    config = create_config(parser)

    # Logging
    configure_logging(config.get('misc', 'logging') or '', verbosity=config.getint('misc', 'verbosity'))
    logger.info('PyEPM %s', __version__)
    logger.info('=====')

    logger.debug(c.dump_config(config))

    args = parser.parse_args()

    for filename in args.filename:
        if not os.path.exists(filename):
            logger.warn("File does not exist: %s" % filename)
        else:
            logger.info("Deploying %s..." % filename)
            deployment = deploy.Deploy(filename, config)
            deployment.deploy()
Example #19
def main(event, context):
    stage = os.environ['STAGE']
    body = json.loads(event.get('body', {}))

    if is_true(body.get('debug', '')):
        configure_logging(level="DEBUG")

    logging.debug(f'entry_send_raw() stage:{stage}')
    logging.debug(f'event: {event}')
    logging.debug(f'context: {context}')
    logging.debug(f'body: {body}')

    W3WSock = body.get('W3WSock', '')
    txn = body.get('txn', '')
    receiptHash = body.get('receiptHash', '')

    txnhash, cost_usd, msg = send_raw(W3WSock, txn, receiptHash)

    if txnhash is not None:
        return response_success(txnhash, cost_usd)
    else:
        return response_error(msg)
Example #20
def main():
    args = get_args()
    utils.configure_logging(verbose=args.verbose,
                            error=args.error,
                            debug=args.debug)

    # Retrieve raw data
    session = http_session.SensorSession()
    rows = get_data(session=session,
                    date=args.date,
                    sampling_features=settings.SAMPLING_FEATURES)

    # Clean data
    rows = filter_n(filter_row,
                    rows,
                    sampling_features=settings.SAMPLING_FEATURES)
    rows = transform(rows)
    rows = filter_n(validate, rows)
    rows = pivot(rows)
    rows = sort(rows)

    output.serialise(rows, path=args.output)
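filter_n is not defined in the snippet; a minimal sketch consistent with how it is called here (an assumption) is a lazy filter that forwards keyword arguments to the predicate:

def filter_n(function, rows, **kwargs):
    # Assumed helper: lazily keep rows for which the predicate returns True,
    # passing any extra keyword arguments through to it.
    return (row for row in rows if function(row, **kwargs))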
Example #21
def main():
    config = configparser.ConfigParser()
    config.read('config.ini')
    base_config = config['crawler']
    configure_logging(base_config.get('logfile', None),
                      log_level=int(base_config['loglevel']))
    couch_config = config['couchdb']

    with DatabaseConn(couch_config) as db:
        logger = logging.getLogger('tweet_crawler')

        crawler = TweetCrawler(config['twitter'],
                               BoundingBox(base_config['bbox']), db, logger)
        try:
            crawler.download_tweets()
        except KeyboardInterrupt:
            logger.info('interrupt received; disconnecting')
        except Exception as ex:
            logger.exception(ex)
        finally:
            crawler.disconnect()
            sys.exit(0)
Example #22
def main(event, context):
    stage = os.environ['STAGE']
    body = json.loads(event.get('body', {}))

    if is_true(body.get('debug', '')):
        configure_logging(level="DEBUG")

    logging.debug(f'refund_failed_txn() stage:{stage}')
    logging.debug(f'event: {event}')
    logging.debug(f'context: {context}')
    logging.debug(f'body: {body}')

    receipt = body.get('receipt', '')
    target_bundle_id = body.get('target_bundle_id', '')

    msg, receipt_hash, total_usd = handle_receipt_apple(
        receipt, target_bundle_id, stage)

    if (msg == "success"):
        return response_success(receipt_hash, total_usd)
    else:
        return response_error(msg)
Example #23
def main():
    args = docopt.docopt(__doc__, version='v0.0.1')
    utils.configure_logging(args['--debug'])

    out_file = args['--output']

    # Read from a .csv, or allow domains on the command line.
    domains = []
    if args['INPUT'][0].endswith(".csv"):
        domains = utils.load_domains(args['INPUT'][0])
    else:
        domains = args['INPUT']

    # If the user wants to sort them, sort them in place.
    if args['--sorted']:
        domains.sort()

    options = {
        'user_agent': args['--user-agent'],
        'timeout': args['--timeout'],
        'preload_cache': args['--preload-cache'],
        'cache': args['--cache']
    }
    results = pshtt.inspect_domains(domains, options)

    # JSON can go to STDOUT, or to a file.
    if args['--json']:
        output = utils.json_for(results)
        if out_file is None:
            print(output)
        else:
            utils.write(output, out_file)
            logging.warn("Wrote results to %s." % out_file)
    # CSV always goes to a file.
    else:
        if args['--output'] is None:
            out_file = 'results.csv'
        pshtt.csv_for(results, out_file)
        logging.warn("Wrote results to %s." % out_file)
Example #24
File: show.py Project: dambem/urban_flows
def main():
    parser, args = get_args()
    utils.configure_logging(verbose=args.verbose)

    if args.locations:
        cls = objects.Location
    elif args.sensors:
        cls = objects.Sensor
    elif args.sensor_categories:
        cls = objects.SensorCategory
    elif args.sensor_types:
        cls = objects.SensorType
    elif args.reading_categories:
        cls = objects.ReadingCategory
    elif args.reading_types:
        cls = objects.ReadingType
    else:
        parser.print_help()
        exit()

    with http_session.PortalSession(token_path=args.token) as session:
        print_objects(session, cls)
Example #25
def main(train_desc_file, val_desc_file, epochs, save_dir, sortagrad):
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Configure logging
    configure_logging(file_log_path=os.path.join(save_dir, 'train_log.txt'))

    # Prepare the data generator
    datagen = DataGenerator()
    # Load the JSON file that contains the dataset
    datagen.load_train_data(train_desc_file)
    datagen.load_validation_data(val_desc_file)
    # Use a few samples from the dataset, to calculate the means and variance
    # of the features, so that we can center our inputs to the network
    datagen.fit_train(100)

    # Compile a Recurrent Network with 1 1D convolution layer, GRU units
    # and 1 fully connected layer
    model = compile_gru_model(recur_layers=5, batch_norm=True)
    # Print the model structure
    print('*' * 20)
    print(model.summary())
    # Load previously trained model weights (optional)
    # model.load_weights("result_open3/model_13000_weights.h5")

    # Compile the CTC training function
    train_fn = compile_train_fn(model)

    # Compile the validation function
    val_fn = compile_test_fn(model)

    # Train the model
    train(model,
          train_fn,
          val_fn,
          datagen,
          save_dir,
          epochs=epochs,
          do_sortagrad=sortagrad)
Example #26
File: pyepm.py Project: zebbra2014/pyepm
def main():
    config = c.get_default_config()
    parser = ArgumentParser(version=__version__)

    config = create_config(parser)

    # Logging
    configure_logging(config.get('misc', 'logging') or '', verbosity=config.getint('misc', 'verbosity'))
    logger.info(colors.HEADER + '=====' + colors.ENDC)
    logger.info(colors.OKGREEN + 'PyEPM ' + colors.ENDC + '%s', __version__)
    logger.info(colors.HEADER + '=====' + colors.ENDC)

    logger.debug(c.dump_config(config))

    args = parser.parse_args()

    for filename in args.filename:
        if not os.path.exists(filename):
            logger.warn("\nFile does not exist: %s" % filename)
        else:
            logger.info("\nDeploying " + colors.BOLD + "%s" % filename + colors.ENDC + "...")
            deployment = deploy.Deploy(filename, config)
            deployment.deploy()
Example #27
File: pyepm.py Project: Georgi87/pyepm
def main():
    config = c.get_default_config()
    parser = ArgumentParser(version=__version__)

    config = create_config(parser)

    # Logging
    configure_logging(config.get('misc', 'logging') or '',
                      verbosity=config.getint('misc', 'verbosity'))
    logger.info('PyEPM %s', __version__)
    logger.info('=====')

    logger.debug(c.dump_config(config))

    args = parser.parse_args()

    for filename in args.filename:
        if not os.path.exists(filename):
            logger.warn("File does not exist: %s" % filename)
        else:
            logger.info("Deploying %s..." % filename)
            deployment = deploy.Deploy(filename, config)
            deployment.deploy()
Example #28
def main(args):
    log_level = logging.DEBUG if args.verbose else logging.INFO
    configure_logging(log_level)
    logging.info('Cloud Weather Report started.')
    test_plan = None
    if args.test_plan:
        test_plan = read_file(args.test_plan, 'yaml')
    results = []
    status = None
    bundle = test_plan.get('bundle')
    html_filename, json_filename = get_filenames(bundle)
    for env_name in args.controller:
        env = jujuclient.Environment.connect(env_name=env_name)
        env_info = env.info()
        provider_name = get_provider_name(env_info["ProviderType"])
        logging.info('Running test on {}.'.format(provider_name))
        test_results, status = run_bundle_test(
            args=args, env=env_name, test_plan=test_plan)
        client = jujuclient.Actions(env)
        action_results = []
        if test_plan.get('benchmark'):
            action_results = run_actions(test_plan, client, env.status())
            all_values = get_benchmark_data(
                file_prefix(bundle), os.path.dirname(json_filename),
                provider_name)
            value = action_results[0].values()[0]['value']
            action_results[0].values()[0]['all_values'] = all_values + [value]
        results.append({
            "provider_name": provider_name,
            "test_results": json.loads(test_results) if test_results else None,
            "action_results": action_results,
            "info": env_info})
    bundle_yaml = get_bundle_yaml(status)
    reporter = Reporter(bundle=bundle, results=results, options=args,
                        bundle_yaml=bundle_yaml)
    reporter.generate(html_filename=html_filename, json_filename=json_filename)
    return html_filename
Example #29
def main():
    parser = AeroqualMetadataArgumentParser()
    args = parser.parse_args()
    utils.configure_logging(verbose=args.verbose,
                            debug=args.debug,
                            error=args.error)

    session = http_session.AeroqualSession(config_file=args.config)

    if args.csv:
        pass
    elif args.sensors:
        for serial_number in Instrument.list(session):
            instrument = Instrument(serial_number)
            instr_data = instrument.get(session)

            uf_sensor = maps.instrument_to_sensor(instrument=instr_data)
            print(uf_sensor)

    elif args.sites:
        pass

    else:
        parser.print_help()
Example #30
def run():
	define('port', type=int, default=8000)

	define('log_level', type=str, default='INFO')
	define('log_file', type=str, default='logs/AutoCheck')

	options.logging = None
	parse_command_line()

	configure_logging(log_file=options.log_file, log_level=options.log_level)
	io_loop = tornado.ioloop.IOLoop()

	threads = []
	reservations = OrderedDict()

	init_sequence(threads=threads, reservations=reservations)

	handlers = [
		tornado.web.url(r'/?(.*)?', tornado.web.StaticFileHandler,
		                {'path': 'static', 'default_filename': 'index.html'}),
		tornado.web.url(r'/create/?(.*)?', CreateHandler,
		                {'reservations': reservations, 'threads': threads}),
		tornado.web.url(r'/reservations/?(.*)?', ReservationsHandler,
		                {'reservations': reservations}),
	]

	settings = {
		'debug': False,
	}
	application = tornado.web.Application(handlers, **settings)
	application.listen(options.port)

	try:
		io_loop.start()
	finally:
		logging.info('Shutting down.')
Example #31
def main():
    client = InfluxDBClient(host=args.influxdb_host, ssl=args.ssl, verify_ssl=False, port=8086, database=args.database)
    logger = configure_logging('parse_sar_disk')
    sar_timezone = timezone(args.timezone)
    with open(args.input_file, 'r') as f:
        header_split = f.readline().split()
        hostname = header_split[2].strip("()")
        logger.info("Found hostname {}".format(hostname))
        date = header_split[3]
        logger.info("Found date {} (MM/DD/YYYY)".format(date))
        json_points = []
        for line_number, line in enumerate(f):
            if line.strip() and 'Average:' not in line: # We skip any empty lines, and also the "Average:" lines at the end
                if all(header_keyword in line for header_keyword in ['DEV', 'tps', 'rd_sec/s', 'wr_sec/s']):
                    # Skip the header lines - if a device name contains all of the four keywords below, I will eat my hat
                    pass
                else:
                    disk_stats = dict(zip(SAR_DISK_HEADERS, line.split()))
                    values = {}
                    local_timestamp = datetime.strptime("{} {} {}".format(date, disk_stats['timestamp'], disk_stats['AM_OR_PM']), "%m/%d/%Y %I:%M:%S %p")
                    timestamp = sar_timezone.localize(local_timestamp)
                    for metric_name, value in disk_stats.items():
                        if metric_name == 'device':
                            disk_name = value
                        elif metric_name in ['AM_OR_PM', 'timestamp']:
                            pass
                        else:
                            values[metric_name] = float(value)
                    json_points.append({
                        "measurement": "sar_disk",
                        "tags": {
                            "project": args.project,
                            "hostname": hostname,
                            "device": disk_name,
                        },
                        "time": timestamp.isoformat(),
                        "fields": values
                    })
            if len(json_points) >= args.batch_size:
                write_points(logger, client, json_points, line_number)
                json_points = []
        write_points(logger, client, json_points, line_number)
Example #32
def main(iptagger, root_paths, model_id):
    configure_logging()
    logger = logging.getLogger("Combine_MV2IP")
    logger.info("Running on: {}".format(iptagger))

    branches, training_vars = set_features(iptagger)
    logger.info('Creating dataframe...')
    df = pup.root2panda('../data/final_production/*',
                        'bTag_AntiKt4EMTopoJets',
                        branches=branches)

    logger.info('Transforming variables...')
    df = transformVars(df, iptagger)

    logger.info('Flattening df...')
    df_flat = pd.DataFrame({k: pup.flatten(c) for k, c in df.iterkv()})
    del df

    logger.info('Applying cuts...')
    df_flat = apply_calojet_cuts(df_flat)

    logger.info('Will train on {}'.format(training_vars))
    logger.info('Creating X, y, w, mv2c10...')
    y = df_flat['jet_LabDr_HadF'].values
    mv2c10 = df_flat['jet_mv2c10'].values
    # -- slice df by only keeping the training variables
    X = df_flat[training_vars].values
    pteta = df_flat[['jet_pt', 'abs(jet_eta)']].values
    #w = reweight_to_b(pteta, y, pt_col=0, eta_col=1)
    w = reweight_to_l(pteta, y, pt_col=0, eta_col=1)
    del df_flat, pteta

    logger.info('Shuffling, splitting, scaling...')
    ix = np.array(range(len(y)))
    X_train, X_test, y_train, y_test, w_train, w_test, \
    ix_train, ix_test, mv2c10_train, mv2c10_test = train_test_split(
        X, y, w, ix, mv2c10, train_size=0.6
    )
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    le = LabelEncoder()
    net = Sequential()
    net.add(Dense(16, input_shape=(X_train.shape[1], ), activation='relu'))
    net.add(Dropout(0.2))
    net.add(Dense(4, activation='softmax'))
    net.summary()
    net.compile('adam',
                'sparse_categorical_crossentropy',
                metrics=['accuracy'])

    weights_path = iptagger + '-' + model_id + '-progress.h5'
    try:
        logger.info('Trying to load weights from ' + weights_path)
        net.load_weights(weights_path)
        logger.info('Weights found and loaded from ' + weights_path)
    except IOError:
        logger.info('No weights found in ' + weights_path)

    # -- train
    try:
        net.fit(X_train,
                le.fit_transform(y_train),
                verbose=True,
                batch_size=64,
                sample_weight=w_train,
                callbacks=[
                    EarlyStopping(verbose=True,
                                  patience=100,
                                  monitor='val_loss'),
                    ModelCheckpoint(weights_path,
                                    monitor='val_loss',
                                    verbose=True,
                                    save_best_only=True)
                ],
                nb_epoch=200,
                validation_split=0.3)
    except KeyboardInterrupt:
        print('\n Stopping early.')

    # -- load in best network
    net.load_weights(weights_path)

    # -- test
    print('Testing...')
    yhat = net.predict(X_test, verbose=True)

    # -- save the predictions to numpy file
    np.save('yhat-{}-{}.npy'.format(iptagger, model_id), yhat)
    test = {'X': X_test, 'y': y_test, 'w': w_test, 'mv2c10': mv2c10_test}
    # -- plot performance
    performance(yhat, test, iptagger)
Example #33
def main():
    client = InfluxDBClient(host=args.influxdb_host, ssl=args.ssl, verify_ssl=False, port=8086, database=args.database)
    logger = configure_logging('parse_operations')
    with open(args.input_file, 'r', encoding="latin-1") as f:
        line_count = 0
        for chunk in grouper(f, args.batch_size):
            json_points = []
            for line in chunk:
                # zip_longest will backfill any missing values with None, so we need to handle this, otherwise we'll miss the last batch
                line_count += 1
                if line and line.strip().endswith("ms"):
                    values = {}
                    tags = {
                        'project': args.project,
                        'hostname': args.hostname,
                    }
                    try:
                        tags['operation'] = line.split("] ", 1)[1].split()[0]
                    except IndexError as e:
                        logger.error("Unable to get operation type - {} - {}".format(e, line))
                        break
                    if tags['operation'] in ['command', 'query', 'getmore', 'insert', 'update', 'remove', 'aggregate', 'mapreduce']:
                        thread = line.split("[", 1)[1].split("]")[0]
                        # Alternately - print(split_line[3])
                        if tags['operation'] == 'command':
                            tags['command'] = line.split("command: ")[1].split()[0]
                        if "conn" in thread:
                            tags['connection_id'] = thread
                        split_line = line.split()
                        values['duration_in_milliseconds'] = int(split_line[-1].rstrip('ms'))
                        # TODO 2.4.x timestamps have spaces
                        timestamp = parse(split_line[0])
                        if split_line[1].startswith("["):
                            # TODO - Parse locks from 2.6 style loglines
                            # 2.4 Logline:
                            tags['namespace'] = split_line[3]
                            for stat in reversed(split_line):
                                if "ms" in stat:
                                    pass
                                elif ":" in stat:
                                    key, value = stat.split(":", 1)
                                    values[key] = int(value)
                                elif stat == "locks(micros)":
                                    pass
                                else:
                                    break
                        else:
                            # 3.x logline:
                            tags['namespace'] = split_line[5]
                            # TODO - Should we be splitting on "locks:{" instead?
                            pre_locks, locks = line.rsplit("locks:", 1)
                            # Strip duration from locks
                            locks = locks.rsplit(" ", 1)[0]
                            # Add quotation marks around string, so that it is valid JSON
                            locks = re.sub(r"(\w+):", "\"\g<1>\":", locks)
                            locks_document = flatdict.FlatDict(json.loads(locks), delimiter="_")
                            for key, value in locks_document.iteritems():
                                values["locks_{}".format(key)] = int(value)



                            # We work backwards from the end, until we run out of key:value pairs
                            # TODO - Can we assume these are always integers?
                            for stat in reversed(pre_locks.split()):
                                if ":" in stat:
                                    key, value = stat.split(":", 1)
                                    values[key] = int(value)
                                else:
                                    break
                            # TODO - Parse the full query plan for IXSCAN
                            if 'planSummary: ' in line:
                                tags['plan_summary'] = (line.split('planSummary: ', 1)[1].split()[0])
                        json_points.append(create_point(timestamp, "operations", values, tags))
                    else:
                        logger.info("'{}' is not a recognised operation type - not parsing this line ({})".format(tags['operation'], line))
            if json_points:
                # TODO - We shouldn't need to wrap this in try/except - should be handled by retry decorator
                try:
                    # TODO - Have a dry-run mode
                    write_points(logger, client, json_points, line_count)
                    pass
                except Exception as e:
                    logger.error("Retries exceeded. Giving up on this point.")
Example #34
    # save out the weights to hdf5 and the model to json
    net = nn_with_modes.train(data, model_name, mode)
    yhat = nn_with_modes.test(net, data, model_name)

    # -- plot performance by mode
    plot_performance(yhat, data, model_name, mode)


# --------------------------------------------------------------

if __name__ == '__main__':

    import sys
    import argparse

    utils.configure_logging()

    # -- read in arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'config',
        help=
        "path to JSON file that specifies classes and corresponding ROOT files' paths"
    )
    parser.add_argument('model_name',
                        help="name of the set from particular network")
    parser.add_argument('mode', help="classification or regression")
    parser.add_argument('--tree',
                        help="name of the tree to open in the ntuples",
                        default='CollectionTree')
    args = parser.parse_args()
Example #35
from binance.client import Client
import pandas as pd
from utils import configure_logging
from multiprocessing import Process, freeze_support, Pool, cpu_count
import os

try:
    from credentials import API_KEY, API_SECRET
except ImportError:
    API_KEY = API_SECRET = None
    exit("CAN'T RUN SCRIPT WITHOUT BINANCE API KEY/SECRET")

log = configure_logging()


class FuturesDataPuller(Process):

    SYMBOLS = ['BTCUSDT', 'ETHUSDT', 'LTCUSDT', 'LINKUSDT']
    # 'BTCUSDT', 'XRPUSDT', 'SXPUSDT', 'ADAUSDT', 'EOSUSDT', 'DOTUSDT', 'VETUSDT', 'ETHUSDT', 'LTCUSDT', 'LINKUSDT'
    KLINE_INTERVALS = [
        '1m', '3m', '5m', '15m', '30m', '1h', '2h', '4h', '6h', '8h', '12h',
        '1d', '3d', '1w', '1M'
    ]

    def __init__(self, client, symbol, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.client = client
        self.symbol = symbol

    def run(self):
        klines = self.get_klines(
Example #36
def lambda_handler(event, context):
    """Lambda function entry point."""
    configure_logging({'aws_request_id': context.aws_request_id})
    main(event, context)
Example #37
def main(event, context):
    logging.debug(event)
    actions = {
        'init-inventory-hosts': init_inventory_hosts,
        'inventory-hosts': inventory_hosts,
        'inventory-wait': command_complete,
        'report-delta': make_delta,
    }
    if not event.get('Records'):
        logging.error('No Records key.')
        logging.error(event)
    # We should have only one record per event, but iterate just in case we don't.
    for record in event.get('Records', []):
        logging.debug(json_dumps(record))
        data = json.loads(record['body'])
        logging.info(data)
        if data['action'] in actions:
            return json_dumps(actions[data['action']](data, context))
        raise Exception('Unknown action {}.'.format(data['action']))


if __name__ == '__main__':
    configure_logging({'aws_request_id': "local"})
    os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'
    os.environ['BUCKET'] = 'edwards-asadmin-patching-bucket-us-west-2'
    os.environ['SCHEDULE_SSM_KEY'] = '/config/patching/schedule'
    os.environ['SLACK_CONFIG'] = '/onica/slack/webhook'
    resp = {'action': 'init-inventory-hosts'}
    while resp != True:
        resp = main({'Records': [{'body': json_dumps(resp)}]}, None)
        time.sleep(10)
Example #38
def main(start_time):
    tf.enable_eager_execution()
    configure_logging()

    # handle arguments and config
    args = parse_arguments()
    args.start_time = start_time

    tf.logging.info("Args: {}".format(args))
    args.second_data_dir = args.second_data_dir or args.data_dir
    args.target_data_dir = args.target_data_dir or args.data_dir

    args.match_pattern = None
    args.augmentation_flip_lr = False
    args.augmentation_flip_ud = False
    args.has_noise_input = args.input_type == "noise"
    args.has_colored_input = args.input_type == "image"
    args.has_colored_second_input = args.second_input_type == "image"
    args.has_colored_target = args.target_type == "image"
    args.discriminator_classes = 1
    if os.path.exists(os.path.join("output", args.eval_dir, "checkpoints")):
        args.checkpoint_dir = os.path.join("output", args.eval_dir,
                                           "checkpoints")
    else:
        args.checkpoint_dir = os.path.join("old-output", args.eval_dir,
                                           "checkpoints")
    model = load_model(args)
    discriminator = model.get_discriminator()
    if args.evaluate_generator:
        generator = model.get_generator()
        load_checkpoint(args,
                        checkpoint_number=(args.epoch + 24) // 25 if args.epoch else args.epoch,
                        generator=generator,
                        discriminator=discriminator)
    else:
        generator = None
        load_checkpoint(args,
                        checkpoint_number=(args.epoch + 24) // 25 if args.epoch else args.epoch,
                        discriminator=discriminator)

    # assert not (bool(args.test_data_dir) and bool(args.test_data_samples)), \
    #     "either use a part of the training data for test *OR* some actual test data"

    training_input_image_names = load_image_names(args.data_dir)
    training_second_input_image_names = load_image_names(
        args.second_data_dir) if args.second_input_type else None
    training_target_image_names = load_image_names(args.target_data_dir)
    if training_second_input_image_names is not None:
        if len(training_input_image_names) > len(
                training_second_input_image_names):
            tf.logging.info(
                "Input and second input data are different; shuffling inputs before reducing"
            )
            np.random.shuffle(training_input_image_names)
            training_input_image_names = training_input_image_names[:len(
                training_second_input_image_names)]
        assert len(training_input_image_names) == len(
            training_second_input_image_names)
    assert len(training_target_image_names) == len(training_input_image_names)
    if args.data_dir != args.target_data_dir or args.second_data_dir != args.target_data_dir:
        tf.logging.info(
            "Input and target data are different; shuffling targets before reducing"
        )
        np.random.shuffle(training_target_image_names)
        training_target_image_names = training_target_image_names[:len(
            training_input_image_names)]
    if args.compute_test_scores:
        if args.test_data_samples:  # remove data that's not actually in the training set from the training data
            if args.split_data:
                raise NotImplementedError()
            tf.logging.warning(
                "Using the first {} unaugmented samples of the training data for testing"
                .format(args.test_data_samples))
            test_input_image_names = training_input_image_names[:args.test_data_samples]
            training_input_image_names = training_input_image_names[args.test_data_samples:]
            if args.second_input_type:
                test_second_input_image_names = training_second_input_image_names[:args.test_data_samples]
                training_second_input_image_names = training_second_input_image_names[args.test_data_samples:]
            else:
                test_second_input_image_names = None
            test_target_image_names = training_target_image_names[:args.test_data_samples]
            training_target_image_names = training_target_image_names[args.test_data_samples:]
            args.test_data_dir = args.data_dir
            args.test_second_data_dir = args.second_data_dir
            args.test_target_data_dir = args.target_data_dir
        else:
            args.test_data_dir = args.data_dir + "-TEST"
            args.test_second_data_dir = args.second_data_dir + "-TEST"
            args.test_target_data_dir = args.target_data_dir + "-TEST"
            test_input_image_names = load_image_names(args.test_data_dir)
            test_second_input_image_names = load_image_names(
                args.test_second_data_dir) if args.second_input_type else None
            test_target_image_names = load_image_names(
                args.test_target_data_dir)
            if test_second_input_image_names is not None:
                if len(test_input_image_names) > len(
                        test_second_input_image_names):
                    tf.logging.info(
                        "TEST input and second input data are different; shuffling inputs before reducing"
                    )
                    np.random.shuffle(test_input_image_names)
                    test_input_image_names = test_input_image_names[:len(
                        test_second_input_image_names)]
                assert len(test_input_image_names) == len(
                    test_second_input_image_names)
            assert len(test_target_image_names) >= len(test_input_image_names)
            if args.test_data_dir != args.test_target_data_dir or args.test_second_data_dir != args.test_target_data_dir:
                tf.logging.info(
                    "TEST input and target data are different; shuffling targets before reducing"
                )
                np.random.shuffle(test_target_image_names)
                test_target_image_names = test_target_image_names[:len(
                    test_input_image_names)]
    else:
        test_input_image_names = test_second_input_image_names = test_target_image_names = None

    if args.evaluate_discriminator:
        # evaluate D on real images
        tf.logging.warning("Evaluating D")
        evaluate_discriminations(
            args, None, discriminator, training_input_image_names,
            training_second_input_image_names, training_target_image_names,
            test_input_image_names, test_second_input_image_names,
            test_target_image_names)

    if generator:
        # if there's also a generator, evaluate D on generated images
        tf.logging.warning("Evaluating G (evaluating D on generated images)")
        evaluate_discriminations(
            args, generator, discriminator, training_input_image_names,
            training_second_input_image_names, training_target_image_names,
            test_input_image_names, test_second_input_image_names,
            test_target_image_names)

    tf.logging.info("Finished evaluation")
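The checkpoint_number expression used when loading checkpoints above, (args.epoch + 24) // 25, is ceiling division by 25 (presumably because checkpoints are written every 25 epochs; that cadence is an assumption). A quick illustration:

# (epoch + 24) // 25 is ceiling division by 25
for epoch in (1, 25, 26, 100):
    print(epoch, (epoch + 24) // 25)  # -> 1 1, 25 1, 26 2, 100 4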
Example #39
import boto3
import json
import logging
import os
import w3_generic
import payments_apple

from decimal import Decimal
from utils import configure_logging

configure_logging(level="DEBUG")


def response_error(msg=None):
    logging.warning(msg)
    response = {
        "isBase64Encoded": False,
        "statusCode": 401,
        "headers": {},
        "body": json.dumps({
            "message": msg,
        })
    }
    return response


def response_success(receipt_hash, total_usd):
    msg = f'Successfully processed apple receipt with hash: {receipt_hash} for credit of ${total_usd}.'
    logging.debug(msg)
    response = {
        "isBase64Encoded":
Example #40
def main():
    client = InfluxDBClient(host=args.influxdb_host,
                            ssl=args.ssl,
                            verify_ssl=False,
                            port=8086,
                            database=args.database)
    logger = configure_logging('parse_operations')
    with open(args.input_file, 'r', encoding="latin-1") as f:
        line_count = 0
        for chunk in grouper(f, args.batch_size):
            json_points = []
            for line in chunk:
                # zip_longest will backfill any missing values with None, so we need to handle this, otherwise we'll miss the last batch
                line_count += 1
                if line and line.strip().endswith("ms"):
                    values = {}
                    tags = {
                        'project': args.project,
                        'hostname': args.hostname,
                    }
                    try:
                        tags['operation'] = line.split("] ", 1)[1].split()[0]
                    except IndexError as e:
                        logger.error(
                            "Unable to get operation type - {} - {}".format(
                                e, line))
                        break
                    if tags['operation'] in [
                            'command', 'query', 'getmore', 'insert', 'update',
                            'remove', 'aggregate', 'mapreduce'
                    ]:
                        thread = line.split("[", 1)[1].split("]")[0]
                        # Alternately - print(split_line[3])
                        if tags['operation'] == 'command':
                            tags['command'] = line.split(
                                "command: ")[1].split()[0]
                        if "conn" in thread:
                            tags['connection_id'] = thread
                        split_line = line.split()
                        values['duration_in_milliseconds'] = int(
                            split_line[-1].rstrip('ms'))
                        # TODO 2.4.x timestamps have spaces
                        timestamp = parse(split_line[0])
                        if split_line[1].startswith("["):
                            # TODO - Parse locks from 2.6 style loglines
                            # 2.4 Logline:
                            tags['namespace'] = split_line[3]
                            for stat in reversed(split_line):
                                if "ms" in stat:
                                    pass
                                elif ":" in stat:
                                    key, value = stat.split(":", 1)
                                    values[key] = int(value)
                                elif stat == "locks(micros)":
                                    pass
                                else:
                                    break
                        else:
                            # 3.x logline:
                            tags['namespace'] = split_line[5]
                            # TODO - Should we be splitting on "locks:{" instead?
                            pre_locks, locks = line.rsplit("locks:", 1)
                            # Strip duration from locks
                            locks = locks.rsplit(" ", 1)[0]
                            # Add quotation marks around string, so that it is valid JSON
                            locks = re.sub(r"(\w+):", "\"\g<1>\":", locks)
                            locks_document = flatdict.FlatDict(
                                json.loads(locks), delimiter="_")
                            for key, value in locks_document.items():
                                values["locks_{}".format(key)] = int(value)

                            # We work backwards from the end, until we run out of key:value pairs
                            # TODO - Can we assume these are always integers?
                            for stat in reversed(pre_locks.split()):
                                if ":" in stat:
                                    key, value = stat.split(":", 1)
                                    values[key] = int(value)
                                else:
                                    break
                            # TODO - Parse the full query plan for IXSCAN
                            if 'planSummary: ' in line:
                                tags['plan_summary'] = (line.split(
                                    'planSummary: ', 1)[1].split()[0])
                        json_points.append(
                            create_point(timestamp, "operations", values,
                                         tags))
                    else:
                        logger.info(
                            "'{}' is not a recognised operation type - not parsing this line ({})"
                            .format(tags['operation'], line))
            if json_points:
                # TODO - We shouldn't need to wrap this in try/except - should be handled by retry decorator
                try:
                    # TODO - Have a dry-run mode
                    write_points(logger, client, json_points, line_count)
                except Exception as e:
                    logger.error("Retries exceeded. Giving up on this point.")
Example #41
                    "--daemonize",
                    action="store_true",
                    help="Start as daemon")
parser.add_argument("-i",
                    "--icinga-path",
                    default="/var/run/icinga2/cmd/icinga2.cmd",
                    help="Absolut path to icinga2.cmd file")

config = vars(parser.parse_args())

result_writer = ResultWriter(config["rabbit_mq_host"],
                             config["result_exchange"],
                             config["task_exchange"], config["icinga_path"])

logger = get_logger(config["verbose"])
configure_logging(logger, config["logging"])


def signal_handler(signum, frame):
    logging.warn("SIGNAL " + str(signum) + " received! Frame: " + str(frame))
    logging.debug("Stop ResultWriter thread")
    result_writer.stop()
    logging.debug("Wait for ResultWriter thread to join")
    result_writer.join()
    logging.debug("ResultWriter thread joined")
    if config["daemonize"]:
        os.remove("/var/run/monitunnel.pid")
    sys.exit(0)


if "__main__" == __name__:
Example #42
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 
# SUCH DAMAGE.


"""CLI for generating auto cert kit configuration file"""
#@PRODUCT_VERSION@
#@BUILD_NUMBER@
import utils
import sys
import os
utils.configure_logging('auto-cert-kit')

import testbase
import inspect

import operator
import itertools
from test_generators import *
from status import check_for_process

import test_report
import test_runner
from optparse import OptionParser
from exceptions import *

MIN_VLAN = 0
Example #43
import boto3
import json
import logging
import os
import sys
import w3_generic

from web3 import Web3
from boto3.dynamodb.conditions import Key
from decimal import Decimal
from typing import Any, Dict, Optional, Tuple

from utils import configure_logging, is_true

configure_logging(level="INFO")


def update_txns():

    w3wsmap = w3_generic.get_w3wsock_providers()

    logging.info(f'update_txns  reading DB  w3wsmap: {str(w3wsmap)} ')
    results = w3_generic.dynamodb_readall(os.environ['TXNS_TABLE_NAME'])

    num_txns = results['Count']
    logging.info(f'update_txns  num_txns: {num_txns}')

    for txn in results['Items']:
        w3_generic.update_txn(w3wsmap, txn)
    return
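

# w3_generic.dynamodb_readall is not shown in this excerpt. Judging from the
# 'Count' and 'Items' keys used above, it plausibly wraps a boto3 table scan;
# a minimal sketch of that idea (an assumption, not the project's actual code):
def dynamodb_readall_sketch(table_name):
    table = boto3.resource('dynamodb').Table(table_name)
    results = table.scan()
    items = results['Items']
    # Follow LastEvaluatedKey so every page of the scan is included.
    while 'LastEvaluatedKey' in results:
        results = table.scan(ExclusiveStartKey=results['LastEvaluatedKey'])
        items.extend(results['Items'])
    return {'Count': len(items), 'Items': items}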
Example #44
#!/usr/bin/python
import unittest

import test_base
from testbase import *
import sys
from test_generators import *

sys.path.append('../kit/')
import utils

utils.configure_logging('ack_tests')

def expect_system_exit(func, code='0'):
    try:
        func()
    except SystemExit as exp:
        if str(exp) == code:
            #Valid System Exit
            pass
        else:
            raise exp


class DocumentationTests(test_base.DevTestCase):
    """Test that documentation is correctly generated for the testkit"""

    def testPrintTestList(self):
        expect_system_exit(print_all_test_classes)
    
    def testPrintClassInformation(self):
Example #45
File: __init__.py  Project: Juanvvc/scfs
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

__all__=['utils','filesystem','DHT','ring']

import logging
import utils
utils.configure_logging(level=logging.CRITICAL)
import DHT
import os

# If True, security is not actived at all. ONLY FOR DEVELOPMENT
NO_SECURITY = True

default_config_dir=os.path.expanduser('~%s.dfs'%os.path.sep)
""" The dir to use for configurations """
default_config_file=default_config_dir+os.path.sep+'dfsrc'
default_log_file=default_config_dir+os.path.sep+'dfs.log'
default_config=utils.Config()
""" The default dfs.utils.Config object. Although the configuration object is not mandatory, having a global configuration is really
advisable """
dht=None
""" The DHT to use. You MUST set up this variable before
Example #46
def main(inputfiles, treename, ftrain, max_n_pairs, exclude_list):
    '''
    Args:
    -----
        inputfiles: list of strings with the paths to root files
        treename: string, name of the TTree that contains the branches
        ftrain: float in range [0, 1], training fraction
        max_n_pairs: int, maximum number of jet pairs to consider per event
        exclude_list: list of strings, names of branches to drop from the dataframe before building the feature matrix
    Returns:
    --------
        Nothing; the processed train and test dictionaries are saved to
        data/train_dict.hdf5 and data/test_dict.hdf5.
    '''
    # -- configure logging
    utils.configure_logging()
    logger = logging.getLogger('main')

    # -- concatenate all files into a pandas df
    short_filenames = [f.split('/')[-1] for f in inputfiles]
    logger.info('Creating pandas dataframes from: {}'.format(short_filenames))
    #df = pd.concat([pup.root2panda(f, treename) for f in inputfiles], ignore_index=True)
    df_list = []
    for f in inputfiles:
        df_temp = pup.root2panda(f, treename)
        df_temp['sample'] = f.split('/')[-1].split('.')[0]
        df_list.append(df_temp)
    df = pd.concat(df_list, ignore_index=True)

    # -- remove events with more than one correct jet pair
    # -- because that shouldn't happen and complicates the task
    # -- of finding the correct jet pair
    logger.info('Removing events with more than one correct jet pair')
    keep = np.array([sum(yen) for yen in df['isCorrect'].values]) <= 1
    df = df[keep].reset_index(drop=True)

    # -- target
    logger.info('Building one-hot target')
    y = df['isCorrect'].values

    # -- extract array of names of sample of origin
    sample = df['sample'].values

    # -- prepend 1 to all entries in y where there is no correct jet pair,
    # -- 0 if there exists a correct jet pair already
    # -- each entry in y will now have length (n_jet_pairs + 1)
    y_long = np.array([
        np.insert(yev, 0, 1) if sum(yev) == 0 else np.insert(yev, 0, 0)
        for yev in y
    ])
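    # For example, an event with y = [0, 1, 0] (second pair correct) becomes
    # [0, 0, 1, 0], while an event with y = [0, 0] (no correct pair) becomes
    # [1, 0, 0]; the prepended slot acts as a "no correct jet pair" class.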

    # -- weights
    logger.info('Extracting weights from event_weight')
    w = df['event_weight'].values
    del df['event_weight'], df['isCorrect'], df['sample']
    df = df.drop(exclude_list, axis=1)  # maybe in the future do something
    # better with these variables instead of just removing them

    # -- matrix of predictors
    X = df.values
    ix = range(X.shape[0])
    varlist = df.columns.values.tolist()

    # -- maximum number of jet pairs to consider in each event
    # -- can be set to whatever number makes sense
    #max_length = max([len(b) for b in df['Delta_eta_jb']]) + 1
    max_length = max_n_pairs + 1
    logger.info(
        'The max number of jet pairs per event will be {}'.format(max_n_pairs))

    X_train, X_test, y_train, y_test, w_train, w_test,\
    sample_train, sample_test, ix_train, ix_test, scaler_list = shuffle_split_scale_pad(
        X, y_long, w, sample, ix, ftrain, max_length
    )

    logger.info('Saving processed data as hdf5 in data/')
    io.save(
        os.path.join('data', 'train_dict.hdf5'), {
            'X': X_train,
            'y': y_train,
            'w': w_train,
            'ix': ix_train,
            'vars': varlist,
            'sample': sample_train.tolist(),
            'scalers': scaler_list
        })

    io.save(
        os.path.join('data', 'test_dict.hdf5'), {
            'X': X_test,
            'y': y_test,
            'w': w_test,
            'ix': ix_test,
            'vars': varlist,
            'sample': sample_test.tolist(),
            'scalers': scaler_list
        })
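
# A quick way to inspect the saved output (assuming the same `io` module also
# exposes a matching load function, which this excerpt does not show):
#     train = io.load(os.path.join('data', 'train_dict.hdf5'))
#     print(train['vars'], train['X'].shape)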
Example #47
File: w3.py  Project: baajur/orchid
import logging
import os

from abis import lottery_abi, token_abi
from utils import configure_logging, get_secret
from web3 import Web3


configure_logging()


def refresh_w3(w3=None):
    if not w3 or not w3.isConnected():
        logging.debug('Refreshing Web3 Connection...')
        w3 = Web3(Web3.WebsocketProvider(os.environ['WEB3_WEBSOCKET'], websocket_timeout=900))
    if not w3 or not w3.isConnected():
        raise Exception('Unable to establish connection to Web3!')
    return w3


def get_token_name(address: str = os.environ['TOKEN']):
    logging.debug(f'get_token_name() address: {address}')
    w3 = refresh_w3()
    token_contract = w3.eth.contract(
        abi=token_abi,
        address=address,
    )
    token_name = token_contract.functions.name().call()
    logging.debug(f'Token Name: {token_name}')
    return token_name
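

# The same pattern extends to other read-only ERC-20 calls. A sketch, assuming
# the token_abi used above also covers the standard symbol() function (an
# assumption; the ABI itself is not shown in this excerpt):
def get_token_symbol(address: str = os.environ['TOKEN']):
    logging.debug(f'get_token_symbol() address: {address}')
    w3 = refresh_w3()
    token_contract = w3.eth.contract(
        abi=token_abi,
        address=address,
    )
    token_symbol = token_contract.functions.symbol().call()
    logging.debug(f'Token Symbol: {token_symbol}')
    return token_symbol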
Example #48
    already there, we just need to run "pip install -r requirements.txt" to get
    the new dependencies.
    """
    if not os.path.exists(VIRTUALENV_DIR):
        run_cmd('virtualenv %s' % VIRTUALENV_DIR)
        
    run_cmd('%s/bin/pip install -r %s/requirements.txt' % (VIRTUALENV_DIR,
                                                           DJANGO_MOTH_DIR,))

def start_daemons(log_directory=ARTIFACTS_DIR):
    """
    Start the django application in HTTP and HTTPS.
    """
    cmd = '%s/bin/python %s/start_daemons.py --log-directory=%s' % (VIRTUALENV_DIR,
                                                                    DJANGO_MOTH_DIR,
                                                                    log_directory)
    run_cmd(cmd)

def run_cmd(cmd, cwd=None):
    logging.debug('[s] %s (cwd: %s)' % (cmd, cwd))
    p = subprocess.Popen(shlex.split(cmd), cwd=cwd)
    p.wait()
    logging.debug('[e] %s (retcode: %s) (cwd: %s)' % (cmd, p.returncode, cwd))
    return p.returncode
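
# Note: the callers above ignore run_cmd's return code; a stricter variant could
# bail out on failure, e.g. (illustrative only):
#     if run_cmd('%s/bin/python %s/start_daemons.py' % (VIRTUALENV_DIR, DJANGO_MOTH_DIR)) != 0:
#         raise RuntimeError('start_daemons.py failed')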

if __name__ == '__main__':
    configure_logging(LOG_FILE)
    get_source_code()
    install_dependencies()
    start_daemons()
Example #49
import sys
import os
import logging
import time
import json
import fcntl
import subprocess

import utils

logger = utils.configure_logging()
logger.info("Outgoing")

configuration = json.load(open("configuration.json"))
instances = json.load(open("instances.json"))

critic = utils.Critic()

outgoing_dir = os.path.join(configuration["queue-dir"], "outgoing")
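
# utils.locked_directory is not shown in this excerpt. Given the fcntl import
# above, it is plausibly a context manager that holds an exclusive flock on a
# lock file inside the directory; a minimal sketch of that idea (an assumption,
# not the project's actual implementation):
import contextlib

@contextlib.contextmanager
def locked_directory_sketch(path):
    with open(os.path.join(path, ".lock"), "w") as lock_file:
        fcntl.flock(lock_file, fcntl.LOCK_EX)  # block until the lock is free
        try:
            yield
        finally:
            fcntl.flock(lock_file, fcntl.LOCK_UN)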

try:
    while True:
        with utils.locked_directory(configuration["queue-dir"]):
            filenames = [filename for filename in os.listdir(outgoing_dir)
                         if filename.endswith(".json")]

        if filenames:
            filename = filenames.pop(0)

            outgoing_filename = os.path.join(outgoing_dir, filename)
            with open(outgoing_filename) as outgoing_file:
Example #50
def main():
    global log_tracebacks  # can be updated

    import argparse

    parser = argparse.ArgumentParser(description="Collect metrics from amqp and dispatch them to carbon daemon.")
    parser.add_argument(
        "-c",
        "--config",
        action="append",
        default=list(),
        help="Additional configuration files to read. Can be specified"
        " multiple times, values from later ones override values in the former.",
    )
    parser.add_argument(
        "--delete-queue",
        nargs="?",
        default=False,
        help="Delete queue before re-declaring it,"
        ' useful to change bindings. Accepts "if-empty" argument,'
        " overrides net.amqp.queue.delete_first configuration parameter.",
    )
    parser.add_argument("-n", "--dry-run", action="store_true", help="Do not actually send data.")
    parser.add_argument("--dump", action="store_true", help="Dump polled data to stdout.")
    parser.add_argument("--debug", action="store_true", help="Verbose operation mode.")
    optz = parser.parse_args()

    cfg = AttrDict.from_yaml("{}.yaml".format(os.path.splitext(os.path.realpath(__file__))[0]), if_exists=True)
    for k in optz.config:
        cfg.update_yaml(k)
    configure_logging(cfg.logging, logging.DEBUG if optz.debug else logging.WARNING)
    logging.captureWarnings(cfg.logging.warnings)

    cfg.net.amqp.queue.delete_first = optz.delete_queue if optz.delete_queue is not None else True
    optz.dump = optz.dump or cfg.debug.dump_data
    optz.dry_run = optz.dry_run or cfg.debug.dry_run
    log_tracebacks = cfg.logging.tracebacks

    dst = cfg.net.carbon.host
    if isinstance(dst, types.StringTypes):
        dst = dst.rsplit(":", 1)
        dst = dst[0], int(dst[1]) if len(dst) > 1 else cfg.net.carbon.default_port
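        # e.g. "carbon.example.net:2004" -> ("carbon.example.net", 2004); a bare
        # hostname falls back to cfg.net.carbon.default_port.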

    dump = (
        (lambda metric, val, ts: print("{} {} {}".format(metric, val, ts)))
        if optz.dump
        else lambda metric, val, ts: None
    )
    if not optz.dry_run:
        carbon = CarbonClient(dst)
        dst = lambda metric, ts, val, val_raw: (dump(metric, val, ts), carbon.send(metric, val, ts))
    else:
        dst = lambda metric, ts, val, val_raw: dump(metric, val, ts)
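    # dst(metric, ts, val, val_raw): dumps the value to stdout when dumping is
    # enabled and, unless --dry-run was given, also forwards it to carbon.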

    amqp = AMQPHarvester(
        host=cfg.net.amqp.host,
        auth=(cfg.net.amqp.user, cfg.net.amqp.password),
        exchange=cfg.net.amqp.exchange,
        queue=cfg.net.amqp.queue,
        heartbeat=cfg.net.amqp.heartbeat,
        log=logging.getLogger("amqp_carbon.amqp_link"),
        callback=dst,
        exclusive=cfg.net.amqp.consume.exclusive,
        ack_batch=cfg.net.amqp.consume.ack_batch,
    )

    amqp.harvest()