Example #1
def main():
    parser, options, arguments = parse_options()
    if not arguments:
        parser.print_help()
        return

    # configure logging
    setup_logging(options.loglevel, options.logfile)

    proxy = TransparentProxy(arguments[0])
    throttler = WsgiThrottler(proxy, options.pool_size)

    # SSL settings
    if options.certfile and options.keyfile:
        ssl_settings = {
            "certfile": options.certfile,
            "keyfile": options.keyfile
        }
    else:
        ssl_settings = {}

    main_logger = getLogger(__name__)
    main_logger.info(
        "Proxying %s on %s:%i with a maximum of %i concurrent requests" % (
            arguments[0],
            options.interface,
            options.port,
            options.pool_size,
        ))

    server = WSGIServer((options.interface, options.port),
                        throttler,
                        log=sys.stdout,
                        **ssl_settings)
    server.serve_forever()
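
Every example in this collection calls a project-local setup_logging helper rather than configuring logging inline. As a point of reference, a minimal sketch of such a helper is shown below; the signature mirrors the setup_logging(loglevel, logfile) call in Example #1, but the body is an illustrative assumption, not any of the actual implementations behind these examples.

import logging
import sys


def setup_logging(loglevel=logging.INFO, logfile=None):
    # Hypothetical helper: log to a file when one is given, else to stderr.
    handler = logging.FileHandler(logfile) if logfile else logging.StreamHandler(sys.stderr)
    logging.basicConfig(
        level=loglevel,
        format="%(asctime)s %(name)s %(levelname)s: %(message)s",
        handlers=[handler],
    )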
Example #2
def main(argv=None):
    """ main. """
    #setup_logging() # Setup proto logging
    conf = load_config(argv)
    setup_logging(**dict(conf.LogConfig))
    map_contacts = get_map_contacts(conf)
    dump_map_contacts(conf, map_contacts)
Example #3
def main():
    parser, options, arguments = parse_options()
    if not arguments:
        parser.print_help()
        return
    
    # configure logging
    setup_logging(options.loglevel, options.logfile)
    
    proxy = TransparentProxy(arguments[0])
    throttler = WsgiThrottler(proxy, options.pool_size)
    
    # SSL settings
    if options.certfile and options.keyfile:
        ssl_settings = {"certfile": options.certfile, "keyfile": options.keyfile}
    else:
        ssl_settings = {}
    
    main_logger = getLogger(__name__)
    main_logger.info("Proxying %s on %s:%i with a maximum of %i concurrent requests" % (
        arguments[0],
        options.interface,
        options.port,
        options.pool_size,
    ))
    
    server = WSGIServer((options.interface, options.port), throttler, log=sys.stdout, **ssl_settings)
    server.serve_forever()
Example #4
def main():
    setup_logging(enabled=True)
    if sys.stdin.isatty():
        run(sys.argv)
    else:
        argv = sys.stdin.read().split(' ')
        run(argv)
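
Note on the pattern above: the isatty() check lets the script take its argv either from the command line or from data piped on stdin, e.g. echo "-v input.txt" | python script.py (hypothetical invocation). Splitting on a single space is fragile for quoted arguments; shlex.split(sys.stdin.read()) would be a more robust alternative.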
Example #5
File: daemon.py Project: quodt/etaui
def main(args=None, cb=sys.exit):
    try:
        parser = argparse.ArgumentParser(
            description="Monitoring daemon for ETA",
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            )
        parser.add_argument('-d', '--database-path',
                            help="Path to database file",
                            default='var/eta.db')
        parser.add_argument('-t', '--tty',
                            help="TTY of connected ETA",
                            default='/dev/ttyUSB0')
        parser.add_argument('-l', '--level',
                            help="Log level",
                            default='INFO',
                            choices=levels)
        args = parser.parse_args(args)

        setup_logging(args.level)
        init_db(args.database_path)
        monitor = Monitor(args.tty)
        monitor.add_handler(create_entry)
        monitor.start()
    except KeyboardInterrupt:
        log.info("Exiting.")
        return
Example #6
def main():
    if not os.environ.get('BOT_TOKEN'):
        exit("ERROR: `BOT_TOKEN` env variable is required.")
    setup_logging()
    logger.info('Starting scheduler.')
    scheduler.start()
    logger.info('Creating bot with given token.')
    logger.info('Registering all message handlers.')
    dispatcher.register_message_handler(start_ticket_info_survey,
                                        commands=['start'])
    dispatcher.register_message_handler(cancel_order_handler,
                                        commands=['cancel'],
                                        state='*')
    get_departure_station(dispatcher)
    get_departure_station_input(dispatcher)
    get_arrival_station(dispatcher)
    get_arrival_station_input(dispatcher)
    get_wagon_number(dispatcher)
    get_seats_number(dispatcher)
    get_date(dispatcher)
    get_final_confirmation(dispatcher)
    get_train_code(dispatcher)
    get_wagon_type(dispatcher)
    list_all_jobs(dispatcher)
    cancel_order_by_id(dispatcher)
    logger.info('Started bot polling.')
    executor.start_polling(dispatcher)
Example #7
def main():
    setup_logging()
    logging.info('Starting DB setup')
    setup_db()
    logging.info('Fetching EPC data')
    fetch_epc_data()
    logging.info('Writing EPC data to DB')
    write_data()
Example #8
def main():
    global MODEL
    parser = argparse.ArgumentParser(
        description="AI for Earth Land Cover Worker")

    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable verbose debugging",
                        default=False)

    parser.add_argument("--port",
                        action="store",
                        type=int,
                        help="Port we are listenning on",
                        default=0)
    parser.add_argument("--model",
                        action="store",
                        dest="model",
                        choices=["keras_dense", "pytorch"],
                        help="Model to use",
                        required=True)
    parser.add_argument("--model_fn",
                        action="store",
                        dest="model_fn",
                        type=str,
                        help="Model fn to use",
                        default=None)
    parser.add_argument("--fine_tune_layer",
                        action="store",
                        dest="fine_tune_layer",
                        type=int,
                        help="Layer of model to fine tune",
                        default=-2)
    parser.add_argument("--fine_tune_seed_data_fn",
                        action="store",
                        dest="fine_tune_seed_data_fn",
                        type=str,
                        help="Path to npz containing seed data to use",
                        default=None)
    parser.add_argument("--gpu",
                        action="store",
                        dest="gpuid",
                        type=int,
                        help="GPU to use",
                        required=True)

    args = parser.parse_args(sys.argv[1:])

    # Setup logging
    log_path = os.path.join(os.getcwd(), "logs")
    setup_logging(log_path, "worker")

    # Setup model
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "" if args.gpuid is None else str(
        args.gpuid)
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def load_cli_config(argv=None, config=None):
    if config is None:
        config = Config()
    config.argparse_loader = get_argparse_loader()
    try:
        cli_config = config.argparse_loader.load_config(argv)
    except ArgumentError as exc:
        config_runtime_exception(exc, config)
    setup_logging(**cli_config.LogConfig)
    ROOT_LOGGER.info("cli config is \n%s", pprint.pformat(cli_config))
    return cli_config
Example #10
def setLogFileName():
    try:
        if not os.path.exists(const.LOG_FOLDER_PATH):
            os.makedirs(const.LOG_FOLDER_PATH)
    except OSError as e:
        print(e)
    conf = {}
    conf['name'] = datetime.today().strftime('%Y-%m-%d') + '-log.log'
    conf['verbose'] = const.LOG_LEVEL
    conf['log_path'] = const.LOG_FOLDER_PATH
    conf['log_file'] = os.path.join(conf['log_path'], conf['name'])
    logpy.setup_logging(conf)
    log.info("change log file name to:" + conf['name'])
Example #11
File: run.py Project: nwilming/bifl
def main():
    setup_logging()
    for fn in sys.argv[1:] or ["input.png"]:
        im = cv.LoadImage(fn)
        fts = extract(im)

        pfn = fn + "-features.dat"
        info("Storing feature pickle in %s", pfn)
        with open(pfn, "wb") as f:
            dump(fts, f)

        for l, layer in enumerate(fts):
            for fname, fval in layer.items():
                ffn = "%s-feat-%d-%s.png" % (fn, l, fname)
                info("Rendering feature %s", ffn)
                mat2pil(fval).save(ffn)
Example #12
def run():
    '''Main application entry point'''
    files_processed = False
    setup_log_dir()
    log.setup_logging()
    logging.info("Running load to schema %s", config.DB_STG_SCHEMA)
    input_files = get_input_files()
    try:
        if len(input_files) != config.EXPECTED_FILE_COUNT:
            raise ValueError('Not all expected extract files are present')

        process_file(input_files[0], clean_tables=True, complete=True)

        files_processed = True
    except Exception as ex:
        logging.error(str(ex), exc_info=True)
Example #13
def main():
    print('Started.')
    config = configparser.ConfigParser()
    config.read('config.cfg')

    image_num = int(config.get('DEFAULTS', 'image_num'))
    output_location = config.get('DEFAULTS', 'output_location')
    bot_name = config.get('DEFAULTS', 'bot')
    subreddit_name = config.get('DEFAULTS', 'subreddit')
    time_period = config.get('DEFAULTS', 'time')
    log_location = config.get('DEFAULTS', 'log_location')

    logger = log.setup_logging(log_location)
    start = datetime.now()
    logger.debug('Starting...')

    # Establish bot connection with Reddit
    reddit = praw.Reddit(bot_name)
    subreddit = reddit.subreddit(subreddit_name)

    print('Getting images... please wait...')
    downloadimages.getImages(subreddit, time_period, image_num,
                             output_location)

    end = datetime.now()
    logger.debug('Completed. Time taken: %s', str(end - start))
    print('Completed! The images are stored at \'' + output_location + '\'')
Example #14
def load_config(argv=None, proto_config=None):
    """
    Successively merge config files from different sources, overriding the previous.

    Config merge order:
    - proto config          (config provided initially to load_config)
    - local config          (config file specified after analysing proto and cli config)
    - cli config            (command line arguments)
    """
    if proto_config is None:
        proto_config = Config()
    config = RichConfig()
    config.merge_source('proto', proto_config)
    setup_logging(**config.LogConfig)
    cli_config = load_cli_config(argv, config)
    for trait, group in [('config_path', 'BaseConfig'),
                         ('config_dir', 'BaseConfig'),
                         ('stream_log_level', 'LogConfig')]:
        if trait in getattr(cli_config, group):
            setattr(getattr(config, group), trait,
                    getattr(getattr(cli_config, group), trait))
    # if 'config_dir' in cli_config.BaseConfig:
    #     config.BaseConfig.config_dir = cli_config.BaseConfig.config_dir
    # if 'stream_log_level' in cli_config.LogConfig:
    #     config.LogConfig.stream_log_level = cli_config.LogConfig.stream_log_level
    # TODO: replace this with a custom traitlet subclass "immediate" where settings are applied as soon as they are loaded
    # TODO: implement partial merge like this
    # config.partial_merge(
    #     cli_config, [
    #         ('BaseConfig', ['config_path', 'config_dir']),
    #         ('LogConfig', ['stream_log_level']),
    #     ]
    # )
    file_config = load_file_config(config)
    config.merge_source('file', file_config)
    config.merge_source('cli', cli_config)
    try:
        validate_config(config)
    except ConfigException as exc:
        config_runtime_exception(exc, config)
    if not config_quiet(config):
        ROOT_LOGGER.info("config is \n%s", pprint.pformat(config))
    return config
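
The docstring above defines a strict precedence: proto, then file, then cli, with later sources overriding earlier ones trait by trait. The real Config/RichConfig classes are project-specific, but the precedence itself can be illustrated with plain dicts (the values below are stand-ins):

proto_cfg = {'stream_log_level': 'WARNING', 'config_path': 'defaults.yaml'}  # proto config
file_cfg = {'stream_log_level': 'INFO'}                                      # local config file
cli_cfg = {'stream_log_level': 'DEBUG'}                                      # command line

merged = {**proto_cfg, **file_cfg, **cli_cfg}  # later sources win
# merged == {'stream_log_level': 'DEBUG', 'config_path': 'defaults.yaml'}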
Example #15
def main():

    log.setup_logging(conf.APP_NAME, False)

    (option, ignore) = parse_commandline()

    if not conf.check_config():
        sys.exit(1)

    logging.info("%s startup by %s" % (conf.APP_NAME, sys.argv))

    if option.clean:
        crawl_clean()
        return

    crawler = Crawler(testmode=option.test)
    crawler.start()

    logging.info("%s finished ..." % conf.APP_NAME)
Example #16
def main():
    setup_logging()
    logger.info('Start app')
    try:
        comic_info, filename = download_random_comic()
        logger.info(f'Downloaded comic: {filename}')

        upload_image_to_group_wall(access_token=CONFIG['VK_TOKEN'],
                                   group_id=CONFIG['GROUP_ID'],
                                   image_path=filename,
                                   image_comment=comic_info['alt'])
        logger.info('Comic uploaded to the group wall')

    except HTTPError:
        logger.warning('HTTP Error!')

    try:
        os.remove(filename)
        logger.info('Image removed')
    except OSError:
        pass
Example #17
def main():
    logger = logging.getLogger('main')
    setup_logging()
    logger.info('Program started')

    args = parse_args()
    if len(sys.argv) <= 1:
        logger.info('No arguments passed. Program cannot be started')
        print('Please input at least one argument and run the script again')
        return
    logger.info(f'Passed args: {args}')
    hubble_collection = args.hubble
    spacex_launch = args.spacex
    qty = args.qty
    instagram_timeout = args.instagram

    if hubble_collection:
        fetch_hubble_collection(hubble_collection, qty=qty)
    if spacex_launch:
        fetch_spacex_latest_launch(launch=spacex_launch, qty=qty)
    if instagram_timeout:
        upload_photos_to_instagram(timeout=instagram_timeout, qty=qty)
Example #18
def main():
    args = parse_args()
    if args.verbose:
        log.setup_logging(logging.DEBUG)
    else:
        log.setup_logging(logging.INFO)

    # Allow a redirect URI over plain HTTP (no TLS):
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"

    # Authorize the app:
    s = onenote_auth.get_session(args.new_session)

    output_dir = pathlib.Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    logger.info('Writing to "%s"', output_dir)

    start_time = time.perf_counter()
    pipe = pipeline.Pipeline(s, args.notebook, output_dir)
    pages = 0
    try:
        for page_count, page in enumerate(
                onenote.get_notebook_pages(s, args.notebook), 1):
            log_msg = f'Page {page_count}: {page["title"]}'
            if args.start_page is None or page_count >= args.start_page:
                logger.info(log_msg)
                pipe.add_page(page)
                pages += 1
            else:
                logger.info(log_msg + ' [skipped]')
            if args.max_pages and page_count > args.max_pages:
                break
    except onenote.NotebookNotFound as e:
        logger.error(str(e))

    pipe.done()
    stop_time = time.perf_counter()
    logger.info('Done!')
    logger.info('%s pages in %.1f seconds', pages, stop_time - start_time)
Example #19
    def main(self):
        (self.options, self.args) = self.parser.parse_args()

        self._load_defaults_from_rc(self.options)

        # Setup logging, this must happen early!
        setup_logging(self.options.log_file, self.options.log_level)
        log.debug("Running cli commands: %s" % self.name)

        self._validate_options()

        if len(sys.argv) < 2:
            self.parser.error("Please enter at least 2 args")

        try:
            # connect to smugmug.com
            self.smugmug = Smugmug(self.options.login, self.options.password)

            # do the work
            self._do_command()
        except SmugmugException as e:
            print(e.value)
            sys.exit(1)
Example #20
    def main(self):
        (self.options, self.args) = self.parser.parse_args()

        self._load_defaults_from_rc(self.options)

        # Setup logging, this must happen early!
        setup_logging(self.options.log_file, self.options.log_level)
        log.debug("Running cli commands: %s", self.name)

        self._validate_options()

        if len(sys.argv) < 2:
            self.parser.error("Please enter at least 2 args")

        try:
            # connect to smugmug.com
            self.smugmug = Smugmug(self.options.login, self.options.password)

            # do the work
            self._do_command()
        except SmugmugException as e:
            print(e.value)
            sys.exit(1)
Example #21
def main():
    setup_logging()
    logger = get_logger(log_bot_updater, CONFIG['CHAT_ID'], 'my_logger')
    logger.info('Bot started')

    # start polling dvmn API
    timestamp = None
    while True:
        try:
            lessons_info, timestamp = poll_dvmn_lesson_info(timestamp=timestamp)
        except ReadTimeout:
            continue
        except ConnectionError:
            logger.error('Connection Error')
        except HTTPError:
            logger.error('HTTPError')
        except Exception:
            logger.error('Unexpected exception!', exc_info=True)
        else:
            for lesson_info in lessons_info or []:
                logger.info(f'Get lesson info from dvmn.org :\n{lesson_info}')
                formatted_info = format_lesson_info(lesson_info)
                updater.bot.send_message(chat_id=CONFIG['CHAT_ID'], text=formatted_info)
Example #22
    def __init__(self, server=None, log_level=0, log=None):
        """
        Accepts keyword args for Server and log_level

        @type server: str
        @param server: Zabbix Server URL
        @type log_level: int
        @param log_level: Logging level for this class
        
        """
        self.zapi = ZabbixAPI(server=server, log_level=log_level)
        if log is None:
            self._log = setup_logging()
        else:
            self._log = log
Example #23
__version__ = "0.1.1-16.04"

import log, util, logging

log.setup_logging()

logger = logging.getLogger(__name__)


def main():
    if util.not_sudo():
        logger.critical("Restart script as root")

    logger.info("Installing packages...")
    if util.install_packages():
        logger.info("OK")
    else:
        logger.critical("Failed to install packages")

    logger.info("Applying sysctl parameters...")
    if util.setup_sysctl():
        logger.info("OK")
    else:
        logger.critical("Failed to apply sysctl parameters")

    logger.info("Creating random passwords...")
    if util.setup_passwords():
        logger.info("OK")
    else:
        logger.critical("Failed to create random passwords")
Example #24
import logging
from locust import HttpLocust, TaskSet, task, web
from flask import request, Response

from hubTasks import HubTasks, get_response_as_json
from hubUser import HubUser
from log import setup_logging
import utils

setup_logging('/mnt/log/batch.log')
logger = logging.getLogger(__name__)


class BatchUser(HubUser):
    def name(self):
        return "batch_test_"

    def start_channel(self, payload, tasks):
        payload["storage"] = "BATCH"

    def start_webhook(self, config):
        # First User - create channel - posts to channel, parallel group callback on channel
        # Second User - create channel - posts to channel, parallel group callback on channel
        # Third User - create channel - posts to channel, minute group callback on channel
        config['parallel'] = 10
        config['batch'] = "SINGLE"
        if config['number'] == 3:
            config['parallel'] = 1
            config['batch'] = "MINUTE"

Example #25
from log import setup_logging
import logging
logger = setup_logging()

def b():
    logger.info("b")
Example #26
File: large.py Project: flightstats/hub
import json
import logging
import os
import random
import string
from locust import HttpLocust, TaskSet, task, web
from flask import request, Response

from hubTasks import HubTasks
from hubUser import HubUser
from log import setup_logging
import utils

setup_logging('/mnt/log/large.log')
logger = logging.getLogger(__name__)


class LargeUser(HubUser):
    def name(self):
        return "large_test_"

    def start_channel(self, payload, tasks):
        pass

    def start_webhook(self, config):
        if config['number'] == 1:
            config['webhook_channel'] = config['channel'] + "_replicated"
            url = "/channel/" + config['webhook_channel']
            headers = {"Content-Type": "application/json"}
            channel_config = {
                "name": config['webhook_channel'],
Example #27
File: main.py Project: kdoetz/locust
def main():
    parser, options, arguments = parse_options()
    #print "Options:", options, dir(options)
    #print "Arguments:", arguments
    #print "largs:", parser.largs
    #print "rargs:", parser.rargs
    
    # setup logging
    setup_logging(options.loglevel, options.logfile)
    logger = logging.getLogger(__name__)
    
    if options.show_version:
        print("Locust %s" % version)
        sys.exit(0)

    locustfile = find_locustfile(options.locustfile)
    if not locustfile:
        logger.error("Could not find any locustfile! See --help for available options.")
        sys.exit(1)

    docstring, locusts = load_locustfile(locustfile)

    if options.list_commands:
        print("Available Locusts:")
        for name in locusts:
            print("    " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if arguments:
        missing = set(arguments) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(arguments) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        locust_classes = locusts.values()
    
    if options.show_task_ratio:
        print("\n Task ratio per locust class")
        print("-" * 80)
        inspectlocust.print_task_ratio(locust_classes)
        print("\n Total task ratio")
        print("-" * 80)
        inspectlocust.print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    
    if options.show_task_ratio_confluence:
        print("\nh1. Task ratio per locust class")
        print()
        inspectlocust.print_task_ratio_confluence(locust_classes)
        print("\nh1. Total task ratio")
        print()
        inspectlocust.print_task_ratio_confluence(locust_classes, total=True)
        sys.exit(0)
    
    # if --master is set, implicitly set --web
    if options.master:
        options.web = True

    if options.web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor on port 8089")
        main_greenlet = gevent.spawn(web.start, locust_classes, options.hatch_rate, options.num_clients, options.num_requests, options.ramp)
    
    # enable/disable gzip in WebLocust's HTTP client
    WebLocust.gzip = options.gzip

    if not options.master and not options.slave:
        core.locust_runner = LocalLocustRunner(locust_classes, options.hatch_rate, options.num_clients, options.num_requests, options.host)
        # spawn client spawning/hatching greenlet
        if not options.web:
            core.locust_runner.start_hatching(wait=True)
            main_greenlet = core.locust_runner.greenlet
    elif options.master:
        core.locust_runner = MasterLocustRunner(locust_classes, options.hatch_rate, options.num_clients, num_requests=options.num_requests, host=options.host, master_host=options.master_host)
    elif options.slave:
        core.locust_runner = SlaveLocustRunner(locust_classes, options.hatch_rate, options.num_clients, num_requests=options.num_requests, host=options.host, master_host=options.master_host)
        main_greenlet = core.locust_runner.greenlet
    
    if options.ramp:
        import rampstats
        from rampstats import on_request_success, on_report_to_master, on_slave_report
        import events
        if options.slave:
            events.report_to_master += on_report_to_master
        if options.master:
            events.slave_report += on_slave_report
        else:
            events.request_success += on_request_success
    
    if options.print_stats or (not options.web and not options.slave):
        # spawn stats printing greenlet
        gevent.spawn(stats_printer)
    
    try:
        logger.info("Starting Locust %s" % version)
        main_greenlet.join()
    except KeyboardInterrupt:
        time.sleep(0.2)
        print_stats(core.locust_runner.request_stats)
        print_percentile_stats(core.locust_runner.request_stats)
        print_error_report()
        logger.info("Got KeyboardInterrupt. Exiting, bye..")
Example #28
def main():
    parser, options, arguments = parse_options()

    # setup logging
    setup_logging(options.loglevel, options.logfile)
    logger = logging.getLogger(__name__)

    if options.show_version:
        print("Locust %s" % (version,))
        sys.exit(0)

    locustfile = find_locustfile(options.locustfile)
    if not locustfile:
        logger.error(
            "Could not find any locustfile! Ensure file ends in '.py' and see --help for available options."
        )
        sys.exit(1)

    docstring, locusts = load_locustfile(locustfile)

    if options.list_commands:
        console_logger.info("Available Locusts:")
        for name in locusts:
            console_logger.info("    " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if arguments:
        missing = set(arguments) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(arguments) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        locust_classes = locusts.values()

    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes),
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)

    # if --master is set, make sure --no-web isn't set
    if options.master and options.no_web:
        logger.error(
            "Locust can not run distributed with the web interface disabled (do not use --no-web and --master together)"
        )
        sys.exit(0)

    if not options.no_web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor at %s:%s" %
                    (options.web_host or "*", options.port))
        main_greenlet = gevent.spawn(web.start, locust_classes, options)

    if not options.master and not options.slave:
        runners.locust_runner = LocalLocustRunner(locust_classes, options)
        # spawn client spawning/hatching greenlet
        if options.no_web:
            runners.locust_runner.start_hatching(wait=True)
            main_greenlet = runners.locust_runner.greenlet
    elif options.master:
        runners.locust_runner = MasterLocustRunner(locust_classes, options)
    elif options.slave:
        try:
            runners.locust_runner = SlaveLocustRunner(locust_classes, options)
            main_greenlet = runners.locust_runner.greenlet
        except socket.error as e:
            logger.error("Failed to connect to the Locust master: %s", e)
            sys.exit(-1)
Example #29
import logging
from flask import Flask

from log import setup_logging
setup_logging(logging.DEBUG)

import version
import db

# last import
import api

app = Flask(__name__)
app.register_blueprint(api.blueprint, url_prefix="/api/1.0")

if __name__ == "__main__":
    from werkzeug import SharedDataMiddleware
    import os

    db.generate_fake_data()

    app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
      '/': os.path.join(os.path.dirname(__file__), 'static')
    })

    # http://werkzeug.pocoo.org/docs/serving/#werkzeug.serving.run_simple
    app.run(debug=True,
            threaded=True,
            use_reloader=True,
            reloader_interval=2)
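
A note on import order in the module above: setup_logging(logging.DEBUG) runs before the version, db, and api imports, so any module-level logging.getLogger(...) calls in those modules see an already-configured logging system. The "# last import" comment suggests api additionally relies on side effects of the earlier imports.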
Example #30
def main():
    parser, options, arguments = parse_options()
    #print "Options:", options, dir(options)
    #print "Arguments:", arguments
    #print "largs:", parser.largs
    #print "rargs:", parser.rargs
    
    # setup logging
    setup_logging(options.loglevel, options.logfile)
    logger = logging.getLogger(__name__)
    
    if options.show_version:
        print("Locust %s" % version)
        sys.exit(0)

    locustfile = find_locustfile(options.locustfile)
    if not locustfile:
        logger.error("Could not find any locustfile! See --help for available options.")
        sys.exit(1)

    docstring, locusts = load_locustfile(locustfile)

    if options.list_commands:
        print("Available Locusts:")
        for name in locusts:
            print("    " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if arguments:
        missing = set(arguments) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(arguments) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        locust_classes = locusts.values()
    
    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info( "-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes), 
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)
    
    # if --master is set, make sure --no-web isn't set
    if options.master and options.no_web:
        logger.error("Locust can not run distributed with the web interface disabled (do not use --no-web and --master together)")
        sys.exit(0)

    if not options.no_web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor on port 8089")
        main_greenlet = gevent.spawn(web.start, locust_classes, options.hatch_rate, options.num_clients, options.num_requests, options.ramp)
    
    # enable/disable gzip in WebLocust's HTTP client
    WebLocust.gzip = options.gzip

    if not options.master and not options.slave:
        runners.locust_runner = LocalLocustRunner(locust_classes, options.hatch_rate, options.num_clients, options.num_requests, options.host)
        # spawn client spawning/hatching greenlet
        if options.no_web:
            runners.locust_runner.start_hatching(wait=True)
            main_greenlet = runners.locust_runner.greenlet
    elif options.master:
        runners.locust_runner = MasterLocustRunner(locust_classes, options.hatch_rate, options.num_clients, num_requests=options.num_requests, host=options.host, master_host=options.master_host)
    elif options.slave:
        runners.locust_runner = SlaveLocustRunner(locust_classes, options.hatch_rate, options.num_clients, num_requests=options.num_requests, host=options.host, master_host=options.master_host)
        main_greenlet = runners.locust_runner.greenlet
    
    if options.print_stats or (options.no_web and not options.slave):
        # spawn stats printing greenlet
        gevent.spawn(stats_printer)
    
    try:
        logger.info("Starting Locust %s" % version)
        main_greenlet.join()
    except KeyboardInterrupt:
        events.quitting.fire()
        time.sleep(0.2)
        print_stats(runners.locust_runner.request_stats)
        print_percentile_stats(runners.locust_runner.request_stats)
        print_error_report()
        logger.info("Got KeyboardInterrupt. Exiting, bye..")
Example #31
def main(config_file=None, logging_params=DEFAULT_LOGGING):

    # Setup logging
    log.setup_logging(logging_params)
    mlog = log.get_logger(__name__)

    # Set config
    config = DEFAULTS.deepcopy()
    if config_file is not None:
        config.merge(NameSpace(load_yaml_config(config_file)))

    # Set niceness
    current_niceness = os.nice(0)
    os.nice(config.niceness - current_niceness)
    mlog.info('Changing process niceness from %d to %d.  Confirm:  %d' %
              (current_niceness, config.niceness, os.nice(0)))

    # Find acquisition files
    acq_files = sorted(glob(os.path.join(config.data_dir, config.acq, "*.h5")))
    nfiles = len(acq_files)

    # Determine time range of each file
    findex = []
    tindex = []
    for ii, filename in enumerate(acq_files):
        subdata = andata.CorrData.from_acq_h5(filename, datasets=())

        findex += [ii] * subdata.ntime
        tindex += range(subdata.ntime)

    findex = np.array(findex)
    tindex = np.array(tindex)

    # Determine transits within these files
    transits = []

    data = andata.CorrData.from_acq_h5(acq_files, datasets=())

    solar_rise = ephemeris.solar_rising(data.time[0] - 24.0 * 3600.0,
                                        end_time=data.time[-1])

    for rr in solar_rise:

        ss = ephemeris.solar_setting(rr)[0]

        solar_flag = np.flatnonzero((data.time >= rr) & (data.time <= ss))

        if solar_flag.size > 0:

            solar_flag = solar_flag[::config.downsample]

            tval = data.time[solar_flag]

            this_findex = findex[solar_flag]
            this_tindex = tindex[solar_flag]

            file_list, tindices = [], []

            for ii in range(nfiles):

                this_file = np.flatnonzero(this_findex == ii)

                if this_file.size > 0:

                    file_list.append(acq_files[ii])
                    tindices.append(this_tindex[this_file])

            date = ephemeris.unix_to_datetime(rr).strftime('%Y%m%dT%H%M%SZ')
            transits.append((date, tval, file_list, tindices))

    # Create file prefix and suffix
    prefix = []

    prefix.append("redundant_calibration")

    if config.output_prefix is not None:
        prefix.append(config.output_prefix)

    prefix = '_'.join(prefix)

    suffix = []

    if config.include_auto:
        suffix.append("wauto")
    else:
        suffix.append("noauto")

    if config.include_intracyl:
        suffix.append("wintra")
    else:
        suffix.append("nointra")

    if config.fix_degen:
        suffix.append("fixed_degen")
    else:
        suffix.append("degen")

    suffix = '_'.join(suffix)

    # Loop over solar transits
    for date, timestamps, files, time_indices in transits:

        nfiles = len(files)

        mlog.info("%s (%d files) " % (date, nfiles))

        output_file = os.path.join(config.output_dir,
                                   "%s_SUN_%s_%s.h5" % (prefix, date, suffix))

        mlog.info("Saving to:  %s" % output_file)

        # Get info about this set of files
        data = andata.CorrData.from_acq_h5(files,
                                           datasets=['flags/inputs'],
                                           apply_gain=False,
                                           renormalize=False)

        coord = sun_coord(timestamps, deg=True)

        fstart = config.freq_start if config.freq_start is not None else 0
        fstop = config.freq_stop if config.freq_stop is not None else data.freq.size
        freq_index = range(fstart, fstop)

        freq = data.freq[freq_index]

        ntime = timestamps.size
        nfreq = freq.size

        # Determine bad inputs
        if config.bad_input_file is None or not os.path.isfile(
                config.bad_input_file):
            bad_input = np.flatnonzero(
                ~np.all(data.flags['inputs'][:], axis=-1))
        else:
            with open(config.bad_input_file, 'rb') as handler:
                bad_input = pickle.load(handler)

        mlog.info("%d inputs flagged as bad." % bad_input.size)

        nant = data.ninput

        # Determine polarization product maps
        dbinputs = tools.get_correlator_inputs(ephemeris.unix_to_datetime(
            timestamps[0]),
                                               correlator='chime')

        dbinputs = tools.reorder_correlator_inputs(data.input, dbinputs)

        feedpos = tools.get_feed_positions(dbinputs)

        prod = defaultdict(list)
        dist = defaultdict(list)

        for pp, this_prod in enumerate(data.prod):

            aa, bb = this_prod
            inp_aa = dbinputs[aa]
            inp_bb = dbinputs[bb]

            if (aa in bad_input) or (bb in bad_input):
                continue

            if not tools.is_chime(inp_aa) or not tools.is_chime(inp_bb):
                continue

            if not config.include_intracyl and (inp_aa.cyl == inp_bb.cyl):
                continue

            if not config.include_auto and (aa == bb):
                continue

            this_dist = list(feedpos[aa, :] - feedpos[bb, :])

            if tools.is_array_x(inp_aa) and tools.is_array_x(inp_bb):
                key = 'XX'

            elif tools.is_array_y(inp_aa) and tools.is_array_y(inp_bb):
                key = 'YY'

            elif not config.include_crosspol:
                continue

            elif tools.is_array_x(inp_aa) and tools.is_array_y(inp_bb):
                key = 'XY'

            elif tools.is_array_y(inp_aa) and tools.is_array_x(inp_bb):
                key = 'YX'

            else:
                raise RuntimeError("CHIME feeds not polarized.")

            prod[key].append(pp)
            dist[key].append(this_dist)

        polstr = sorted(prod.keys())
        polcnt = 0
        pol_sky_id = []
        bmap = {}
        for key in polstr:
            prod[key] = np.array(prod[key])
            dist[key] = np.array(dist[key])

            p_bmap, p_ubaseline = generate_mapping(dist[key])
            nubase = p_ubaseline.shape[0]

            bmap[key] = p_bmap + polcnt

            if polcnt > 0:

                ubaseline = np.concatenate((ubaseline, p_ubaseline), axis=0)
                pol_sky_id += [key] * nubase

            else:

                ubaseline = p_ubaseline.copy()
                pol_sky_id = [key] * nubase

            polcnt += nubase
            mlog.info("%d unique baselines" % polcnt)

        nsky = ubaseline.shape[0]

        # Create arrays to hold the results
        ores = {}
        ores['freq'] = freq
        ores['input'] = data.input
        ores['time'] = timestamps
        ores['coord'] = coord
        ores['pol'] = np.array(pol_sky_id)
        ores['baseline'] = ubaseline

        # Create array to hold gain results
        ores['gain'] = np.zeros((nfreq, nant, ntime), dtype=np.complex128)
        ores['sky'] = np.zeros((nfreq, nsky, ntime), dtype=np.complex128)
        ores['err'] = np.zeros((nfreq, nant + nsky, ntime, 2), dtype=np.float64)

        # Loop over polarisations
        for key in polstr:

            reverse_map = bmap[key]
            p_prod = prod[key]

            isort = np.argsort(reverse_map)

            p_prod = p_prod[isort]

            p_ant1 = data.prod['input_a'][p_prod]
            p_ant2 = data.prod['input_b'][p_prod]
            p_vismap = reverse_map[isort]

            # Find the redundant groups
            tmp = np.where(np.diff(p_vismap) != 0)[0]
            edges = np.zeros(2 + tmp.size, dtype='int')
            edges[0] = 0
            edges[1:-1] = tmp + 1
            edges[-1] = p_vismap.size

            kept_base = np.unique(p_vismap)

            # Determine the unique antennas
            kept_ants = np.unique(np.concatenate([p_ant1, p_ant2]))
            antmap = np.zeros(kept_ants.max() + 1, dtype='int') - 1

            p_nant = kept_ants.size
            for i in range(p_nant):
                antmap[kept_ants[i]] = i

            p_ant1_use = antmap[p_ant1].copy()
            p_ant2_use = antmap[p_ant2].copy()

            # Create matrix
            p_nvis = p_prod.size
            nred = edges.size - 1

            npar = p_nant + nred

            A = np.zeros((p_nvis, npar), dtype=np.float32)
            B = np.zeros((p_nvis, npar), dtype=np.float32)

            for kk in range(p_nant):

                flag_ant1 = p_ant1_use == kk
                if np.any(flag_ant1):
                    A[flag_ant1, kk] = 1.0
                    B[flag_ant1, kk] = 1.0

                flag_ant2 = p_ant2_use == kk
                if np.any(flag_ant2):
                    A[flag_ant2, kk] = 1.0
                    B[flag_ant2, kk] = -1.0

            for ee in range(nred):

                A[edges[ee]:edges[ee + 1], p_nant + ee] = 1.0

                B[edges[ee]:edges[ee + 1], p_nant + ee] = 1.0

            # Add equations to break degeneracy
            if config.fix_degen:
                A = np.concatenate((A, np.zeros((1, npar), dtype=np.float32)))
                A[-1, 0:p_nant] = 1.0

                B = np.concatenate((B, np.zeros((3, npar), dtype=np.float32)))
                B[-3, 0:p_nant] = 1.0
                B[-2, 0:p_nant] = feedpos[kept_ants, 0]
                B[-1, 0:p_nant] = feedpos[kept_ants, 1]

            # Loop over frequencies
            for ff, find in enumerate(freq_index):

                mlog.info("Freq %d of %d.  %0.2f MHz." %
                          (ff + 1, nfreq, freq[ff]))

                cnt = 0

                # Loop over files
                for ii, (filename, tind) in enumerate(zip(files,
                                                          time_indices)):

                    ntind = len(tind)
                    mlog.info("Processing file %s (%d time samples)" %
                              (filename, ntind))

                    # Compute noise weight
                    with h5py.File(filename, 'r') as hf:
                        wnoise = np.median(hf['flags/vis_weight'][find, :, :],
                                           axis=-1)

                    # Loop over times
                    for tt in tind:

                        t0 = time.time()

                        mlog.info("Time %d of %d.  %d index of current file." %
                                  (cnt + 1, ntime, tt))

                        # Load visibilities
                        with h5py.File(filename, 'r') as hf:

                            snap = hf['vis'][find, :, tt]
                            wsnap = wnoise * (
                                (hf['flags/vis_weight'][find, :, tt] > 0.0) &
                                (np.abs(snap) > 0.0)).astype(np.float32)

                        # Extract relevant products for this polarization
                        snap = snap[p_prod]
                        wsnap = wsnap[p_prod]

                        # Turn into amplitude and phase, avoiding NaN
                        mask = (wsnap > 0.0)

                        amp = np.where(mask, np.log(np.abs(snap)), 0.0)
                        phi = np.where(mask, np.angle(snap), 0.0)

                        # Deal with phase wrapping
                        for aa, bb in zip(edges[:-1], edges[1:]):
                            dphi = phi[aa:bb] - np.sort(phi[aa:bb])[int(
                                (bb - aa) / 2)]
                            phi[aa:bb] += (2.0 * np.pi * (dphi < -np.pi) -
                                           2.0 * np.pi * (dphi > np.pi))

                        # Add elements to fix degeneracy
                        if config.fix_degen:
                            amp = np.concatenate((amp, np.zeros(1)))
                            phi = np.concatenate((phi, np.zeros(3)))

                        # Determine noise matrix
                        inv_diagC = wsnap * np.abs(snap)**2 * 2.0

                        if config.fix_degen:
                            inv_diagC = np.concatenate((inv_diagC, np.ones(1)))

                        # Amplitude estimate and covariance
                        amp_param_cov = np.linalg.inv(
                            np.dot(A.T, inv_diagC[:, np.newaxis] * A))
                        amp_param = np.dot(amp_param_cov,
                                           np.dot(A.T, inv_diagC * amp))

                        # Phase estimate and covariance
                        if config.fix_degen:
                            inv_diagC = np.concatenate((inv_diagC, np.ones(2)))

                        phi_param_cov = np.linalg.inv(
                            np.dot(B.T, inv_diagC[:, np.newaxis] * B))
                        phi_param = np.dot(phi_param_cov,
                                           np.dot(B.T, inv_diagC * phi))

                        # Save to large array
                        ores['gain'][ff, kept_ants,
                                     cnt] = np.exp(amp_param[0:p_nant] +
                                                   1.0J * phi_param[0:p_nant])

                        ores['sky'][ff, kept_base,
                                    cnt] = np.exp(amp_param[p_nant:] +
                                                  1.0J * phi_param[p_nant:])

                        ores['err'][ff, kept_ants, cnt,
                                    0] = np.diag(amp_param_cov[0:p_nant,
                                                               0:p_nant])
                        ores['err'][ff, nant + kept_base, cnt,
                                    0] = np.diag(amp_param_cov[p_nant:,
                                                               p_nant:])

                        ores['err'][ff, kept_ants, cnt,
                                    1] = np.diag(phi_param_cov[0:p_nant,
                                                               0:p_nant])
                        ores['err'][ff, nant + kept_base, cnt,
                                    1] = np.diag(phi_param_cov[p_nant:,
                                                               p_nant:])

                        # Increment time counter
                        cnt += 1

                        # Print time elapsed
                        mlog.info("Took %0.1f seconds." % (time.time() - t0, ))

        # Save to HDF5 file
        with h5py.File(output_file, 'w') as handler:

            handler.attrs['date'] = date

            for key, val in ores.items():
                handler.create_dataset(key, data=val)
Example #32
def main(config_file=None, logging_params=DEFAULT_LOGGING):

    # Setup logging
    log.setup_logging(logging_params)
    mlog = log.get_logger(__name__)

    # Set config
    config = DEFAULTS.deepcopy()
    if config_file is not None:
        config.merge(NameSpace(load_yaml_config(config_file)))

    # Set niceness
    current_niceness = os.nice(0)
    os.nice(config.niceness - current_niceness)
    mlog.info('Changing process niceness from %d to %d.  Confirm:  %d' %
              (current_niceness, config.niceness, os.nice(0)))

    # Find acquisition files
    acq_files = sorted(glob(os.path.join(config.data_dir, config.acq, "*.h5")))
    nfiles = len(acq_files)

    # Determine time range of each file
    findex = []
    tindex = []
    for ii, filename in enumerate(acq_files):
        subdata = andata.CorrData.from_acq_h5(filename, datasets=())

        findex += [ii] * subdata.ntime
        tindex += range(subdata.ntime)

    findex = np.array(findex)
    tindex = np.array(tindex)

    # Determine transits within these files
    transits = []

    data = andata.CorrData.from_acq_h5(acq_files, datasets=())

    solar_rise = ephemeris.solar_rising(data.time[0] - 24.0 * 3600.0,
                                        end_time=data.time[-1])

    for rr in solar_rise:

        ss = ephemeris.solar_setting(rr)[0]

        solar_flag = np.flatnonzero((data.time >= rr) & (data.time <= ss))

        if solar_flag.size > 0:

            solar_flag = solar_flag[::config.downsample]

            tval = data.time[solar_flag]

            this_findex = findex[solar_flag]
            this_tindex = tindex[solar_flag]

            file_list, tindices = [], []

            for ii in range(nfiles):

                this_file = np.flatnonzero(this_findex == ii)

                if this_file.size > 0:

                    file_list.append(acq_files[ii])
                    tindices.append(this_tindex[this_file])

            date = ephemeris.unix_to_datetime(rr).strftime('%Y%m%dT%H%M%SZ')
            transits.append((date, tval, file_list, tindices))

    # Specify some parameters for algorithm
    N = 2048

    noffset = len(config.offsets)

    if config.sep_pol:
        rank = 1
        cross_pol = False
        pol = np.array(['S', 'E'])
        pol_s = np.array(
            [rr + 256 * xx for xx in range(0, 8, 2) for rr in range(256)])
        pol_e = np.array(
            [rr + 256 * xx for xx in range(1, 8, 2) for rr in range(256)])
        prod_ss = []
        prod_ee = []
    else:
        rank = 8
        cross_pol = config.cross_pol
        pol = np.array(['all'])

    npol = pol.size

    # Create file prefix and suffix
    prefix = []

    prefix.append("gain_solutions")

    if config.output_prefix is not None:
        prefix.append(config.output_prefix)

    prefix = '_'.join(prefix)

    suffix = []

    suffix.append("pol_%s" % '_'.join(pol))

    suffix.append("niter_%d" % config.niter)

    if cross_pol:
        suffix.append("zerocross")
    else:
        suffix.append("keepcross")

    if config.normalize:
        suffix.append("normed")
    else:
        suffix.append("notnormed")

    suffix = '_'.join(suffix)

    # Loop over solar transits
    for date, timestamps, files, time_indices in transits:

        nfiles = len(files)

        mlog.info("%s (%d files) " % (date, nfiles))

        output_file = os.path.join(
            config.output_dir, "%s_SUN_%s_%s.pickle" % (prefix, date, suffix))

        mlog.info("Saving to:  %s" % output_file)

        # Get info about this set of files
        data = andata.CorrData.from_acq_h5(files, datasets=['flags/inputs'])

        prod = data.prod

        coord = sun_coord(timestamps, deg=True)

        fstart = config.freq_start if config.freq_start is not None else 0
        fstop = config.freq_stop if config.freq_stop is not None else data.freq.size
        freq_index = range(fstart, fstop)

        freq = data.freq[freq_index]

        ntime = timestamps.size
        nfreq = freq.size

        # Determine bad inputs
        if config.bad_input_file is None or not os.path.isfile(
                config.bad_input_file):
            bad_input = np.flatnonzero(
                ~np.all(data.flags['inputs'][:], axis=-1))
        else:
            with open(config.bad_input_file, 'rb') as handler:
                bad_input = pickle.load(handler)

        mlog.info("%d inputs flagged as bad." % bad_input.size)
        bad_prod = np.array([
            ii for ii, pp in enumerate(prod)
            if (pp[0] in bad_input) or (pp[1] in bad_input)
        ])

        # Create arrays to hold the results
        ores = {}
        ores['date'] = date
        ores['coord'] = coord
        ores['time'] = timestamps
        ores['freq'] = freq
        ores['offsets'] = config.offsets
        ores['pol'] = pol

        ores['evalue'] = np.zeros((noffset, nfreq, ntime, N), dtype=np.float32)
        ores['resp'] = np.zeros((noffset, nfreq, ntime, N, config.neigen),
                                dtype=np.complex64)
        ores['resp_err'] = np.zeros((noffset, nfreq, ntime, N, config.neigen),
                                    dtype=np.float32)

        # Loop over frequencies
        for ff, find in enumerate(freq_index):

            mlog.info("Freq %d of %d.  %0.2f MHz." % (ff + 1, nfreq, freq[ff]))

            cnt = 0

            # Loop over files
            for ii, (filename, tind) in enumerate(zip(files, time_indices)):

                ntind = len(tind)
                mlog.info("Processing file %s (%d time samples)" %
                          (filename, ntind))

                # Loop over times
                for tt in tind:

                    t0 = time.time()

                    mlog.info("Time %d of %d.  %d index of current file." %
                              (cnt + 1, ntime, tt))

                    # Load visibilities
                    with h5py.File(filename, 'r') as hf:

                        vis = hf['vis'][find, :, tt]

                    # Set bad products equal to zero
                    vis[bad_prod] = 0.0

                    # Different code if we are separating polarisations
                    if config.sep_pol:

                        if not any(prod_ss):

                            for pind, pp in enumerate(prod):
                                if (pp[0] in pol_s) and (pp[1] in pol_s):
                                    prod_ss.append(pind)

                                elif (pp[0] in pol_e) and (pp[1] in pol_e):
                                    prod_ee.append(pind)

                            prod_ss = np.array(prod_ss)
                            prod_ee = np.array(prod_ee)

                            mlog.info("Product sizes: %d, %d" %
                                      (prod_ss.size, prod_ee.size))

                        # Loop over polarisations
                        for pp, (input_pol,
                                 prod_pol) in enumerate([(pol_s, prod_ss),
                                                         (pol_e, prod_ee)]):

                            visp = vis[prod_pol]

                            mlog.info("pol %s, visibility size:  %d" %
                                      (pol[pp], visp.size))

                            # Loop over offsets
                            for oo, off in enumerate(config.offsets):

                                mlog.info(
                                    "pol %s, rank %d, niter %d, offset %d, cross_pol %s, neigen %d"
                                    % (pol[pp], rank, config.niter, off,
                                       cross_pol, config.neigen))

                                ev, rr, rre = solve_gain(
                                    visp,
                                    cutoff=off,
                                    cross_pol=cross_pol,
                                    normalize=config.normalize,
                                    rank=rank,
                                    niter=config.niter,
                                    neigen=config.neigen)

                                ores['evalue'][oo, ff, cnt, input_pol] = ev
                                ores['resp'][oo, ff, cnt, input_pol, :] = rr
                                ores['resp_err'][oo, ff, cnt,
                                                 input_pol, :] = rre

                    else:

                        # Loop over offsets
                        for oo, off in enumerate(config.offsets):

                            mlog.info(
                                "rank %d, niter %d, offset %d, cross_pol %s, neigen %d"
                                % (rank, config.niter, off, cross_pol,
                                   config.neigen))

                            ev, rr, rre = solve_gain(
                                vis,
                                cutoff=off,
                                cross_pol=cross_pol,
                                normalize=config.normalize,
                                rank=rank,
                                niter=config.niter,
                                neigen=config.neigen)

                            ores['evalue'][oo, ff, cnt, :] = ev
                            ores['resp'][oo, ff, cnt, :, :] = rr
                            ores['resp_err'][oo, ff, cnt, :, :] = rre

                    # Increment time counter
                    cnt += 1

                    # Print time elapsed
                    mlog.info("Took %0.1f seconds." % (time.time() - t0, ))

        # Save to pickle file
        with open(output_file, 'wb') as handle:

            pickle.dump(ores, handle)
Example #33
def cli(debug):
    log.setup_logging(level=logging.DEBUG if debug else logging.ERROR)
Example #34
def main():
    parser, options, arguments = parse_options()

    # setup logging
    setup_logging(options.loglevel, options.logfile)
    logger = logging.getLogger(__name__)
    
    if options.show_version:
        print("Locust %s" % (version,))
        sys.exit(0)

    if os.path.isdir(options.locustfile):
        all_locustfiles = collect_locustfiles(options.locustfile)
    else:
        locustfile = find_locustfile(options.locustfile)
        if not locustfile:
            logger.error("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.")
            sys.exit(1)

        all_locustfiles = load_locustfile(locustfile)

    logger.info("All available locustfiles: {}".format(all_locustfiles))

    # Use the first locustfile for the default locusts
    locusts = all_locustfiles.values()[0]

    if options.list_commands:
        console_logger.info("Available Locusts:")
        for name in locusts:
            console_logger.info("    " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if arguments:
        missing = set(arguments) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(arguments) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        locust_classes = locusts.values()
    
    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info( "-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes), 
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)

    if options.master and options.no_web and not options.min_slaves:
        logger.error("When running --master and --no-web, you must specify --min-slaves to be available before starting to swarm")
        sys.exit(1)

    if options.master and options.no_web and not (options.timeout or options.num_requests):
        logger.error("When running --master and --no-web, you must specify either --num-request or --timeout to tell the slaves when to stop running each locustfile")
        sys.exit(1)

    if not options.no_web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor at %s:%s" % (options.web_host or "*", options.port))
        main_greenlet = gevent.spawn(web.start, locust_classes, options)

    if options.slave:
        logger.info("Waiting for master to become available")
        try:
            runners.locust_runner = polling.poll(
                lambda: SlaveLocustRunner(locust_classes, options, available_locustfiles=all_locustfiles),
                timeout=60,
                step=1,
                ignore_exceptions=(socket.error,))

        except polling.TimeoutException, e:
            logger.error("Failed to connect to the Locust master: %s", e.last)
            sys.exit(-1)

        main_greenlet = runners.locust_runner.greenlet
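The `polling.poll` call above retries construction of the slave runner until the master is reachable, treating connection errors as "not ready yet". The same pattern in isolation, reusing only the arguments visible in the snippet (the connect function, host, and port are illustrative stand-ins):

import socket
import polling

def connect():
    # Stand-in for SlaveLocustRunner(...): raises socket.error until the
    # master is accepting connections; host/port are placeholders
    return socket.create_connection(("127.0.0.1", 5557), timeout=1)

try:
    conn = polling.poll(
        connect,
        step=1,                             # retry once per second
        timeout=60,                         # give up after 60 seconds
        ignore_exceptions=(socket.error,))  # connection refused -> keep polling
except polling.TimeoutException:
    raise SystemExit("master never became available")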
Example #35
def main():
    global MODEL
    parser = argparse.ArgumentParser(
        description="AI for Earth Land Cover Worker")

    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable verbose debugging",
                        default=False)

    parser.add_argument("--port",
                        action="store",
                        type=int,
                        help="Port we are listenning on",
                        default=0)
    parser.add_argument("--model",
                        action="store",
                        dest="model",
                        choices=["keras_dense"],
                        help="Model to use",
                        required=True)
    parser.add_argument("--model_fn",
                        action="store",
                        dest="model_fn",
                        type=str,
                        help="Model fn to use",
                        default=None)
    parser.add_argument("--fine_tune_layer",
                        action="store",
                        dest="fine_tune_layer",
                        type=int,
                        help="Layer of model to fine tune",
                        default=-2)

    parser.add_argument("--gpu",
                        action="store",
                        dest="gpuid",
                        type=int,
                        help="GPU to use",
                        required=False)

    args = parser.parse_args(sys.argv[1:])

    # Setup logging
    log_path = os.getcwd() + "/logs"
    setup_logging(log_path, "worker")

    # Setup model
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "" if args.gpuid is None else str(
        args.gpuid)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    if args.model == "keras_dense":
        model = KerasDenseFineTune(args.model_fn, args.gpuid,
                                   args.fine_tune_layer)
    else:
        raise NotImplementedError(
            "The given model type is not implemented yet.")

    t = OneShotServer(MyService(model), port=args.port)
    t.start()
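`OneShotServer` here looks like rpyc's `rpyc.utils.server.OneShotServer`, which serves exactly one connection and then exits. Assuming that is the case, a client would talk to this worker roughly as follows (host, port, and the exposed method name are illustrative):

import rpyc

conn = rpyc.connect("localhost", 8000)  # port given to the worker via --port
remote_service = conn.root              # proxy for MyService on the worker
# call whatever methods MyService exposes, e.g.:
# result = remote_service.run_model(data)   # hypothetical method name
conn.close()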
Example #37
            database.add_raw_loan_dates(asOfDate, db_conn)
            database.add_raw_loans(loans, db_conn)
            database.add_loans_funded_as_of_date(loans, db_conn)

            logger.info("%s added %s loans." % (asOfDate, len(loans)))
        else:
            logger.info("%s already exists." % asOfDate)


def execute_with_delay(delay=None, token=None):
    if delay is None:
        delay = config.POLLING_INTERVAL

    run_time = time.time()
    execute(token=token)
    remaining_time = delay - (time.time() - run_time)

    if remaining_time > 0:
        time.sleep(remaining_time)


if __name__ == "__main__":
    """When executing as a script, will run indefinitely with default delay
    between requests and database inserts. For full functionality, execute the
    top level `run.py` instead.
    """
    log.setup_logging(config.LOG_PATH)

    while True:
        execute_with_delay()
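`execute_with_delay` keeps a fixed polling cadence by subtracting the time the work took from the interval before sleeping. The same idea as a standalone loop, with a graceful Ctrl-C exit added (a sketch; the interval constant is a stand-in for config.POLLING_INTERVAL):

import time

POLLING_INTERVAL = 60.0  # seconds; stand-in for config.POLLING_INTERVAL

def run_forever(work, interval=POLLING_INTERVAL):
    try:
        while True:
            started = time.time()
            work()
            # Sleep only for whatever is left of the interval
            remaining = interval - (time.time() - started)
            if remaining > 0:
                time.sleep(remaining)
    except KeyboardInterrupt:
        pass  # allow a clean shutdown from the terminal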
Example #38
def test():
    'test server'
    log.setup_logging()
    server()
Example #39
def main():
    global SESSION_HANDLER
    parser = argparse.ArgumentParser(description="AI for Earth Land Cover")

    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable verbose debugging",
                        default=False)

    # TODO: make sure the storage type is passed onto the Session objects
    parser.add_argument('--storage_type',
                        action="store",
                        dest="storage_type",
                        type=str,
                        choices=["table", "file"],
                        default=None)
    parser.add_argument("--storage_path",
                        action="store",
                        dest="storage_path",
                        type=str,
                        help="Path to directory where output will be stored",
                        default=None)

    parser.add_argument("--host",
                        action="store",
                        dest="host",
                        type=str,
                        help="Host to bind to",
                        default="0.0.0.0")
    parser.add_argument("--port",
                        action="store",
                        dest="port",
                        type=int,
                        help="Port to listen on",
                        default=8080)

    subparsers = parser.add_subparsers(
        dest="subcommand", help='Help for subcommands'
    )  # TODO: If we use Python3.7 we can use the required keyword here
    parser_a = subparsers.add_parser(
        'local', help='For running models on the local server')

    parser_b = subparsers.add_parser('remote',
                                     help='For running models with RPC calls')
    parser.add_argument("--remote_host",
                        action="store",
                        dest="remote_host",
                        type=str,
                        help="RabbitMQ host",
                        default="0.0.0.0")
    parser.add_argument("--remote_port",
                        action="store",
                        dest="remote_port",
                        type=int,
                        help="RabbitMQ port",
                        default=8080)

    args = parser.parse_args(sys.argv[1:])

    # create Session factory to use based on whether we are running locally or remotely
    run_local = None
    if args.subcommand == "local":
        print("Sessions will be spawned on the local machine")
        run_local = True
    elif args.subcommand == "remote":
        print("Sessions will be spawned remotely")
        run_local = False
    else:
        print("Must specify 'local' or 'remote' on command line")
        return
    SESSION_HANDLER = SessionHandler(run_local, args)
    SESSION_HANDLER.start_monitor()

    # Setup logging
    log_path = os.getcwd() + "/logs"
    setup_logging(log_path, "server")  # TODO: don't delete logs

    # Setup the bottle server
    app = bottle.Bottle()

    app.add_hook("after_request", enable_cors)
    app.add_hook("before_request", setup_sessions)
    app.add_hook(
        "before_request", manage_sessions
    )  # before every request we want to check to make sure there are no session issues

    # API paths
    app.route(
        "/predPatch", method="OPTIONS", callback=do_options
    )  # TODO: all of our web requests from index.html fire an OPTIONS call because of https://stackoverflow.com/questions/1256593/why-am-i-getting-an-options-request-instead-of-a-get-request, we should fix this
    app.route('/predPatch', method="POST", callback=pred_patch)

    app.route("/predTile", method="OPTIONS", callback=do_options)
    app.route('/predTile', method="POST", callback=pred_tile)

    app.route("/getInput", method="OPTIONS", callback=do_options)
    app.route('/getInput', method="POST", callback=get_input)

    app.route("/recordCorrection", method="OPTIONS", callback=do_options)
    app.route('/recordCorrection', method="POST", callback=record_correction)

    app.route("/retrainModel", method="OPTIONS", callback=do_options)
    app.route('/retrainModel', method="POST", callback=retrain_model)

    app.route("/resetModel", method="OPTIONS", callback=do_options)
    app.route('/resetModel', method="POST", callback=reset_model)

    app.route("/doUndo", method="OPTIONS", callback=do_options)
    app.route("/doUndo", method="POST", callback=do_undo)

    app.route("/doLoad", method="OPTIONS", callback=do_options)
    app.route("/doLoad", method="POST", callback=do_load)

    app.route("/createSession", method="OPTIONS", callback=do_options)
    app.route("/createSession", method="POST", callback=create_session)

    app.route("/killSession", method="OPTIONS", callback=do_options)
    app.route("/killSession", method="POST", callback=kill_session)

    app.route("/whoami", method="GET", callback=whoami)

    # Content paths
    app.route("/", method="GET", callback=get_landing_page)
    app.route("/favicon.ico", method="GET", callback=get_favicon)
    app.route("/<filepath:re:.*>", method="GET", callback=get_everything_else)

    manage_session_folders()
    session_opts = {
        'session.type': 'file',
        'session.cookie_expires': 3000,
        'session.data_dir': SESSION_FOLDER,
        'session.auto': True
    }
    app = beaker.middleware.SessionMiddleware(app, session_opts)

    server = cheroot.wsgi.Server((args.host, args.port), app)
    server.max_request_header_size = 2**13
    server.max_request_body_size = 2**27

    try:
        server.start()
    finally:
        server.stop()
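The hooks `enable_cors` and the `do_options` callback are referenced above but not defined in this excerpt. A minimal sketch of what such bottle handlers usually look like, inferred from the hook and route names (an assumption, not the project's actual code):

import bottle

def enable_cors():
    # after_request hook: attach CORS headers to every response
    bottle.response.headers["Access-Control-Allow-Origin"] = "*"
    bottle.response.headers["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS"
    bottle.response.headers["Access-Control-Allow-Headers"] = "Origin, Content-Type"

def do_options():
    # OPTIONS preflight handler: an empty 200 is enough; the CORS headers
    # themselves are added by the after_request hook above
    bottle.response.status = 200
    return ""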
Example #40
File: main.py  Project: hrosenhorn/locust
def main():
    parser, options, arguments = parse_options()

    # setup logging
    setup_logging(options.loglevel, options.logfile)
    logger = logging.getLogger(__name__)

    if options.show_version:
        print "Locust %s" % (version,)
        sys.exit(0)

    locustfile = find_locustfile(options.locustfile)
    if not locustfile:
        logger.error("Could not find any locustfile! See --help for available options.")
        sys.exit(1)

    docstring, locusts = load_locustfile(locustfile)

    if options.list_commands:
        print "Available Locusts:"
        for name in locusts:
            print "    " + name
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if arguments:
        missing = set(arguments) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(arguments) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        locust_classes = locusts.values()

    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps

        task_data = {
            "per_class": get_task_ratio_dict(locust_classes),
            "total": get_task_ratio_dict(locust_classes, total=True),
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)

    # if --master is set, make sure --no-web isn't set
    if options.master and options.no_web:
        logger.error(
            "Locust can not run distributed with the web interface disabled (do not use --no-web and --master together)"
        )
        sys.exit(0)

    if not options.no_web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor on port %s" % options.port)
        main_greenlet = gevent.spawn(
            web.start,
            locust_classes,
            options.hatch_rate,
            options.num_clients,
            options.num_requests,
            options.ramp,
            options.port,
        )

    if not options.master and not options.slave:
        runners.locust_runner = LocalLocustRunner(
            locust_classes, options.hatch_rate, options.num_clients, options.num_requests, options.host
        )
        # spawn client spawning/hatching greenlet
        if options.no_web:
            runners.locust_runner.start_hatching(wait=True)
            main_greenlet = runners.locust_runner.greenlet
    elif options.master:
        runners.locust_runner = MasterLocustRunner(
            locust_classes,
            options.hatch_rate,
            options.num_clients,
            num_requests=options.num_requests,
            host=options.host,
            master_host=options.master_host,
        )
    elif options.slave:
        runners.locust_runner = SlaveLocustRunner(
            locust_classes,
            options.hatch_rate,
            options.num_clients,
            num_requests=options.num_requests,
            host=options.host,
            master_host=options.master_host,
        )
        main_greenlet = runners.locust_runner.greenlet

    if options.print_stats or (options.no_web and not options.slave):
        # spawn stats printing greenlet
        gevent.spawn(stats_printer)

    def shutdown(code=0):
        """
        Shut down locust by firing quitting event, printing stats and exiting
        """
        logger.info("Shutting down, bye..")
        events.quitting.fire()
        print_stats(runners.locust_runner.request_stats)
        print_percentile_stats(runners.locust_runner.request_stats)
        print_error_report()
        sys.exit(code)

    # install SIGTERM handler
    def sig_term_handler():
        logger.info("Got SIGTERM signal")
        shutdown(0)

    gevent.signal(signal.SIGTERM, sig_term_handler)

    try:
        logger.info("Starting Locust %s" % version)
        main_greenlet.join()
        shutdown(0)
    except KeyboardInterrupt, e:
        shutdown(0)
Example #41
def main():
    parser, options, arguments = parse_options()

    # setup logging
    setup_logging(options.loglevel, options.logfile)
    logger = logging.getLogger(__name__)

    if options.show_version:
        print "Locust %s" % (version, )
        sys.exit(0)

    locustfile = find_locustfile(options.locustfile)
    if not locustfile:
        logger.error(
            "Could not find any locustfile! See --help for available options.")
        sys.exit(1)

    docstring, locusts = load_locustfile(locustfile)

    if options.list_commands:
        print "Available Locusts:"
        for name in locusts:
            print "    " + name
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if arguments:
        missing = set(arguments) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(arguments) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        locust_classes = locusts.values()

    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes),
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)

    # if --master is set, make sure --no-web isn't set
    if options.master and options.no_web:
        logger.error(
            "Locust can not run distributed with the web interface disabled (do not use --no-web and --master together)"
        )
        sys.exit(0)

    if not options.no_web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor on port %s" % options.port)
        main_greenlet = gevent.spawn(web.start, locust_classes,
                                     options.hatch_rate, options.num_clients,
                                     options.num_requests, options.ramp,
                                     options.port)

    if not options.master and not options.slave:
        runners.locust_runner = LocalLocustRunner(locust_classes,
                                                  options.hatch_rate,
                                                  options.num_clients,
                                                  options.num_requests,
                                                  options.host)
        # spawn client spawning/hatching greenlet
        if options.no_web:
            runners.locust_runner.start_hatching(wait=True)
            main_greenlet = runners.locust_runner.greenlet
    elif options.master:
        runners.locust_runner = MasterLocustRunner(
            locust_classes,
            options.hatch_rate,
            options.num_clients,
            num_requests=options.num_requests,
            host=options.host,
            master_host=options.master_host)
    elif options.slave:
        runners.locust_runner = SlaveLocustRunner(
            locust_classes,
            options.hatch_rate,
            options.num_clients,
            num_requests=options.num_requests,
            host=options.host,
            master_host=options.master_host)
        main_greenlet = runners.locust_runner.greenlet

    if options.print_stats or (options.no_web and not options.slave):
        # spawn stats printing greenlet
        gevent.spawn(stats_printer)

    def shutdown(code=0):
        """
        Shut down locust by firing quitting event, printing stats and exiting
        """
        logger.info("Shutting down, bye..")
        events.quitting.fire()
        print_stats(runners.locust_runner.request_stats)
        print_percentile_stats(runners.locust_runner.request_stats)
        print_error_report()
        sys.exit(code)

    # install SIGTERM handler
    def sig_term_handler():
        logger.info("Got SIGTERM signal")
        shutdown(0)

    gevent.signal(signal.SIGTERM, sig_term_handler)

    try:
        logger.info("Starting Locust %s" % version)
        main_greenlet.join()
        shutdown(0)
    except KeyboardInterrupt as e:
        shutdown(0)
Example #42
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging
from log import setup_logging

setup_logging(filename='info.log', title='seekplum')

error_logger = logging.getLogger("seekplum.cloud")
info_logger = logging.getLogger("seekplum")

error_logger.error("error error")
info_logger.info('info info')
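Judging by the calls above, this project's `setup_logging(filename=..., title=...)` installs a file handler on the logger named by `title` ("seekplum"), so that child loggers such as "seekplum.cloud" propagate their records up to it. A sketch under that assumption (not the project's actual helper):

import logging

def setup_logging(filename, title):
    # Attach a file handler to the logger named `title`; children
    # ("<title>.cloud", ...) propagate their records up to this handler
    logger = logging.getLogger(title)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s %(name)s %(levelname)s: %(message)s"))
    logger.addHandler(handler)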
Example #43
import logging
import json
from locust import HttpLocust, TaskSet, task, web
from flask import request, Response

from hubTasks import HubTasks
from hubUser import HubUser
from log import setup_logging
import utils

setup_logging('/mnt/log/verifier.log')
logger = logging.getLogger(__name__)


class VerifierUser(HubUser):
    def name(self):
        return "verifier_test_"

    def start_webhook(self, config):
        # First - posts to channel, webhook on channel
        # Second - posts to channel, parallel webhook on channel
        # Third - posts to channel, replicate channel, webhook on replicated channel
        if config['number'] == 2:
            config['parallel'] = 2
        if config['number'] == 3:
            config['webhook_channel'] = config['channel'] + "_replicated"
            config['client'].put("/channel/" + config['webhook_channel'],
                                 data=json.dumps({"name": config['webhook_channel'], "ttlDays": "3",
                                                  "replicationSource": config['host'] + "/channel/" + config[
                                                      'channel']}),
                                 headers={"Content-Type": "application/json"},
Example #44
def _main(args, config):
    log.setup_logging(config)
    gerrkins = Gerrkins(config)
    gerrkins.start()
Example #45
from ._apollo import Tensor, Net, Caffe, make_numpy_data_param
import caffe_pb2
import log
from architecture import Architecture

log.setup_logging()
Example #46
File: main.py  Project: FGtatsuro/locust
def main(args=None):
    parser, options, arguments = parse_options(args)

    # setup logging
    setup_logging(options.loglevel, options.logfile)
    logger = logging.getLogger(__name__)
    
    if options.show_version:
        print "Locust %s" % (version,)
        sys.exit(0)

    locustfile = find_locustfile(options.locustfile)
    if not locustfile:
        logger.error("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.")
        sys.exit(1)

    docstring, locusts = load_locustfile(locustfile)

    if options.list_commands:
        console_logger.info("Available Locusts:")
        for name in locusts:
            console_logger.info("    " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if arguments:
        missing = set(arguments) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(arguments) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        locust_classes = locusts.values()
    
    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info( "-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes), 
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)
    
    # if --master is set, make sure --no-web isn't set
    if options.master and options.no_web:
        logger.error("Locust can not run distributed with the web interface disabled (do not use --no-web and --master together)")
        sys.exit(0)

    if not options.no_web and not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor at %s:%s" % (options.web_host or "*", options.port))
        main_greenlet = gevent.spawn(web.start, locust_classes, options)
    
    if not options.master and not options.slave:
        runners.locust_runner = LocalLocustRunner(locust_classes, options)
        # spawn client spawning/hatching greenlet
        if options.no_web:
            runners.locust_runner.start_hatching(wait=True)
            main_greenlet = runners.locust_runner.greenlet
    elif options.master:
        runners.locust_runner = MasterLocustRunner(locust_classes, options)
    elif options.slave:
        try:
            runners.locust_runner = SlaveLocustRunner(locust_classes, options)
            main_greenlet = runners.locust_runner.greenlet
        except socket.error, e:
            logger.error("Failed to connect to the Locust master: %s", e)
            sys.exit(-1)
Example #47
def offline_point_source_calibration(file_list,
                                     source,
                                     inputmap=None,
                                     start=None,
                                     stop=None,
                                     physical_freq=None,
                                     tcorr=None,
                                     logging_params=DEFAULT_LOGGING,
                                     **kwargs):
    # Load config
    config = DEFAULTS.deepcopy()
    config.merge(NameSpace(kwargs))

    # Setup logging
    log.setup_logging(logging_params)
    mlog = log.get_logger(__name__)

    mlog.info("ephemeris file: %s" % ephemeris.__file__)

    # Set the model to use
    fitter_function = utils.fit_point_source_transit
    model_function = utils.model_point_source_transit

    # getfullargspec replaces the removed inspect.getargspec and
    # exposes the same .args / .defaults attributes used below
    farg = inspect.getfullargspec(fitter_function)
    defaults = {
        key: val
        for key, val in zip(farg.args[-len(farg.defaults):], farg.defaults)
    }
    poly_deg_amp = kwargs.get('poly_deg_amp', defaults['poly_deg_amp'])
    poly_deg_phi = kwargs.get('poly_deg_phi', defaults['poly_deg_phi'])
    poly_type = kwargs.get('poly_type', defaults['poly_type'])

    param_name = ([
        '%s_poly_amp_coeff%d' % (poly_type, cc)
        for cc in range(poly_deg_amp + 1)
    ] + [
        '%s_poly_phi_coeff%d' % (poly_type, cc)
        for cc in range(poly_deg_phi + 1)
    ])

    model_kwargs = [('poly_deg_amp', poly_deg_amp),
                    ('poly_deg_phi', poly_deg_phi), ('poly_type', poly_type)]
    model_name = '.'.join(
        [getattr(model_function, key) for key in ['__module__', '__name__']])

    tval = {}

    # Set where to evaluate gain
    ha_eval_str = ['raw_transit']

    if config.multi_sample:
        ha_eval_str += ['transit', 'peak']
        ha_eval = [0.0, None]
        fitslc = slice(1, 3)

    ind_eval = ha_eval_str.index(config.evaluate_gain_at)

    # Determine dimensions
    direction = ['amp', 'phi']
    nparam = len(param_name)
    ngain = len(ha_eval_str)
    ndir = len(direction)

    # Determine frequencies
    data = andata.CorrData.from_acq_h5(file_list,
                                       datasets=(),
                                       start=start,
                                       stop=stop)
    freq = data.freq

    if physical_freq is not None:
        index_freq = np.array(
            [np.argmin(np.abs(ff - freq)) for ff in physical_freq])
        freq_sel = utils.convert_to_slice(index_freq)
        freq = freq[index_freq]
    else:
        index_freq = np.arange(freq.size)
        freq_sel = None

    nfreq = freq.size

    # Compute flux of source
    inv_rt_flux_density = tools.invert_no_zero(
        np.sqrt(FluxCatalog[source].predict_flux(freq)))

    # Read in the eigenvalues for all frequencies
    data = andata.CorrData.from_acq_h5(file_list,
                                       datasets=['erms', 'eval'],
                                       freq_sel=freq_sel,
                                       start=start,
                                       stop=stop)

    # Determine source coordinates
    this_csd = np.floor(ephemeris.unix_to_csd(np.median(data.time)))
    timestamp0 = ephemeris.transit_times(FluxCatalog[source].skyfield,
                                         ephemeris.csd_to_unix(this_csd))[0]
    src_ra, src_dec = ephemeris.object_coords(FluxCatalog[source].skyfield,
                                              date=timestamp0,
                                              deg=True)

    ra = ephemeris.lsa(data.time)
    ha = ra - src_ra
    ha = ha - (ha > 180.0) * 360.0 + (ha < -180.0) * 360.0
    ha = np.radians(ha)

    itrans = np.argmin(np.abs(ha))

    # Flag samples more than 75% of the maximum hour angle from transit as off-source
    window = 0.75 * np.max(np.abs(ha))

    off_source = np.abs(ha) > window

    mlog.info("CSD %d" % this_csd)
    mlog.info("Hour angle at transit (%d of %d):  %0.2f deg   " %
              (itrans, len(ha), np.degrees(ha[itrans])))
    mlog.info("Hour angle off source: %0.2f deg" %
              np.median(np.abs(np.degrees(ha[off_source]))))

    src_dec = np.radians(src_dec)
    lat = np.radians(ephemeris.CHIMELATITUDE)

    # Determine division of frequencies
    ninput = data.ninput
    ntime = data.ntime
    nblock_freq = int(np.ceil(nfreq / float(config.nfreq_per_block)))

    # Determine bad inputs
    eps = 10.0 * np.finfo(data['erms'].dtype).eps
    good_freq = np.flatnonzero(np.all(data['erms'][:] > eps, axis=-1))
    ind_sub_freq = good_freq[slice(0, good_freq.size,
                                   max(int(good_freq.size / 10), 1))]

    tmp_data = andata.CorrData.from_acq_h5(file_list,
                                           datasets=['evec'],
                                           freq_sel=ind_sub_freq,
                                           start=start,
                                           stop=stop)
    eps = 10.0 * np.finfo(tmp_data['evec'].dtype).eps
    bad_input = np.flatnonzero(
        np.all(np.abs(tmp_data['evec'][:, 0]) < eps, axis=(0, 2)))

    input_axis = tmp_data.input.copy()

    del tmp_data

    # Query layout database for correlator inputs
    if inputmap is None:
        inputmap = tools.get_correlator_inputs(
            datetime.datetime.utcfromtimestamp(data.time[itrans]),
            correlator='chime')

    inputmap = tools.reorder_correlator_inputs(input_axis, inputmap)

    tools.change_chime_location(rotation=config.telescope_rotation)

    # Determine x and y pol index
    xfeeds = np.array([
        idf for idf, inp in enumerate(inputmap)
        if (idf not in bad_input) and tools.is_array_x(inp)
    ])
    yfeeds = np.array([
        idf for idf, inp in enumerate(inputmap)
        if (idf not in bad_input) and tools.is_array_y(inp)
    ])

    nfeed = xfeeds.size + yfeeds.size

    pol = [yfeeds, xfeeds]
    polstr = ['Y', 'X']
    npol = len(pol)

    neigen = min(max(npol, config.neigen), data['eval'].shape[1])

    phase_ref = config.phase_reference_index
    phase_ref_by_pol = [
        pol[pp].tolist().index(phase_ref[pp]) for pp in range(npol)
    ]

    # Calculate dynamic range
    eval0_off_source = np.median(data['eval'][:, 0, off_source], axis=-1)

    dyn = data['eval'][:, 1, :] * tools.invert_no_zero(
        eval0_off_source[:, np.newaxis])

    # Determine frequencies to mask
    not_rfi = np.ones((nfreq, 1), dtype=bool)
    if config.mask_rfi is not None:
        for frng in config.mask_rfi:
            not_rfi[:, 0] &= ((freq < frng[0]) | (freq > frng[1]))

    mlog.info("%0.1f percent of frequencies available after masking RFI." %
              (100.0 * np.sum(not_rfi, dtype=np.float32) / float(nfreq), ))

    #dyn_flg = utils.contiguous_flag(dyn > config.dyn_rng_threshold, centre=itrans)
    if source in config.dyn_rng_threshold:
        dyn_rng_threshold = config.dyn_rng_threshold[source]
    else:
        dyn_rng_threshold = config.dyn_rng_threshold.default

    mlog.info("Dynamic range threshold set to %0.1f." % dyn_rng_threshold)

    dyn_flg = dyn > dyn_rng_threshold

    # Calculate fit flag
    fit_flag = np.zeros((nfreq, npol, ntime), dtype=bool)
    for pp in range(npol):

        mlog.info("Dynamic Range Nsample, Pol %d:  %s" % (pp, ','.join([
            "%d" % xx for xx in np.percentile(np.sum(dyn_flg, axis=-1),
                                              [25, 50, 75, 100])
        ])))

        if config.nsigma1 is None:
            fit_flag[:, pp, :] = dyn_flg & not_rfi

        else:

            fit_window = config.nsigma1 * np.radians(
                utils.get_window(freq, pol=polstr[pp], dec=src_dec, deg=True))

            win_flg = np.abs(ha)[np.newaxis, :] <= fit_window[:, np.newaxis]

            fit_flag[:, pp, :] = (dyn_flg & win_flg & not_rfi)

    # Calculate base error
    base_err = data['erms'][:, np.newaxis, :]

    # Check for sign flips
    ref_resp = andata.CorrData.from_acq_h5(file_list,
                                           datasets=['evec'],
                                           input_sel=config.eigen_reference,
                                           freq_sel=freq_sel,
                                           start=start,
                                           stop=stop)['evec'][:, 0:neigen,
                                                              0, :]

    sign0 = 1.0 - 2.0 * (ref_resp.real < 0.0)

    # Check that we have the correct reference feed
    if np.any(np.abs(ref_resp.imag) > 0.0):
        raise ValueError("Reference feed %d is incorrect." % config.eigen_reference)

    del ref_resp

    # Save index_map
    results = {}
    results['model'] = model_name
    results['param'] = param_name
    results['freq'] = data.index_map['freq'][:]
    results['input'] = input_axis
    results['eval'] = ha_eval_str
    results['dir'] = direction

    for key, val in model_kwargs:
        results[key] = val

    # Initialize numpy arrays to hold results
    if config.return_response:

        results['response'] = np.zeros((nfreq, ninput, ntime),
                                       dtype=np.complex64)
        results['response_err'] = np.zeros((nfreq, ninput, ntime),
                                           dtype=np.float32)
        results['fit_flag'] = fit_flag
        results['ha_axis'] = ha
        results['ra'] = ra

    else:

        results['gain_eval'] = np.zeros((nfreq, ninput, ngain),
                                        dtype=np.complex64)
        results['weight_eval'] = np.zeros((nfreq, ninput, ngain),
                                          dtype=np.float32)
        results['frac_gain_err'] = np.zeros((nfreq, ninput, ngain, ndir),
                                            dtype=np.float32)

        results['parameter'] = np.zeros((nfreq, ninput, nparam),
                                        dtype=np.float32)
        results['parameter_err'] = np.zeros((nfreq, ninput, nparam),
                                            dtype=np.float32)

        results['index_eval'] = np.full((nfreq, ninput), -1, dtype=np.int8)
        results['gain'] = np.zeros((nfreq, ninput), dtype=np.complex64)
        results['weight'] = np.zeros((nfreq, ninput), dtype=np.float32)

        results['ndof'] = np.zeros((nfreq, ninput, ndir), dtype=np.float32)
        results['chisq'] = np.zeros((nfreq, ninput, ndir), dtype=np.float32)

        results['timing'] = np.zeros((nfreq, ninput), dtype=np.complex64)

    # Initialize metric like variables
    results['runtime'] = np.zeros((nblock_freq, 2), dtype=np.float64)

    # Compute distances
    dist = tools.get_feed_positions(inputmap)
    for pp, feeds in enumerate(pol):
        dist[feeds, :] -= dist[phase_ref[pp], np.newaxis, :]

    # Loop over frequency blocks
    for gg in range(nblock_freq):

        mlog.info("Frequency block %d of %d." % (gg, nblock_freq))

        fstart = gg * config.nfreq_per_block
        fstop = min((gg + 1) * config.nfreq_per_block, nfreq)
        findex = np.arange(fstart, fstop)
        ngroup = findex.size

        freq_sel = utils.convert_to_slice(index_freq[findex])

        timeit_start_gg = time.time()

        #
        if config.return_response:
            gstart = start
            gstop = stop

            tslc = slice(0, ntime)

        else:
            good_times = np.flatnonzero(np.any(fit_flag[findex], axis=(0, 1)))

            if good_times.size == 0:
                continue

            gstart = int(np.min(good_times))
            gstop = int(np.max(good_times)) + 1

            tslc = slice(gstart, gstop)

            gstart += start
            gstop += start

        hag = ha[tslc]
        itrans = np.argmin(np.abs(hag))

        # Load eigenvectors.
        nudata = andata.CorrData.from_acq_h5(
            file_list,
            datasets=['evec', 'vis', 'flags/vis_weight'],
            apply_gain=False,
            freq_sel=freq_sel,
            start=gstart,
            stop=gstop)

        # Save time to load data
        results['runtime'][gg, 0] = time.time() - timeit_start_gg
        timeit_start_gg = time.time()

        mlog.info("Time to load (per frequency):  %0.3f sec" %
                  (results['runtime'][gg, 0] / ngroup, ))

        # Loop over polarizations
        for pp, feeds in enumerate(pol):

            # Get timing correction
            if tcorr is not None:
                tgain = tcorr.get_gain(nudata.freq, nudata.input[feeds],
                                       nudata.time)
                tgain *= tgain[:, phase_ref_by_pol[pp], np.newaxis, :].conj()

                tgain_transit = tgain[:, :, itrans].copy()
                tgain *= tgain_transit[:, :, np.newaxis].conj()

            # Create the polarization masking vector
            P = np.zeros((1, ninput, 1), dtype=np.float64)
            P[:, feeds, :] = 1.0

            # Loop over frequencies
            for gff, ff in enumerate(findex):

                flg = fit_flag[ff, pp, tslc]

                if (2 * int(np.sum(flg))) < (nparam +
                                             1) and not config.return_response:
                    continue

                # Normalize by eigenvalue and correct for pi phase flips in process.
                resp = (nudata['evec'][gff, 0:neigen, :, :] *
                        np.sqrt(data['eval'][ff, 0:neigen, np.newaxis, tslc]) *
                        sign0[ff, :, np.newaxis, tslc])

                # Rotate to single-pol response
                # Move time to first axis for the matrix multiplication
                invL = tools.invert_no_zero(
                    np.rollaxis(data['eval'][ff, 0:neigen, np.newaxis, tslc],
                                -1, 0))

                UT = np.rollaxis(resp, -1, 0)
                U = np.swapaxes(UT, -1, -2)

                mu, vp = np.linalg.eigh(np.matmul(UT.conj(), P * U))

                rsign0 = (1.0 - 2.0 * (vp[:, 0, np.newaxis, :].real < 0.0))

                resp = mu[:, np.newaxis, :] * np.matmul(U, rsign0 * vp * invL)

                # Extract feeds of this pol
                # Transpose so that time is back to last axis
                resp = resp[:, feeds, -1].T

                # Compute error on response
                dataflg = ((nudata.weight[gff, feeds, :] > 0.0)
                           & np.isfinite(nudata.weight[gff, feeds, :])).astype(
                               np.float32)

                resp_err = dataflg * base_err[ff, :, tslc] * np.sqrt(
                    nudata.vis[gff, feeds, :].real) * tools.invert_no_zero(
                        np.sqrt(mu[np.newaxis, :, -1]))

                # Reference to specific input
                resp *= np.exp(
                    -1.0J *
                    np.angle(resp[phase_ref_by_pol[pp], np.newaxis, :]))

                # Apply timing correction
                if tcorr is not None:
                    resp *= tgain[gff]

                    results['timing'][ff, feeds] = tgain_transit[gff]

                # Fringestop
                lmbda = scipy.constants.c * 1e-6 / nudata.freq[gff]

                resp *= tools.fringestop_phase(
                    hag[np.newaxis, :], lat, src_dec,
                    dist[feeds, 0, np.newaxis] / lmbda,
                    dist[feeds, 1, np.newaxis] / lmbda)

                # Normalize by source flux
                resp *= inv_rt_flux_density[ff]
                resp_err *= inv_rt_flux_density[ff]

                # If requested, reference phase to the median value
                if config.med_phase_ref:
                    phi0 = np.angle(resp[:, itrans, np.newaxis])
                    resp *= np.exp(-1.0J * phi0)
                    resp *= np.exp(
                        -1.0J *
                        np.median(np.angle(resp), axis=0, keepdims=True))
                    resp *= np.exp(1.0J * phi0)

                # Check if return_response flag was set by user
                if not config.return_response:

                    if config.multi_sample:
                        # Width of the fitting window in radians; stays
                        # false-y (no window) when nsigma2 is not set
                        moving_window = (config.nsigma2 * np.radians(
                            utils.get_window(nudata.freq[gff],
                                             pol=polstr[pp],
                                             dec=src_dec,
                                             deg=True))
                                         if config.nsigma2 else config.nsigma2)

                    # Loop over inputs
                    for pii, ii in enumerate(feeds):

                        is_good = flg & (np.abs(resp[pii, :]) >
                                         0.0) & (resp_err[pii, :] > 0.0)

                        # Set the initial gains based on raw response at transit
                        if is_good[itrans]:
                            results['gain_eval'][ff, ii,
                                                 0] = tools.invert_no_zero(
                                                     resp[pii, itrans])
                            results['frac_gain_err'][ff, ii, 0, :] = (
                                resp_err[pii, itrans] * tools.invert_no_zero(
                                    np.abs(resp[pii, itrans])))
                            results['weight_eval'][ff, ii, 0] = 0.5 * (
                                np.abs(resp[pii, itrans])**2 *
                                tools.invert_no_zero(resp_err[pii, itrans]))**2

                            results['index_eval'][ff, ii] = 0
                            results['gain'][ff,
                                            ii] = results['gain_eval'][ff, ii,
                                                                       0]
                            results['weight'][ff,
                                              ii] = results['weight_eval'][ff,
                                                                           ii,
                                                                           0]

                        # Exit if not performing multi time sample fit
                        if not config.multi_sample:
                            continue

                        if (2 * int(np.sum(is_good))) < (nparam + 1):
                            continue

                        try:
                            param, param_err, gain, gain_err, ndof, chisq, tval = fitter_function(
                                hag[is_good],
                                resp[pii, is_good],
                                resp_err[pii, is_good],
                                ha_eval,
                                window=moving_window,
                                tval=tval,
                                **config.fit)
                        except Exception as rex:
                            if config.verbose:
                                mlog.info(
                                    "Frequency %0.2f, Feed %d failed with error: %s"
                                    % (nudata.freq[gff], ii, rex))
                            continue

                        # Check for nan
                        wfit = (np.abs(gain) *
                                tools.invert_no_zero(np.abs(gain_err)))**2
                        if np.any(~np.isfinite(np.abs(gain))) or np.any(
                                ~np.isfinite(wfit)):
                            continue

                        # Save to results using the convention that you should *multiply* the visibilites by the gains
                        results['gain_eval'][
                            ff, ii, fitslc] = tools.invert_no_zero(gain)
                        results['frac_gain_err'][ff, ii, fitslc,
                                                 0] = gain_err.real
                        results['frac_gain_err'][ff, ii, fitslc,
                                                 1] = gain_err.imag
                        results['weight_eval'][ff, ii, fitslc] = wfit

                        results['parameter'][ff, ii, :] = param
                        results['parameter_err'][ff, ii, :] = param_err

                        results['ndof'][ff, ii, :] = ndof
                        results['chisq'][ff, ii, :] = chisq

                        # Check if the fit was successful and update the gain evaluation index appropriately
                        if np.all((chisq / ndof.astype(np.float32)
                                   ) <= config.chisq_per_dof_threshold):
                            results['index_eval'][ff, ii] = ind_eval
                            results['gain'][ff, ii] = results['gain_eval'][
                                ff, ii, ind_eval]
                            results['weight'][ff, ii] = results['weight_eval'][
                                ff, ii, ind_eval]

                else:

                    # Return response only (do not fit model)
                    results['response'][ff, feeds, :] = resp
                    results['response_err'][ff, feeds, :] = resp_err

        # Save time to fit data
        results['runtime'][gg, 1] = time.time() - timeit_start_gg

        mlog.info("Time to fit (per frequency):  %0.3f sec" %
                  (results['runtime'][gg, 1] / ngroup, ))

        # Clean up
        del nudata
        gc.collect()

    # Print total run time
    mlog.info("TOTAL TIME TO LOAD: %0.3f min" %
              (np.sum(results['runtime'][:, 0]) / 60.0, ))
    mlog.info("TOTAL TIME TO FIT:  %0.3f min" %
              (np.sum(results['runtime'][:, 1]) / 60.0, ))

    # Set the best estimate of the gain
    if not config.return_response:

        flag = results['index_eval'] >= 0
        gain = results['gain']

        # Compute amplitude
        amp = np.abs(gain)

        # Hard cutoffs on the amplitude
        med_amp = np.median(amp[flag])
        min_amp = med_amp * config.min_amp_scale_factor
        max_amp = med_amp * config.max_amp_scale_factor

        flag &= ((amp >= min_amp) & (amp <= max_amp))

        # Flag outliers in amplitude for each frequency
        for pp, feeds in enumerate(pol):

            med_amp_by_pol = np.zeros(nfreq, dtype=np.float32)
            sig_amp_by_pol = np.zeros(nfreq, dtype=np.float32)

            for ff in range(nfreq):

                this_flag = flag[ff, feeds]

                if np.any(this_flag):

                    med, slow, shigh = utils.estimate_directional_scale(
                        amp[ff, feeds[this_flag]])
                    lower = med - config.nsigma_outlier * slow
                    upper = med + config.nsigma_outlier * shigh

                    flag[ff, feeds] &= ((amp[ff, feeds] >= lower) &
                                        (amp[ff, feeds] <= upper))

                    med_amp_by_pol[ff] = med
                    sig_amp_by_pol[ff] = 0.5 * (shigh - slow) / np.sqrt(
                        np.sum(this_flag, dtype=np.float32))

            if config.nsigma_med_outlier:

                med_flag = med_amp_by_pol > 0.0

                not_outlier = flag_outliers(med_amp_by_pol,
                                            med_flag,
                                            window=config.window_med_outlier,
                                            nsigma=config.nsigma_med_outlier)
                flag[:, feeds] &= not_outlier[:, np.newaxis]

                mlog.info("Pol %s:  %d frequencies are outliers." %
                          (polstr[pp],
                           np.sum(~not_outlier & med_flag, dtype=np.int)))

        # Determine bad frequencies
        flag_freq = (np.sum(flag, axis=1, dtype=np.float32) /
                     float(ninput)) > config.threshold_good_freq
        good_freq = np.flatnonzero(flag_freq)

        # Determine bad inputs
        fraction_good = np.sum(flag[good_freq, :], axis=0,
                               dtype=np.float32) / float(good_freq.size)
        flag_input = fraction_good > config.threshold_good_input

        # Finalize flag
        flag &= (flag_freq[:, np.newaxis] & flag_input[np.newaxis, :])

        # Interpolate gains
        interp_gain, interp_weight = interpolate_gain(
            freq,
            gain,
            results['weight'],
            flag=flag,
            length_scale=config.interpolation_length_scale,
            mlog=mlog)
        # Save gains to object
        results['flag'] = flag
        results['gain'] = interp_gain
        results['weight'] = interp_weight

    # Return results
    return results
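A sketch of how this routine might be invoked (the .h5 paths and source name are placeholders; `inputmap`, `tcorr`, and any config overrides are optional, as the signature above shows, and keyword overrides such as return_response are merged into the config):

# Hypothetical invocation -- file names and source are illustrative
file_list = ["transit_0.h5", "transit_1.h5"]
results = offline_point_source_calibration(
    file_list,
    "CYG_A",                 # source name; must exist in FluxCatalog
    physical_freq=[600.0],   # restrict to frequencies near 600 MHz
    return_response=True)    # skip the model fit, return raw response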
Example #48
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u""""
json objects collected with TweetScraper contain a "nbr_reply" field.
json objects collected with basic Twitter API lack "reply_count field.
This script adds this field.
"""

import cfg
import json
import os
from log import setup_logging
import logging
import myprettyprint

setup_logging()
logger = logging.getLogger(__name__)

path = "../data/reply"

tweets = []

for filename in os.listdir(path):
    p = os.path.join(path, filename)  # path lacks a trailing slash, so join explicitly
    with open(p, mode='r') as f:
        tweets.append(json.load(f))

total = 0  # avoid shadowing the built-in sum()
for tweet in tweets:
    total += tweet["reply_count"]
print(total)
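The excerpt only sums the counts; the normalization step the docstring describes (copying "nbr_reply" into a "reply_count" field and writing the file back) is not shown. A sketch of that step under the field mapping stated in the docstring, reusing the imports and `path` from the script above:

# Hypothetical completion of the step described in the docstring
for filename in os.listdir(path):
    p = os.path.join(path, filename)
    with open(p, mode='r') as f:
        tweet = json.load(f)
    if "reply_count" not in tweet and "nbr_reply" in tweet:
        tweet["reply_count"] = tweet["nbr_reply"]
        with open(p, mode='w') as f:
            json.dump(tweet, f)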
Example #49
File: config.py  Project: laevar/jupiter
def runtimeconfig(pyfile):
	"""
	Configure remaining runtime options and return runtype.
	
	You need to pass in a path to the calling script (e.g. use the __file__ 
	attribute).
	
	"""
	from log import setup_logging
	setup_logging()
	if debug or verbose >= 1:
		from log import safe_print
	if debug:
		safe_print("[D] pydir:", pydir)
	if isapp:
		runtype = ".app"
	elif isexe:
		if debug:
			safe_print("[D] _MEIPASS2 or pydir:", getenvu("_MEIPASS2", exedir))
		if getenvu("_MEIPASS2", exedir) not in data_dirs:
			data_dirs.insert(1, getenvu("_MEIPASS2", exedir))
		runtype = exe_ext
	else:
		pydir_parent = os.path.dirname(pydir)
		if debug:
			safe_print("[D] dirname(os.path.abspath(sys.argv[0])):", 
					   os.path.dirname(os.path.abspath(sys.argv[0])))
			safe_print("[D] pydir parent:", pydir_parent)
		if os.path.dirname(os.path.abspath(sys.argv[0])) == pydir_parent and \
		   pydir_parent not in data_dirs:
			# Add the parent directory of the package directory to our list
			# of data directories if it is the directory containing the 
			# currently run script (e.g. when running from source)
			data_dirs.insert(1, pydir_parent)
		runtype = pyext
	for dir_ in sys.path:
		if not isinstance(dir_, unicode):
			dir_ = unicode(dir_, fs_enc)
		dir_ = os.path.abspath(os.path.join(dir_, appname))
		if dir_ not in data_dirs and os.path.isdir(dir_):
			data_dirs.append(dir_)
			if debug:
				safe_print("[D] from sys.path:", dir_)
	if sys.platform not in ("darwin", "win32"):
		data_dirs.extend([os.path.join(dir_, "doc", appname + "-" + version) 
						  for dir_ in xdg_data_dirs + [xdg_data_home]])
		data_dirs.extend([os.path.join(dir_, "doc", "packages", appname) 
						  for dir_ in xdg_data_dirs + [xdg_data_home]])
		data_dirs.extend([os.path.join(dir_, "doc", appname) 
						  for dir_ in xdg_data_dirs + [xdg_data_home]])
		data_dirs.extend([os.path.join(dir_, "doc", appname.lower())  # Debian
						  for dir_ in xdg_data_dirs + [xdg_data_home]])
		data_dirs.extend([os.path.join(dir_, "icons", "hicolor") 
						  for dir_ in xdg_data_dirs + [xdg_data_home]])
	if debug:
		safe_print("[D] Data files search paths:\n[D]", "\n[D] ".join(data_dirs))
	defaults["3dlut.input.profile"] = get_data_path(os.path.join("ref",
																 "Rec709.icm")) or ""
	defaultmmode = defaults["measurement_mode"]
	defaultptype = defaults["profile.type"]
	defaultchart = testchart_defaults.get(defaultptype, 
										  testchart_defaults["s"])[None]
	defaults["testchart.file"] = get_data_path(os.path.join("ti1", 
															defaultchart)) or ""
	defaults["testchart.file.backup"] = defaults["testchart.file"]
	defaults["profile_verification_chart"] = get_data_path(os.path.join("ref", 
															"verify.ti1")) or ""
	defaults["gamap_profile"] = get_data_path(os.path.join("ref", "sRGB.icm")) or ""
	return runtype
Example #50
async def startup_event():
    setup_logging()
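An `async def startup_event` with this name is the usual shape of a FastAPI startup hook. Assuming that framework, the handler would be registered like this (the app object is illustrative):

from fastapi import FastAPI

app = FastAPI()

@app.on_event("startup")
async def startup_event():
    setup_logging()  # configure logging once, before the first request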
Example #51
#!/usr/bin/env python

import fuse
import json
import log
import os
import rest_client
import stat
import sys
import time
import threading

fuse.fuse_python_api = (0, 2)

logger = log.setup_logging("/tmp/efs")

READ_CACHE = {}
WRITE_CACHE = {}
SYMLINK_FILES = {}


def embed_shell():
    import IPython
    IPython.embed()


class EFSStat(fuse.Stat):
    def __init__(self):
        self.st_mode = 0
        self.st_ino = 0
        self.st_dev = 0