Beispiel #1
0
def _main():
    """Entry point: verify host and tool prerequisites, then build the
    clang cross-check plugin (optionally cleaning previous artifacts)."""
    if on_mac():
        die("Cross-checking is only supported on Linux hosts.")

    setup_logging()
    logging.debug("args: %s", " ".join(sys.argv))

    # earlier plumbum versions are missing features such as TEE.
    # NOTE: compare numeric components; comparing the version *strings*
    # would wrongly report e.g. "1.10" as older than "1.9".
    def _ver(v):
        return tuple(int(p) for p in v.split(".") if p.isdigit())

    if _ver(pb.__version__) < _ver(c.MIN_PLUMBUM_VERSION):
        die("locally installed version {} of plumbum is too old.\n"
            "please upgrade plumbum to version {} or later."
            .format(pb.__version__, c.MIN_PLUMBUM_VERSION))

    args = _parse_args()
    if args.clean_all:
        logging.info("cleaning all dependencies and previous built files")
        shutil.rmtree(c.CLANG_XCHECK_PLUGIN_BLD, ignore_errors=True)
        make = get_cmd_or_die('make')
        with pb.local.cwd(c.LIBFAKECHECKS_DIR):
            make('clean')

    # clang 3.6.0 is known to work; 3.4.0 known to not work.
    ensure_clang_version([3, 6, 0])
    # NOTE: it seems safe to disable this check since we now
    # that we use a rust-toolchain file for rustc versioning.
    # ensure_rustc_version(c.CUSTOM_RUST_RUSTC_VERSION)

    ensure_dir(c.CLANG_XCHECK_PLUGIN_BLD)
    ensure_dir(c.BUILD_DIR)
    git_ignore_dir(c.BUILD_DIR)

    build_clang_plugin(args)
Beispiel #2
0
def _main():
    """Entry point: verify prerequisites (plumbum, rust toolchain, clang,
    rustc) and build the clang cross-check plugin."""
    setup_logging()
    logging.debug("args: %s", " ".join(sys.argv))

    # earlier plumbum versions are missing features such as TEE.
    # NOTE: compare numeric components; comparing the version *strings*
    # would wrongly report e.g. "1.10" as older than "1.9".
    def _ver(v):
        return tuple(int(p) for p in v.split(".") if p.isdigit())

    if _ver(pb.__version__) < _ver(c.MIN_PLUMBUM_VERSION):
        die("locally installed version {} of plumbum is too old.\n"
            "please upgrade plumbum to version {} or later."
            .format(pb.__version__, c.MIN_PLUMBUM_VERSION))

    args = _parse_args()
    if args.clean_all:
        logging.info("cleaning all dependencies and previous built files")
        shutil.rmtree(c.CLANG_XCHECK_PLUGIN_BLD, ignore_errors=True)

    # prerequisites
    if not have_rust_toolchain(c.CUSTOM_RUST_NAME):
        die("missing rust toolchain: " + c.CUSTOM_RUST_NAME, errno.ENOENT)

    # clang 3.6.0 is known to work; 3.4.0 known to not work.
    ensure_clang_version([3, 6, 0])
    ensure_rustc_version(c.CUSTOM_RUST_RUSTC_VERSION)

    ensure_dir(c.CLANG_XCHECK_PLUGIN_BLD)
    ensure_dir(c.DEPS_DIR)
    git_ignore_dir(c.DEPS_DIR)

    build_clang_plugin(args)
Beispiel #3
0
    def test_setup_logging_invalid_log_level(self):
        """An unknown level name must raise with a descriptive message."""
        fake_logger = Mock()
        with self.assertRaises(Exception) as caught:
            common.setup_logging(
                fake_logger, '/dev/null', '%(name)s: %(message)s', 'banana')

        # The implementation upper-cases the offending level in the message.
        assert str(caught.exception) == 'Invalid log level: BANANA'
Beispiel #4
0
def main():
    """Generate a new csmith test case and compare its execution to the translated Rust version."""

    validate_csmith_home()

    common.setup_logging()

    with tempfile.TemporaryDirectory('_c2rust_csmith') as tmpdir:

        # All intermediate artifacts live inside the temporary directory.
        c_src = os.path.join(tmpdir, 'output.c')
        c_exe = os.path.join(tmpdir, 'output.c.exe')
        rs_src = os.path.join(tmpdir, 'output.rs')
        rs_exe = os.path.join(tmpdir, 'output.rs.exe')

        logging.info("Using temporary directory: %s", tmpdir)

        # Build and run the C version to obtain the reference output.
        generate_c_source(tmpdir, c_src)
        compile_c_file(c_src, c_exe)
        expected = execute_driver(c_exe)

        # Transpile, build and run the Rust version.
        transpile_file(tmpdir, c_src)
        compile_rust_file(c_src, rs_src, rs_exe)
        actual = execute_driver(rs_exe)

        if expected == actual:
            logging.info("Match")
        else:
            # Keep the failing inputs around for post-mortem analysis.
            logging.info("FAILURE: %s %s", expected, actual)
            copyfile(c_src, 'output.c')
            copyfile(rs_src, 'output.rs')
Beispiel #5
0
def main():
    """CLI entry point: load the flof configuration, set up logging,
    register and filter workers, then run the root worker."""
    oparser = add_options()
    (options, args) = oparser.parse_args()

    if not args:
        oparser.print_help()
        sys.exit()
    if not os.path.isfile(args[0]):
        # Parenthesized call form works on both Python 2 and 3; the bare
        # `print ...` statement is a syntax error on Python 3.
        print("Configuration file %s not existing." % args[0])
        sys.exit(-1)

    config_file = norm_path(args[0])
    config = configuration.parse_merge(config_file)
    loglevel = config.getroot().get("loglevel", 10)
    common.setup_logging("~/.flof/flof.log", loglevel)

    register_bundled_workers()

    # --only keeps the named workers; --do-not removes them.
    if options.only:
        WorkerRegistry.workers = filter(lambda a: a in options.only.split(","), WorkerRegistry.workers)

    if options.do_not:
        WorkerRegistry.workers = filter(lambda a: a not in options.do_not.split(","), WorkerRegistry.workers)

    # Run relative to the configuration file's directory.
    os.chdir(os.path.dirname(norm_path(args[0])))
    context = ContextManager({"config_file" : config_file})
    context.overrides = parse_context(options.context)
    RootWorker(config, context).run()
Beispiel #6
0
    def __init__(self, config_file):
        """
        Initialize the config object loading from ini file
        :param config_file: the path to the configuration file to load;
            either absolute or relative to ``<script_root>/config``
        :raises ConfigError: if config_file is None
        :raises OSError: if the resolved config file does not exist
        """
        # Initial log setup
        setup_logging("promoter", logging.DEBUG)

        # NOTE(review): self.log is read before being assigned in this
        # method — presumably provided by the class or a base class; confirm.
        self.git_root, self.script_root = get_root_paths(self.log)
        self.log.debug("Config file passed: %s", config_file)
        self.log.debug("Git root %s", self.git_root)

        if config_file is None:
            raise ConfigError("Empty config file")
        # The path is either absolute or it's relative to the code root
        if not os.path.isabs(config_file):
            config_file = os.path.join(self.script_root, "config", config_file)
        try:
            # Probe existence only; the actual read happens below.
            os.stat(config_file)
        except OSError:
            self.log.error("Configuration file not found")
            raise

        self.log.debug("Using config file %s", config_file)
        self._file_config = self.load_from_ini(config_file)
        self._config = self.load_config(config_file, self._file_config)

        # Load keys as config attributes
        for key, value in self._config.items():
            setattr(self, key, value)
Beispiel #7
0
def main():
    """CLI entry point: load the flof configuration, set up logging,
    register and filter workers, then run the root worker."""
    oparser = add_options()
    (options, args) = oparser.parse_args()

    if not args:
        oparser.print_help()
        sys.exit()
    if not os.path.isfile(args[0]):
        # Parenthesized call form works on both Python 2 and 3; the bare
        # `print ...` statement is a syntax error on Python 3.
        print("Configuration file %s not existing." % args[0])
        sys.exit(-1)

    config_file = norm_path(args[0])
    config = configuration.parse_merge(config_file)
    loglevel = config.getroot().get("loglevel", 10)
    common.setup_logging("~/.flof/flof.log", loglevel)

    register_bundled_workers()

    # --only keeps the named workers; --do-not removes them.
    if options.only:
        WorkerRegistry.workers = filter(lambda a: a in options.only.split(","),
                                        WorkerRegistry.workers)

    if options.do_not:
        WorkerRegistry.workers = filter(
            lambda a: a not in options.do_not.split(","),
            WorkerRegistry.workers)

    # Run relative to the configuration file's directory.
    os.chdir(os.path.dirname(norm_path(args[0])))
    context = ContextManager({"config_file": config_file})
    context.overrides = parse_context(options.context)
    RootWorker(config, context).run()
Beispiel #8
0
def main(argv):
    """Parse command-line arguments, verify them and run FEC decoding."""
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('-o', '--output', required=True, action='store',
                        metavar='DIR',
                        help='the output directory where files will be rebuilt')
    parser.add_argument('inputs', metavar='IN_DIR', type=str, nargs='+',
                        help='list of directories from where input chunks are taken')
    parser.add_argument('-f', '--force', action='store_true',
                        help='overwrite the output directory, even if it exists')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='detailed output of operations')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='debug information')

    # create namespace with args
    ns = parser.parse_args(args=argv)

    common.setup_logging(ns)
    _verify_args(ns)
    _fec_decode(ns)
    def test_setup_logging_invalid_log_level(self):
        # An unrecognized level name must raise; the error message
        # upper-cases the offending level.
        logger = Mock()
        with self.assertRaises(Exception) as context:
            common.setup_logging(logger, '/dev/null',
                                 '%(name)s: %(message)s', 'banana')

        assert str(context.exception) == 'Invalid log level: BANANA'
Beispiel #10
0
def main():
    """Build the fakechecks library, then exercise both the clang and
    the rust cross-check flavours."""
    setup_logging()

    # checkout_and_build_libclevrbuf()
    build_libfakechecks()

    # Run both cross-check test suites against the freshly built library.
    test_clang_cross_checks()
    test_rust_cross_checks()
Beispiel #11
0
def _main():
    """Fetch and build LLVM, then build the c2rust transpiler."""
    setup_logging()
    logging.debug("args: %s", " ".join(sys.argv))

    # FIXME: allow env/cli override of LLVM_SRC and LLVM_BLD
    # FIXME: check that cmake and ninja are installed
    # FIXME: option to build LLVM/Clang from master?

    args = _parse_args()

    if args.clean_all:
        logging.info("cleaning all dependencies and previous built files")
        # Remove every stale build/source tree, ignoring missing ones.
        for stale_dir in (c.LLVM_SRC, c.LLVM_BLD,
                          c.BUILD_DIR, c.AST_EXPO_PRJ_DIR):
            shutil.rmtree(stale_dir, ignore_errors=True)
        cargo_cmd = get_cmd_or_die("cargo")
        with pb.local.cwd(c.ROOT_DIR):
            invoke(cargo_cmd, "clean")

    ensure_dir(c.LLVM_BLD)
    ensure_dir(c.BUILD_DIR)
    git_ignore_dir(c.BUILD_DIR)

    download_llvm_sources()
    configure_and_build_llvm(args)
    build_transpiler(args)
    print_success_msg(args)
Beispiel #12
0
def main():
    """Find the compile command for the given C file inside a
    compile_commands.json database and dump its clang AST."""
    setup_logging()
    if len(sys.argv) != 3:
        print(
            "usage: print_clang_ast.py <file.c> path/to/compile_commands.json",
            file=sys.stderr)
        exit(1)
    c_file: str = os.path.basename(sys.argv[1])
    compile_commands_path: str = sys.argv[2]

    # do we have clang in path?
    get_cmd_or_die("clang")

    try:
        with open(compile_commands_path, "r") as fh:
            commands = json.load(fh)
    except FileNotFoundError:
        # was: an f-string without placeholders concatenated to the path
        die(f"file not found: {compile_commands_path}")

    # keep only the entries that compile the requested file
    commands = filter(
        lambda entry: os.path.basename(entry["file"]) == c_file,
        commands)

    cmd = next(commands, None)
    if not cmd:
        die(f"no command to compile {c_file}")
    elif next(commands, None):
        logging.warning(f"warning: found multiple commands for {c_file}")

    dump_ast(cmd)
Beispiel #13
0
    def __init__(self, config_class=PromoterConfig):
        """
        Initialize the config builder: resolve code roots, load the
        global defaults and remember the config class to instantiate.
        :param config_class: class used to build the final configuration
        """
        # Initial log setup
        setup_logging("promoter", logging.DEBUG)
        self.git_root = None
        self.script_root = None
        self.git_root, self.script_root = get_root_paths(self.log)
        self.log.debug("Git root %s", self.git_root)
        # was: logged self.git_root under the "Script root" label
        self.log.debug("Script root %s", self.script_root)
        # Map logical config names to the directories they are loaded from.
        self.rel_roots_map = {
            "global_defaults":
            os.path.join(self.script_root, "config_environments"),
            "environments_pool":
            os.path.join(self.script_root, "config_environments"),
        }

        self.global_defaults = self.load_yaml_config("global_defaults",
                                                     "global_defaults.yaml")
        self.global_defaults['git_root'] = self.git_root
        self.global_defaults['script_root'] = self.script_root
        self.config_class = config_class
 def test_setup_logging_correct_log_file(self, mock_log_info):
     # setup_logging should announce the configured level and log file
     # via the (mocked) log.info call.
     __, filepath = tempfile.mkstemp()
     setup_logging("tests", logging.DEBUG, log_file=filepath)
     os.unlink(filepath)
     mock_log_info.assert_has_calls([
         mock.call('Set up logging level %%s on:  file %s' % filepath,
                   'DEBUG')
     ])
 def test_close_logging(self):
     # close_logging must remove every handler that setup_logging attached
     # to the named logger.
     __, filepath = tempfile.mkstemp()
     setup_logging("test", logging.DEBUG, log_file=filepath)
     logger = logging.getLogger("test")
     self.assertGreater(len(logger.handlers), 0)
     close_logging("test")
     self.assertEqual(len(logger.handlers), 0)
     os.unlink(filepath)
Beispiel #16
0
def _main():
    """Build the full c2rust toolchain: verify prerequisites, fetch and
    build LLVM, build the transpiler, then print how to invoke it."""
    setup_logging()
    logging.debug("args: %s", " ".join(sys.argv))

    # FIXME: allow env/cli override of LLVM_SRC, LLVM_VER, and LLVM_BLD
    # FIXME: check that cmake and ninja are installed
    # FIXME: option to build LLVM/Clang from master?

    # earlier plumbum versions are missing features such as TEE.
    # NOTE: compare numeric components; comparing the version *strings*
    # would wrongly report e.g. "1.10" as older than "1.9".
    def _ver(v):
        return tuple(int(p) for p in v.split(".") if p.isdigit())

    if _ver(pb.__version__) < _ver(c.MIN_PLUMBUM_VERSION):
        die("locally installed version {} of plumbum is too old.\n"
            "please upgrade plumbum to version {} or later."
            .format(pb.__version__, c.MIN_PLUMBUM_VERSION))

    args = _parse_args()

    # prerequisites
    if not have_rust_toolchain(c.CUSTOM_RUST_NAME):
        die("missing rust toolchain: " + c.CUSTOM_RUST_NAME, errno.ENOENT)

    # clang 3.6.0 is known to work; 3.4.0 known to not work.
    ensure_clang_version([3, 6, 0])

    if args.clean_all:
        logging.info("cleaning all dependencies and previous built files")
        shutil.rmtree(c.LLVM_SRC, ignore_errors=True)
        shutil.rmtree(c.LLVM_BLD, ignore_errors=True)
        shutil.rmtree(c.DEPS_DIR, ignore_errors=True)
        shutil.rmtree(c.AST_EXPO_PRJ_DIR, ignore_errors=True)
        cargo = get_cmd_or_die("cargo")
        with pb.local.cwd(c.ROOT_DIR):
            invoke(cargo, "clean")

    ensure_dir(c.LLVM_BLD)
    ensure_dir(c.DEPS_DIR)
    git_ignore_dir(c.DEPS_DIR)

    download_llvm_sources()

    update_cmakelists()

    configure_and_build_llvm(args)

    build_transpiler(args)

    # print a helpful message on how to run c2rust bin directly
    c2rust_bin_path = 'target/debug/c2rust' if args.debug \
                      else 'target/release/c2rust'
    c2rust_bin_path = os.path.join(c.ROOT_DIR, c2rust_bin_path)
    # shorten the printed path to be relative when we are inside the tree
    abs_curdir = os.path.abspath(os.path.curdir)
    common_path = os.path.commonpath([abs_curdir, c2rust_bin_path])
    if common_path != "/":
        c2rust_bin_path = "." + c2rust_bin_path[len(common_path):]
    print("success! you may now run", c2rust_bin_path)
Beispiel #17
0
def _main():
    """Build the AST exporter/importer toolchain: verify prerequisites,
    fetch and build LLVM with the exporter integrated, then build the
    importer and optionally sanity-test the exporter."""
    setup_logging()
    logging.debug("args: %s", " ".join(sys.argv))

    # FIXME: allow env/cli override of LLVM_SRC, LLVM_VER, and LLVM_BLD
    # FIXME: check that cmake and ninja are installed
    # FIXME: option to build LLVM/Clang from master?

    # earlier plumbum versions are missing features such as TEE.
    # NOTE: compare numeric components; comparing the version *strings*
    # would wrongly report e.g. "1.10" as older than "1.9".
    def _ver(v):
        return tuple(int(p) for p in v.split(".") if p.isdigit())

    if _ver(pb.__version__) < _ver(c.MIN_PLUMBUM_VERSION):
        die("locally installed version {} of plumbum is too old.\n"
            "please upgrade plumbum to version {} or later."
            .format(pb.__version__, c.MIN_PLUMBUM_VERSION))

    args = _parse_args()
    if args.clean_all:
        logging.info("cleaning all dependencies and previous built files")
        shutil.rmtree(c.LLVM_SRC, ignore_errors=True)
        shutil.rmtree(c.LLVM_BLD, ignore_errors=True)
        shutil.rmtree(c.DEPS_DIR, ignore_errors=True)

    # prerequisites
    if not have_rust_toolchain(c.CUSTOM_RUST_NAME):
        die("missing rust toolchain: " + c.CUSTOM_RUST_NAME, errno.ENOENT)

    # clang 3.6.0 is known to work; 3.4.0 known to not work.
    ensure_clang_version([3, 6, 0])
    ensure_rustc_version(c.CUSTOM_RUST_RUSTC_VERSION)

    ensure_dir(c.LLVM_BLD)
    ensure_dir(c.DEPS_DIR)
    git_ignore_dir(c.DEPS_DIR)

    # bear is only needed (and built) on Linux hosts
    if on_linux():
        build_a_bear()
        if not os.path.isfile(c.BEAR_BIN):
            die("bear not found", errno.ENOENT)

    download_llvm_sources()

    integrate_ast_exporter()

    cc_db = install_tinycbor()

    configure_and_build_llvm(args)

    # NOTE: we're not doing this anymore since it is
    # faster and takes less space to simply pull the
    # prebuilt nightly binaries with rustup
    # download_and_build_custom_rustc(args)

    build_ast_importer(args.debug)

    if not on_mac() and args.sanity_test:
        test_ast_exporter(cc_db)
Beispiel #18
0
def main():
    """Verify the pinned rust toolchain is installed, then run the
    cross-check test suite."""
    setup_logging()

    # prerequisites: the custom toolchain must be available via rustup
    if not have_rust_toolchain(c.CUSTOM_RUST_NAME):
        die("missing rust toolchain: " + c.CUSTOM_RUST_NAME, errno.ENOENT)

    # checkout_and_build_libclevrbuf()
    test_cross_checks()
Beispiel #19
0
def main():
    """Transpile the C sources described by a compile_commands.json file."""
    setup_logging()
    logging.debug("args: %s", " ".join(sys.argv))

    parsed = parse_args()
    # Let the global constants module see the parsed options first.
    c.update_args(parsed)
    transpile_files(parsed.commands_json, parsed.filter, parsed.extra_impo_args,
                    parsed.import_only, parsed.verbose, parsed.emit_build_files,
                    parsed.main, parsed.cross_checks, parsed.use_fakechecks,
                    parsed.cross_check_config, parsed.reloop_cfgs)

    logging.info("success")
Beispiel #20
0
def main():
    """Parse probe-scan options, connect the browser session and run the scan."""
    arg_parser = argparse.ArgumentParser()

    # Register common args.
    common.register_args(arg_parser)

    def parse_bool(text):
        """Parse a boolean command-line value.

        argparse's ``type=bool`` is a well-known trap: ``bool("False")``
        is True because any non-empty string is truthy, so these flags
        could never be turned off from the command line."""
        return str(text).strip().lower() in ('1', 'true', 'yes', 'y')

    # Filters for targets.
    arg_parser.add_argument('--include_inactive', type=parse_bool, default=True)
    arg_parser.add_argument('--include_normal', type=parse_bool, default=False)
    arg_parser.add_argument('--include_honorable', type=parse_bool, default=False)
    arg_parser.add_argument('--include_strong', type=parse_bool, default=False)
    arg_parser.add_argument('--rank_min',
                            type=int,
                            required=True,
                            help='Min rank to send probes')
    arg_parser.add_argument('--rank_max',
                            type=int,
                            required=True,
                            help='Max rank to send probes')

    # Scan config.
    arg_parser.add_argument('--planet_num',
                            type=int,
                            default=0,
                            help='Which planet to send probes from')
    arg_parser.add_argument('--parallelism',
                            type=int,
                            required=True,
                            help='Num missions to send at a time')
    arg_parser.add_argument('-n',
                            '--max_scans',
                            type=int,
                            required=True,
                            help='Num of scans before exiting')
    arg_parser.add_argument('--systems_to_skip',
                            type=int,
                            default=0,
                            help='Skip the N closest systems')
    arg_parser.add_argument(
        '--galaxy',
        type=int,
        help='If present, scan this galaxy instead of the home galaxy')

    # Args for universe structure.
    arg_parser.add_argument('--num_galaxies', type=int, default=7)
    arg_parser.add_argument('--num_systems', type=int, default=499)

    args = arg_parser.parse_args()

    common.setup_logging(args)
    b = common.open_browser_and_connect(args)

    scan(b, args)
def main():
    """Compute the valuation ratios, then every derived ranking table."""
    common.setup_logging()

    # Per-metric ratios.
    calc_pe_ratio_ftm()
    calc_garp_ratio()

    # Ranks built from the ratios above.
    calc_ranks()

    # Magic-formula scores (trailing and forward-looking variants).
    calc_magic_formula_trailing()
    calc_magic_formula_future()

    # Combined magic-formula ranking.
    calc_magic_formula_ranks()
Beispiel #22
0
def main():
    """Run the ratio and ranking calculations, presumably in dependency
    order (ratios first, ranks after) — confirm before reordering."""
    common.setup_logging()

    # Per-metric ratios.
    calc_pe_ratio_ftm()
    calc_garp_ratio()

    # Ranks derived from the ratios above.
    calc_ranks()

    # Magic-formula scores (trailing and forward-looking variants).
    calc_magic_formula_trailing()
    calc_magic_formula_future()

    # Combined magic-formula ranking.
    calc_magic_formula_ranks()
Beispiel #23
0
    def sanity_check(self, config, file_config, checks="all"):
        """
        Validate the merged promoter configuration.

        There are several exceptions
        that can block the load
        - Missing main section
        - Missing criteria section for one of the specified candidates
        - Missing jobs in criteria section
        - Missing mandatory parameters
        - Missing password

        :param config: the effective (merged, defaulted) configuration
        :param file_config: the raw per-file configuration sections
        :param checks: "all", or a list drawn from
            ["logs", "parameters", "password", "criteria"]
        :return: True if all requested checks passed, False otherwise
        """
        conf_ok = True
        mandatory_parameters = [
            "distro_name",
            "distro_version",
            "release",
            "api_url",
            "log_file",
        ]
        if checks == "all":
            checks = ["logs", "parameters", "password", "criteria"]
        if 'logs' in checks:
            # Verify the configured log level and file are usable.
            try:
                setup_logging('promoter', config['log_level'],
                              config['log_file'])
            except LoggingError:
                conf_ok = False
        # Warn (without failing) about mandatory parameters that fell back
        # to defaults.
        # NOTE(review): this loop runs even when "parameters" is not in
        # checks — confirm that is intentional.
        for key, value in config.items():
            if key in mandatory_parameters and key not in file_config['main']:
                self.log.warning(
                    "Missing parameter in configuration file: %s."
                    " Using default value: %s"
                    "", key, value)
        if 'username' not in file_config['main']:
            self.log.warning(
                "Missing parameter in configuration file: "
                "username. Using default value: %s"
                "", config['dlrnauth_username'])
        if "password" in checks:
            # The dlrnapi password comes from the environment, never the file.
            if config['dlrnauth_password'] is None:
                self.log.error("No dlrnapi password found in env")
                conf_ok = False
        if "criteria" in checks:
            # Every promotion target must declare at least one job.
            for target_name, job_list in \
                    config['promotion_criteria_map'].items():
                if not job_list:
                    self.log.error("No jobs in criteria for target %s",
                                   target_name)
                    conf_ok = False

        return conf_ok
Beispiel #24
0
def main():
    """Discover and run the idiomize refactoring test cases."""
    # TODO: implement rustfmt and diff actions from `run-test.sh`

    setup_logging()
    ensure_rustc_version(c.CUSTOM_RUST_RUSTC_VERSION)
    # TODO: update rustfmt version check once idiomize bitrot has been fixed
    # ensure_rustfmt_version()
    tests_root = os.path.join(c.RREF_DIR, "tests")
    assert os.path.isdir(tests_root), "test dir missing: " + tests_root
    idiomize = os.path.join(c.RREF_DIR, "target/debug/idiomize")
    if not os.path.isfile(idiomize):
        die("build idiomize binary first. expected: " + idiomize)

    # Run the cases in a stable, sorted order.
    run_tests(sorted(get_testcases(tests_root)))
def main():
    """Upload a local file into the given channel directory."""
    setup_logging()

    parser = ArgumentParser()
    parser.add_argument('channel_directory')
    parser.add_argument('local_path')
    opts = parser.parse_args()

    source = Path(opts.local_path)

    # The remote name is the file's own basename.
    upload(opts.channel_directory, source.name, source)
Beispiel #26
0
def main():
    """Spawn the leader process, connect a client and pump selector events."""
    port = common.get_port()
    leader.spawn(port)

    common.setup_logging()
    logger.info("ssvim starting! :>")

    selector = selectors.DefaultSelector()
    client = Client(port, selector)
    client.run()

    # Event loop: each ready file object is dispatched to the callback
    # registered as its selector data.
    while True:
        for key, _mask in selector.select():
            key.data(key.fileobj)
Beispiel #27
0
def setup_app(usage="%prog [options] ...",
              description=None,
              argstest=lambda args: len(args) == 0,
              logname='app',
              extra_options=None):
    """Perform the main setup steps for a cli application.

    @param usage: usage string for the OptionParser
    @param description: application description
    @param argstest: callable validating the positional arguments; it may
        accept either ``(args)`` or ``(args, options)``
    @param logname: program name used for log messages
    @param extra_options: optional callback that populates an OptionGroup
    @return: tuple ``(options, args, logger, service)``
    """
    parser = common.default_option_parser()
    parser.usage = usage
    parser.description = description and re.sub(r"\s+", " ", description)
    if extra_options is not None:
        group = OptionGroup(parser, parser.expand_prog_name("%prog options"))
        extra_options(group)
        parser.add_option_group(group)
    (options, args) = parser.parse_args()

    setup_encoding(options.io_encoding)
    ExceptionHandler()

    if options.help_exit_codes:
        common.print_exit_codes()
        sys.exit(exit_codes.success)

    # Dispatch on the validator's arity: one-parameter validators only get
    # the positional args.  inspect.getargspec was deprecated and removed
    # in Python 3.11; getfullargspec behaves identically here.
    if len(inspect.getfullargspec(argstest)[0]) == 1:
        args_valid = argstest(args)
    else:
        args_valid = argstest(args, options)

    if not args_valid:
        parser.print_help()
        sys.exit(exit_codes.bad_usage)

    common.setup_logging(options)
    common.finalize_options(options)

    service = PilotService(options.pilot_url, build_ssl_ctx(options),
                           options.retries, options.connection_debug)

    # Optional global timeout enforced via SIGALRM.
    if options.timeout is not None:
        signal.signal(signal.SIGALRM, sigalrm_handler)
        signal.alarm(options.timeout)

    return (options, args, logging.getLogger(logname), service)
def main():
    """Fetch the company listings for each exchange and persist them."""
    common.setup_logging()

    parser = argparse.ArgumentParser()
    args = parser.parse_args()

    for exchange in EXCHANGES:
        # Lazy %-style args: let the logging framework do the formatting.
        LOGGER.info('Getting exchange: %s', exchange)

        for company in get_companies(exchange):
            data.set_company(
                symbol=company.symbol,
                name=company.name,
                sector=company.sector,
                industry=company.industry,
            )
Beispiel #29
0
def main():
    """Start the asyncio question server and serve until interrupted."""
    common.setup_logging("talk_asyncio_server", level=logging.DEBUG)

    loop = asyncio.get_event_loop()
    bind_coro = loop.create_server(QuestionHandler, config.SERVER_HOST,
                                   config.SERVER_PORT)
    loop.run_until_complete(bind_coro)

    logging.info("Starting server...")

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server.
        pass
    except Exception as ex:
        logging.error("Error in execution: %s", ex)
Beispiel #30
0
def main():
    """Fetch the company listings for each exchange and persist them."""
    common.setup_logging()

    parser = argparse.ArgumentParser()
    args = parser.parse_args()

    for exchange in EXCHANGES:
        # Lazy %-style args: let the logging framework do the formatting.
        LOGGER.info('Getting exchange: %s', exchange)

        for company in get_companies(exchange):
            data.set_company(
                symbol=company.symbol,
                name=company.name,
                sector=company.sector,
                industry=company.industry,
            )
def main(argv):
    """Join the game server as a client and run the game loop.

    In auto mode logging goes through common.setup_logging(); in manual
    mode a DEBUG logger writes to client.log.
    """
    manual, name = parse_cmd_args(argv)
    auto = not manual
    client = None

    try:
        if auto:
            common.setup_logging()
        else:
            FORMAT = '%(filename)s: %(message)s'
            logging.basicConfig(level=logging.DEBUG, format=FORMAT,
                filename='client.log')
            logging.info('Logging started')
        client = Client(name, common.HOST, common.PORT, auto=auto)

        # join the server
        client.send_msg('[cjoin|{}]'.format(name.ljust(8)))
        while not client.msgs:
            client.recv_msgs()

        msg = client.get_msg()
        # was: the format string lacked its {} placeholder, so the
        # assertion message never actually included client.msgs
        assert msg, 'No msg, self.msgs = {}'.format(client.msgs)
        while message.msg_type(msg) != 'sjoin':
            # logging.warn('Client {} received an unexpected message: {}'.format(client.name, msg))
            msg = client.get_msg()

        # validate msg
        name = message.fields(msg)[0].strip()
        client.player = common.Player(name)
        logging.info('Client {} successfully joined with name {}'.format(client.name, name))
        if client.gui:
            client.gui.print_msg("Succesfully joined server with name {}".format(
                client.player.name))

        client.game_loop()

        if client.gui: client.gui.print_msg("Quitting, press any key to confirm")

        client.disconnect()

        if client.gui:
            client.gui.curses_thread.join()
        logging.info("Client %s quitting", client.name)
    except Exception as ex:
        logging.info('Client caught exception: %s', ex)
        # bare `raise` preserves the original traceback; `raise ex` resets it
        raise
    return
Beispiel #32
0
def main():
    """Dynamic-DNS updater: poll the router for the current WAN IPv4/IPv6
    addresses and update the matching DNS records when they change.

    Runs forever; each iteration (including error paths) sleeps 30 seconds.
    """
    setup_logging('ddns2.log')
    config = get_config('router', 'ddns')

    logging.info("starting ddns")

    ddns_config = config['DDNS_CONFIG']
    resource4 = ddns_config['resource4']  # DNS resource for the IPv4 record
    resource6 = ddns_config['resource6']  # DNS resource for the IPv6 record
    domain = ddns_config['domain']
    pppoe_interface = ddns_config['pppoe_interface']
    ipv6_pool_name = ddns_config['ipv6_pool_name']

    logging.info("loaded config, connecting to router")

    router_api = RouterOsApiPool(**(config['ROUTER_CONFIG'])).get_api()

    while True:
        try:
            # Pair each locally-detected address with its DNS resource.
            for local_ip, resource in ((get_local_ipv4(router_api,
                                                       pppoe_interface),
                                        resource4),
                                       (get_local_ipv6(router_api,
                                                       ipv6_pool_name),
                                        resource6)):
                current_ip, record_name = get_current_record_ip(
                    domain, resource)
                # Only touch the record when we have a local address and it
                # differs from what DNS currently reports.
                if local_ip is not None and current_ip != local_ip:
                    logging.info(
                        f"updating record {record_name} from {current_ip} to {local_ip}"
                    )
                    update_record_ip(domain, resource, local_ip)
            heartbeat()
        except RouterOsApiConnectionError:
            # Router connection dropped: rebuild the API session.
            logging.warning('resetting router api')
            router_api = RouterOsApiPool(
                **(get_config('router')['ROUTER_CONFIG'])).get_api()
        except JSONDecodeError:
            logging.exception('linode JSONDecodeError')
        except (requests.exceptions.RequestException, OSError):
            logging.exception('RequestException issues')
        except:
            # NOTE(review): bare except also catches SystemExit and
            # KeyboardInterrupt; it re-raises so nothing is swallowed, but
            # `except BaseException:` would be more explicit.
            logging.exception("error")
            raise
        finally:
            sleep(30)
Beispiel #33
0
def main():
    """Discover and run the c2rust-refactor test cases."""
    # TODO: implement rustfmt and diff actions from `run-test.sh`

    setup_logging()
    # NOTE: it seems safe to disable this check since we now
    # that we use a rust-toolchain file for rustc versioning.
    # ensure_rustc_version(c.CUSTOM_RUST_RUSTC_VERSION)
    # TODO: update rustfmt version check once c2rust-refactor bitrot has been fixed
    # ensure_rustfmt_version()
    tests_root = os.path.join(c.RREF_DIR, "tests")
    assert os.path.isdir(tests_root), "test dir missing: " + tests_root
    refactor_bin = os.path.join(c.ROOT_DIR, "target/debug/c2rust-refactor")
    if not os.path.isfile(refactor_bin):
        die("build refactor binary first. expected: " + refactor_bin)

    # Run the cases in a stable, sorted order.
    run_tests(sorted(get_testcases(tests_root)))
def main():
    """Create and run the asyncio question-answering server."""
    common.setup_logging("talk_asyncio_server", level=logging.DEBUG)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        loop.create_server(QuestionHandler, config.SERVER_HOST,
                           config.SERVER_PORT))

    logging.info("Starting server...")

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass  # normal shutdown via Ctrl-C
    except Exception as ex:
        logging.error("Error in execution: %s", ex)
Beispiel #35
0
def main():
    """Interactively search the Athera app_families endpoint by name.

    Returns 0 on success, 1 when the API token is missing, 2 when no
    org/group was selected.
    """
    logger = common.setup_logging()
    # What are we going to do?
    logger.info(__doc__)

    # Determine some arguments we need for api calls
    base_url = common.get_base_url()
    token = common.get_token_from_env()
    if not token:
        logger.fatal("ATHERA_API_TOKEN not set in env")
        return 1

    # Show the token expiry
    token_helper = common.TokenHelper(token)
    logger.info("Token expires {}".format(token_helper.get_expiry_string()))

    # API calls all need an active group to define the 'Context' of the request. We only care about the top-level groups, orgs. Ask for user input.
    selector = common.GroupSelector(logger, base_url, token)
    group_id = selector.get_org()
    if not group_id:
        return 2

    logger.info("Selected {}".format(group_id))

    # Feed this into the class which will query the app_families endpoint
    searcher = AppSearcher(logger, base_url, group_id, token)

    # Fetch the search term
    target = input("-- Enter the app name (or part of) to search for: ")

    # Run the search
    families = searcher.search_families(target)

    if families:
        # List comprehension to filter bundled. Bundled apps are legacy and should not be used
        result = list(filter(lambda f: 'Bundled' not in f['name'], families))

        if len(result) == 0:
            logger.info("-- No apps found (bundled apps are ignored)")

        # Pretty-print the output
        for f in result:
            logger.info("{:50} {}".format(f['name'], f['id']))
            if 'apps' not in f:
                logger.error("Missing apps data")
                continue

            # Each family may carry an interactive and/or a compute app.
            apps = f['apps']
            interactive_app = apps[
                'interactive'] if 'interactive' in apps else None
            compute_app = apps['compute'] if 'compute' in apps else None
            if interactive_app:
                for k, v in interactive_app.items():
                    logger.info("-- interactive {:35} {}".format(k, v))
            if compute_app:
                for k, v in compute_app.items():
                    logger.info("-- compute     {:35} {}".format(k, v))
Beispiel #36
0
def _main():
    """Entry point: check prerequisites, optionally clean, then build
    LLVM/Clang and the transpiler."""
    setup_logging()
    logging.debug("args: %s", " ".join(sys.argv))

    # FIXME: allow env/cli override of LLVM_SRC and LLVM_BLD
    # FIXME: check that cmake and ninja are installed
    # FIXME: option to build LLVM/Clang from master?

    # earlier plumbum versions are missing features such as TEE
    if pb.__version__ < c.MIN_PLUMBUM_VERSION:
        msg = ("locally installed version {} of plumbum is too old.\n"
               "please upgrade plumbum to version {} or later."
               .format(pb.__version__, c.MIN_PLUMBUM_VERSION))
        die(msg)

    args = _parse_args()

    # prerequisites
    if not have_rust_toolchain(c.CUSTOM_RUST_NAME):
        die("missing rust toolchain: " + c.CUSTOM_RUST_NAME, errno.ENOENT)

    # clang 3.6.0 is known to work; 3.4.0 known to not work.
    ensure_clang_version([3, 6, 0])

    if args.clean_all:
        logging.info("cleaning all dependencies and previous built files")
        for stale in (c.LLVM_SRC, c.LLVM_BLD, c.BUILD_DIR, c.AST_EXPO_PRJ_DIR):
            shutil.rmtree(stale, ignore_errors=True)
        cargo = get_cmd_or_die("cargo")
        with pb.local.cwd(c.ROOT_DIR):
            invoke(cargo, "clean")

    for needed in (c.LLVM_BLD, c.BUILD_DIR):
        ensure_dir(needed)
    git_ignore_dir(c.BUILD_DIR)

    download_llvm_sources()
    update_cmakelists()
    configure_and_build_llvm(args)
    build_transpiler(args)
    print_success_msg(args)
Beispiel #37
0
def main():
    """Run the leader event loop until a previously non-empty client set
    has emptied out (or return early if the server is already running)."""
    common.setup_logging(is_leader=True)

    sel = selectors.DefaultSelector()
    leader = Leader(common.get_port(), sel)
    try:
        leader.run()
    except Exception:
        # The server is already active.
        return

    # Keep serving while we still have clients, or never had any yet.
    while leader.clients or not leader.had_clients:
        for key, _mask in sel.select():
            key.data(key.fileobj)
Beispiel #38
0
def main():
    """Leader entry point: serve selector events until all clients leave."""
    common.setup_logging(is_leader=True)

    port = common.get_port()
    sel = selectors.DefaultSelector()

    leader = Leader(port, sel)
    try:
        leader.run()
    except Exception:
        # The server is already active.
        return

    while True:
        # Exit once clients have connected at least once and all are gone.
        if leader.had_clients and not leader.clients:
            break
        for key, _unused_mask in sel.select():
            handler = key.data
            handler(key.fileobj)
def main():
    """Entry point: dispatch to the selected data-fetching subcommand.

    Each subcommand (yahoo_finance, quandl, yahoo_roa) registers its handler
    via ``set_defaults(func=...)``; the chosen handler is called with the
    shared ``--sleep-time`` throttling value.
    """
    common.setup_logging()

    parser = argparse.ArgumentParser()
    parser.add_argument('--sleep-time', dest='sleep_time', type=float, default=1)

    # Require a subcommand: without this, running the script with no
    # subcommand would crash with AttributeError on args.func because no
    # set_defaults(func=...) ever ran.
    subparsers = parser.add_subparsers(dest='command')
    subparsers.required = True

    parser_yahoo_finance = subparsers.add_parser('yahoo_finance')
    parser_yahoo_finance.set_defaults(func=yahoo_finance)

    parser_quandl = subparsers.add_parser('quandl')
    parser_quandl.set_defaults(func=quandl)

    parser_yahoo_roa = subparsers.add_parser('yahoo_roa')
    parser_yahoo_roa.set_defaults(func=yahoo_roa)

    args = parser.parse_args()
    args.func(sleep_time=args.sleep_time)
Beispiel #40
0
def main():
    """CLI entry point: load a PDF (possibly zipped, possibly by month
    shortcut) and extract/verify/export income information."""
    args = docopt(__doc__)
    setup_logging(logging.DEBUG if args['--debug'] else logging.WARNING)

    # Map shortcuts like 'jan15' to the bundled sample statement paths.
    months = [date(y, m, 1) for y in [2015, 2016] for m in range(1, 13)]
    shortcuts = {
        d.strftime('%b%y').lower(): 'test_samples/vyp-{}-en.pdf'.format(d.strftime('%Y-%m'))
        for d in months
    }

    filename = args['<file>']
    filename = shortcuts.get(filename, filename)

    came_from_zip = filename.lower().endswith('.zip')
    pdfname = extract_pdf_from_zip(filename) if came_from_zip else filename

    try:
        text = load_pdf_file(pdfname)
    except FileNotFoundError:
        print("File not found: {}".format(pdfname))
        sys.exit(1)

    # The extracted PDF is a temporary artifact; drop it once read.
    if came_from_zip:
        os.remove(pdfname)

    ie = IncomeExtractor(text)

    if args['extract']:
        pretty(ie.extract_amounts())
    elif args['gnucash']:
        ie.gnucash()
    elif args['verify']:
        IncomeVerificator(ie).verify(assumptions=args['--assumptions'])
Beispiel #41
0
Datei: deci.py Projekt: axtl/deci
def main(argv):
    """Parse command-line options, validate them, and stripe the input
    directory across the requested output directories."""
    # build the argument parser
    p = argparse.ArgumentParser(description=DESCRIPTION)
    p.add_argument('-i', '--input',
        required=True,
        action='store',
        metavar='IN_DIR',
        help='directory to stripe across output directories')
    p.add_argument('outputs',
        metavar='OUT_DIR',
        type=str,
        nargs='+',
        help='a directory to strip TO')
    p.add_argument('-k', '--shares',
        required=True,
        action='store',
        type=int,
        help='the number of shares to use for striping (must be smaller than\
            the number of output directories)')
    p.add_argument('-f', '--force',
        action='store_true',
        help='overwrite existing files in output directories')
    p.add_argument('-v', '--verbose',
        action='store_true',
        help='detailed output of operations')
    p.add_argument('-d', '--debug',
        action='store_true',
        help='debug information')

    # parse into a namespace and hand off to the pipeline stages
    opts = p.parse_args(args=argv)

    common.setup_logging(opts)
    _verify_args(opts)
    _make_output_dirs(opts)
    _fec_encode(opts)
Beispiel #42
0
def main(argv):
    """Daemonized file-sync service: restore the persisted sync queue and
    poll decisionlogic() forever, sleeping SLEEP_TIME between batches."""
    global FilesSyncQueue
    global logger
    global foreground

    # parse argv
    parse_argv(argv, globals())

    # daemonize
    daemonize(SYNCER_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
            LOG_FORMAT, SYNCER_LOG, DATE_FORMAT)

    # sanity check
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesSyncQueue is nonexistant or damaged, truncate it
    try:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable file sync queue file %s. Recreating.' %
                FILES_SYNC_FILE)
        pass
    # persist (possibly freshly truncated) queue back to disk
    write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # start main loop
    logger.debug('File sync service starting... Entering wait loop.')
    while True:
        while decisionlogic():
            pass
        time.sleep(SLEEP_TIME)
        # NOTE(review): everything below looks like residue pasted from an
        # unrelated game-test harness (TEST_DURATION, server, server_thread
        # and client_threads are undefined in this module) and would raise
        # NameError when reached — confirm and remove.
        print("Running {} sec automated game".format(TEST_DURATION))
        for i in range(TEST_DURATION):
            time.sleep(1)
            print(i+1)
        # cleanup
        server.stop()
        server_thread.join()
        print("Shutdown server")
        for thread in client_threads:
            thread.join(timeout=1)
        print("Shutdown clients")


if __name__ == '__main__':
    # With the GUI enabled, route log output to a file instead of stdout.
    GUI = True
    common.setup_logging(to_file=GUI)
    # unittest.main()   # Uncomment to unit-test utility functions
    h = '192.168.10.100'
    lh = 'localhost'
    if GUI:
        print("Starting GUI to test user interaction")
        time.sleep(.5)
        # gui test
        client.AUTOPLAY_PAUSE = .1
        test_game(lh, 40, gui=GUI)
    else:
        # speed test
        print("Performing automated test to see if game crashes")
        client.AUTOPLAY_PAUSE = 0
        test_game(lh, 60, gui=GUI)
    logging.info('Logging finished')
Beispiel #44
0
from pyreto.roth_erev import VariantRothErev
from pyreto.util import ManualNormalExplorer

from pybrain.rl.explorers import BoltzmannExplorer #@UnusedImport
from pybrain.rl.learners import Q, QLambda, SARSA #@UnusedImport
from pybrain.rl.learners import ENAC, Reinforce #@UnusedImport

from common import \
    get_case6ww, get_case6ww2, setup_logging, get_discrete_task_agent, \
    get_zero_task_agent, save_results, run_experiment, \
    get_continuous_task_agent, get_neg_one_task_agent

from plot import plot5_1, plot5_2, plot5_3, plot5_4 #@UnusedImport


# Configure logging before the experiment configuration below is used.
setup_logging()

# --- Experiment configuration ---
# decommit/auctionType/cap are passed into the pyreto market setup;
# presumably cap is the market price cap — confirm against pyreto usage.
decommit = False
auctionType = DISCRIMINATIVE #FIRST_PRICE
cap = 100.0
#profile = [1.0, 1.0]

# Discrete action candidates (markups) and their continuous upper bound;
# withholding is disabled for this experiment.
markups = (0, 10, 20, 30)
withholds = None
markupMax = 30.0
withholdMax = None

# One offer per round, single-state task.
nOffer = 1
nStates = 1

Beispiel #45
0
    examples_as_indices = True
    learn_embeddings = False
    hidden_sizes = [300]
    hidden_activation = 'hard_sigmoid' # 'relu'
    l2_lambda = 1e-5 # 1e-4
    dropout = 0.5
    batch_size = 50
    epochs = 10
    learning_rate = 0.001
    loss = 'categorical_crossentropy' # 'mse'

# Resolve run configuration: CLI must provide datadir and wordvecs, all
# other settings fall back to the Defaults class above.
config = settings.from_cli(['datadir', 'wordvecs'], Defaults)
optimizer = Adam(config.learning_rate)

# Log under a name derived from the dataset directory.
data_name = path.basename(config.datadir.rstrip('/'))
common.setup_logging(data_name)
settings.log_with(config, info)

# Data

data = input_data.read_data_sets(config.datadir, config.wordvecs, config)
embedding = common.word_to_vector_to_matrix(config.word_to_vector)

# Optionally cap the training set size (0/None disables cropping).
if config.max_train_examples and len(data.train) > config.max_train_examples:
    warn('cropping train data from %d to %d' % (len(data.train),
                                                config.max_train_examples))
    data.train.crop(config.max_train_examples)

# Model

model = Sequential()
    except getopt.GetoptError as ex:
        print(ex.msg)
        usage()
        sys.exit()
    else:
        if minplayers < 3:
            minplayers = 3
        if lobbytimeout < 0:
            lobbytimeout = 15
        if turntimeout < 1:
            turntimeout = 1
        return turntimeout, lobbytimeout, minplayers

def main(argv):
    """Parse CLI timeouts/player settings, then run the game server to
    completion."""
    global TURNTIMEOUT, LOBBYTIMEOUT, MINPLAYERS
    TURNTIMEOUT, LOBBYTIMEOUT, MINPLAYERS = parse_cmd_args(argv)

    start_server()
    logging.info('Game server started')

    # Blocks until the game finishes.
    start_game()
    logging.info('Game server shutdown')


if __name__ == '__main__':
    # Configure logging before entering the server main loop; pass through
    # all CLI arguments except the program name.
    common.setup_logging()
    main(sys.argv[1:])
                "who forget to deliver, and some bad examiners who forget to " \
                "publish and correct.")
    p.add_option("--deadline", dest="deadline",
            default=None,
            help="Deadline. If this is specified, --deadline-profile is "\
                "ignored. Format: YYYY-MM-DD. Time will always be 00:00.")
    p.add_option("--pubtime-diff", dest="pubtime_diff",
            default=14, type='int',
            help="Number of days between publishing time and deadline. "\
                "Defaults to 14.")


    add_quiet_opt(p)
    add_debug_opt(p)
    (opt, args) = p.parse_args()
    setup_logging(opt)

    # Django must be imported after setting DJANGO_SETTINGS_MODULE
    set_django_settings_module(opt)
    load_devilry_plugins()
    from django.contrib.auth.models import User
    from devilry.apps.core.models import Delivery
    from devilry.apps.core.testhelpers import create_from_path
    from devilry.apps.core.gradeplugin import registry

    def exit_help():
        p.print_help()
        raise SystemExit()
    if len(args) != 1:
        exit_help()
    setup_logging(opt)
Beispiel #48
0
    hidden_sizes = [300]
    hidden_activation = 'hard_sigmoid' # 'relu'
    batch_size = 50
    epochs = 10
    loss = 'categorical_crossentropy' # 'mse'
    verbosity = 1    # 0=quiet, 1=progress bar, 2=one line per epoch
    iobes = False     # Map tags to IOBES on input
    token_level_eval = False    # Token-level eval even if IOB-like tagging
    optimizer = 'adam' # 'sgd'
    test = False

# Resolve run configuration: CLI must provide datadir and wordvecs, all
# other settings fall back to the Defaults class above.
config = settings.from_cli(['datadir', 'wordvecs'], Defaults)
optimizer = optimizers.get(config.optimizer)

# Name outputs/logs after the dataset directory, prefixed by model type.
output_name = 'mlp--' + path.basename(config.datadir.rstrip('/'))
common.setup_logging(output_name)
settings.log_with(config, info)

# Data

data = input_data.read_data_sets(config.datadir, config.wordvecs, config)
embedding = common.word_to_vector_to_matrix(config.word_to_vector)

# Optionally cap the training set size (0/None disables cropping).
if config.max_train_examples and len(data.train) > config.max_train_examples:
    warn('cropping train data from %d to %d' % (len(data.train),
                                                config.max_train_examples))
    data.train.crop(config.max_train_examples)

# Model

model = Sequential()
    def test_setup_logging_log_level(self):
        """Passing 'info' must set the logger level to logging.INFO (20)."""
        fake_logger = Mock()
        common.setup_logging(fake_logger, '/dev/null',
                             '%(name)s: %(message)s', 'info')
        fake_logger.setLevel.assert_called_with(20)
Beispiel #50
0
def main(argv):
    """Daemonized checksumming service: restore persisted state maps, drop
    stale entries, then loop over decisionlogic() with SLEEP_TIME pauses.

    All state (action map, hash map, sync queue) is persisted via
    read_atomic/write_atomic so a crash cannot leave a half-written file.
    """
    global FilesActionMap
    global FilesHashMap
    global FilesSyncQueue
    global logger
    global foreground

    # parse argv
    parse_argv(argv, globals())

    # daemonize
    daemonize(SUMMER_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
            LOG_FORMAT, SUMMER_LOG, DATE_FORMAT)

    # sanity check
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesActionMap is nonexistant or damaged, truncate it
    try:
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable action map status file %s. Recreating.' %
                FILES_STATUS_FILE)
        pass
    write_atomic(FILES_STATUS_FILE, FilesActionMap)

    # if FilesHashMap is nonexistant or damaged, truncate it
    try:
        FilesHashMap = read_atomic(FILES_HASH_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable hash map file %s. Recreating.' %
                FILES_HASH_FILE)
        pass
    write_atomic(FILES_HASH_FILE, FilesHashMap)

    # if FilesSyncQueue is nonexistant or damaged, truncate it
    try:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable sync queue file %s. Recreating.' %
                FILES_SYNC_FILE)
        pass
    write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # clear non-existant files from checksum map, most probably due to
    # changes when monitor was inactive
    # NOTE(review): the loop iterates the keys of the dict loaded above while
    # FilesHashMap is re-read and re-bound inside the body; deletions happen
    # on the freshly loaded copy, so this relies on single-writer access to
    # the state files — confirm.
    for path in FilesHashMap.keys():
        if not os.path.exists(path):
            logger.warn('File %s is in hash map, but not on disk. '
                    'Deleting from map and trying to delete remotely.' %
                    path)
            # remove from hash file
            FilesHashMap = read_atomic(FILES_HASH_FILE)
            del FilesHashMap[path]
            write_atomic(FILES_HASH_FILE, FilesHashMap)
            # enqueue to remove remotely
            FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
            FilesSyncQueue.append((path, 'remove', 0))
            write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # start main loop
    logger.debug('Checksumming service starting... Entering wait loop.')
    while True:
        while decisionlogic():
            pass
        time.sleep(SLEEP_TIME)
        # NOTE(review): the line below references self/client_id although
        # this is a module-level function — it looks like pasted residue
        # from an unrelated class and would raise NameError if reached;
        # confirm and remove.
        self.client_id = client_id


def check_stop_client():
    """Stop the client reactor once every expected answer has arrived."""
    global answers_received

    if answers_received != clients_no:
        return
    logging.info("stopping client reactor")
    reactor.stop()


def run_client():
    """Connect clients_no client factories to the server and run the
    reactor; a periodic check stops it once all answers are in."""
    clients = []
    for client_id in range(clients_no):
        factory = CuriousClientFactory()
        factory.setId(client_id)
        reactor.connectTCP(config.SERVER_HOST, config.SERVER_PORT,
                           factory)
        clients.append(factory)

    # Poll every 2 seconds for completion.
    task.LoopingCall(check_stop_client).start(2)

    reactor.run()



if __name__ == "__main__":
    common.setup_logging("talk_twisted_client", level=logging.DEBUG)
    run_client()
Beispiel #52
0
def main(argv):
    """Daemonized filesystem monitor: record inotify events for WATCH_DIR.

    Persists a path -> (action, timestamp) map in FILES_STATUS_FILE, seeds
    it with one synthetic 'created'/'created_dir' event per existing entry,
    then hands control to pyinotify's notifier loop (does not return).
    """
    global FilesActionMap
    global logger
    global foreground

    # parse argv
    parse_argv(argv, globals())

    # daemonize
    daemonize(MONITOR_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
            LOG_FORMAT, MONITOR_LOG, DATE_FORMAT)

    # sanity check
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesActionMap is nonexistant or damaged, truncate it
    try:
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable action map status file %s. Recreating.' %
                FILES_STATUS_FILE)
        pass
    write_atomic(FILES_STATUS_FILE, FilesActionMap)

    # initial recursive walk (initial events)
    for root, dirs, files in os.walk(WATCH_DIR):
        for name in files:
            path = os.path.join(root, name)
            FilesActionMap[path] = ('created', time.time())
        for name in dirs:
            path = os.path.join(root, name)
            FilesActionMap[path] = ('created_dir', time.time())
    write_atomic(FILES_STATUS_FILE, FilesActionMap)
    logger.debug('Initial events %s. Commiting.' % FilesActionMap)

    # start inotify monitor
    watch_manager = pyinotify.WatchManager()
    handler = ProcessEventHandler()
    notifier = pyinotify.Notifier(watch_manager, default_proc_fun=handler,
            read_freq=SLEEP_TIME)

    # try coalescing events if possible
    # (coalesce_events() only exists in newer pyinotify releases, hence the
    # AttributeError guard)
    try:
        notifier.coalesce_events()
        logger.debug('Successfuly enabled events coalescing. Good.')
    except AttributeError:
        pass

    # catch only create/delete/modify/attrib events; don't monitor
    # IN_MODIFY, instead use IN_CLOSE_WRITE when file has been written to
    # and finally closed; and monitor IN_MOVED_TO when using temporary
    # files for atomicity as well as IN_MOVED_FROM when file is moved from
    # watched path
    event_mask = pyinotify.IN_CREATE|pyinotify.IN_DELETE|\
            pyinotify.IN_CLOSE_WRITE|pyinotify.IN_ATTRIB|\
            pyinotify.IN_MOVED_TO|pyinotify.IN_MOVED_FROM|\
            pyinotify.IN_ISDIR|pyinotify.IN_UNMOUNT|\
            pyinotify.IN_Q_OVERFLOW
    watch_manager.add_watch(WATCH_DIR, event_mask, rec=True,
            auto_add=True)

    # enter loop
    logger.debug('Inotify handler starting... Entering notify loop.')
    notifier.loop()
Beispiel #53
0
    print(prettify(str(final_smv)))

    return 0


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Transforms spec SMV into valid SMV')

    parser.add_argument('-v', '--verbose', action='count', help='verbose output', default=-1)
    parser.add_argument('smv', metavar='smv',
                        type=argparse.FileType(),
                        help='input SMV file')

    args = parser.parse_args()

    logger = setup_logging(__name__, verbose_level=args.verbose)
    logger.info("run with args:%s", args)

    try:
        exit(main(args.smv.read().splitlines(),
                  os.path.dirname(args.smv.name)))
    except KeyboardInterrupt:
        print()  # empty line, so that command line prompt is on a new one
    except SystemExit:
        pass
    except BaseException as e:
        from logging import INFO
        # print exception in an appropriate format
        logger.setLevel(INFO)
        logger.exception(e)
Beispiel #54
0
    def delete(self, jid):
        """Delete the job with ID *jid* from the queue.

        Returns whatever the underlying job queue's delete() reports.
        """
        return self.jobqueue.delete(jid)
    
    def get_queue(self):
        """Return the entire queue as a list of jobs; each job is
        represented by a dictionary."""
        return self.jobqueue.as_dict()

    def abort(self, jid):
        """Abort job *jid*; does nothing if the job is not running.

        Always returns 0 (success indicator for the RPC layer).
        """
        self.jobqueue.abort(jid)
        return 0

    def reprio(self, jid, new_prio):
        """Change the priority of job *jid* to *new_prio*.

        Returns whatever the underlying job queue's reprio() reports.
        """
        return self.jobqueue.reprio(jid, new_prio)

def add_options():
    """Build the command-line parser for the server entry point.

    Returns:
        optparse.OptionParser: parser understanding the -c/--config option
        used to add or override configuration values.
    """
    oparser = optparse.OptionParser(usage="%prog [options]")
    oparser.add_option(
        "-c", "--config",
        help="Add/overwrite configuration options. Syntax: 'section.key=value,...'")
    return oparser

    
if __name__ == "__main__":
    common.setup_logging("~/.flof/flofserver.log")
    oparser = add_options()
    (options, args) = oparser.parse_args()
    configuration = Configuration(cmd_config = options.config)
    FlofServer(configuration)
Beispiel #55
0
import logging
from tcpsender import *
from tcpsender_args import *
from  common import timer_dec, setup_logging, press_any_key


@timer_dec
def run(log):
    """Parse CLI arguments and run the TCP sender, timing the whole call
    via the timer_dec decorator."""
    args = get_args(log)
    tcp_sender_run(log, args)

if __name__ == '__main__':
    # Placeholder so the except handler below can reference `log` even if
    # setup_logging() itself raises.
    log = object
    try:
        log = setup_logging()
        log = logging.getLogger(__name__)

        run(log)
        # NOTE(review): missing separator — this logs e.g. '__main__finish!'.
        log.debug(__name__ + 'finish!')
    except Exception as inst:
        # NOTE(review): if setup_logging() raised, `log` is still the bare
        # `object` class and log.exception() will itself fail — confirm.
        log.exception(inst)
        #print(inst)
    else:
        log.debug('TCPSender successful done!')
        pass
    finally:
        log.debug('Finish!')
        #press_any_key()
        pass
Beispiel #56
0
    # otherwise make sure that a Marathon URL was specified
    else:
        if args.marathon is None:
            arg_parser.error('argument --marathon/-m is required')
        if args.marathon_lb is None:
            arg_parser.error('argument --marathon-lb/-l is required')
        if args.json is None:
            arg_parser.error('argument --json/-j is required')

    return args


if __name__ == '__main__':
    args = process_arguments()
    set_request_retries()
    setup_logging(logger, args.syslog_socket, args.log_format, args.log_level)

    try:
        if do_zdd(args):
            sys.exit(0)
        else:
            sys.exit(1)
    except Exception as e:
        if hasattr(e, 'zdd_exit_status'):
            if hasattr(e, 'error'):
                logger.exception(str(e.error))
            else:
                logger.exception(traceback.print_exc())
            sys.exit(e.zdd_exit_status)
        else:
            # For Unknown Exceptions
        defferedAnswer = self.factory.knowledge_engine.getAnswer(question)
        defferedAnswer.addCallback(self.sendAnswer, client_id)

    def sendAnswer(self, answer, client_id):
        """Write *answer* back to the connected client, then close the
        connection (one answer per connection)."""
        logging.debug("sending answer to client %s: [%s]",
                      client_id, answer)
        self.transport.write(answer)
        self.transport.loseConnection()


class KnowledgeFactory(protocol.ServerFactory):
    """Server factory producing KnowledgeProtocol connections."""
    protocol = KnowledgeProtocol
    # One engine shared by every connection; protocol instances reach it
    # via self.factory.knowledge_engine.
    knowledge_engine = knowledge_engine.KnowledgeEngine()


def start_server():
    """Start the knowledge server and run it for at most 100 seconds.

    Listens on config.SERVER_PORT, schedules an automatic reactor shutdown,
    then blocks in reactor.run() until the shutdown fires (or the reactor
    is stopped by other means).
    """
    factory = KnowledgeFactory()

    endpoints.serverFromString(
        reactor, "tcp:%s" % config.SERVER_PORT).listen(factory)

    logging.info("server started on port %d", config.SERVER_PORT)
    # Schedule the auto-shutdown *before* run(): reactor.run() blocks until
    # reactor.stop(), so a callLater placed after run() could never fire and
    # the server would run forever.
    reactor.callLater(100, reactor.stop)
    reactor.run()
    logging.info("server execution ended")


if __name__ == "__main__":
    common.setup_logging("talk_twisted_server", level=logging.DEBUG)
    start_server()