Example #1
File: gron.py Project: hidnoiz/gron
def setup_logger(args):
    options = {'debug': False, 'silent': False}
    if args.debug:
        options['debug'] = True
    if args.silent:
        options['silent'] = True
    logger_module.setup(options=options)
    global logger
    logger = logging.getLogger('main')
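A hedged sketch of what the project-specific logger_module.setup(options=...) helper might look like, assuming the 'debug' and 'silent' flags simply select a root log level (the real gron helper may do more):

import logging

def setup(options=None):
    # Hypothetical helper compatible with the call above.
    options = options or {}
    if options.get('silent'):
        level = logging.ERROR
    elif options.get('debug'):
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(
        level=level,
        format='%(asctime)s %(name)s %(levelname)s %(message)s')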
Example #2
def run(sc):
    ts = datetime.datetime.now().replace(microsecond=0).isoformat('_')
    sc.sim_time_start = ts
    logger.setup(sc)
    store_scenario(sc)
    INFO('Init (%s)' % ts)

    INFO('Fitness (minimal): %.6f' % sc.sol_fitness_min)
    INFO('Fitness (maximal): %.6f' % sc.sol_fitness_max)
    INFO('Fitness (average): %.6f' % sc.sol_fitness_avg)

    INFO('Creating %d agents' % sc.opt_m)
    agents = dict()
    for aid, search_space, initial_value in zip(sc.agent_ids,
                                                sc.agent_search_spaces,
                                                sc.agent_initial_values):
        agents[aid] = Agent(aid, search_space, initial_value)

    INFO('Connecting agents')
    for a, neighbors in sc.network.items():
        for n in neighbors:
            # Consistency check
            assert a != n, 'cannot add myself as neighbor!'
            # Add neighbor
            DEBUG('', 'Connecting', a, '->', n)
            if n not in agents[a].neighbors:
                agents[a].neighbors[n] = agents[n]
            else:
                WARNING(n, 'is already neighbor of', a)

    INFO('Starting simulation')
    mas = Mas(sc, agents)
    logger.set_mas(mas)
    stats = Stats(sc, agents)
    stats.eval(mas.current_time)
    AGENT(mas.aid, 'Notifying initial agent (%s)' % sc.sim_initial_agent)
    kappa = Working_Memory(sc.objective, dict(),
                           Solution_Candidate(None, dict(), float('-inf')))
    msg = Message(mas.aid, sc.sim_initial_agent, kappa)
    mas.msg(msg)
    while mas.is_active():
        mas.step()
        stats.eval(mas.current_time)
    if not stats.is_converged():
        ERROR('convergence not reached!')

    ts = datetime.datetime.now().replace(microsecond=0).isoformat('_')
    INFO('End (%s)' % ts)

    # Store scenario again, this time with simulation result
    store_scenario(sc, overwrite=True)

    return stats
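The upper-case INFO/DEBUG/WARNING/ERROR (and AGENT) calls above are convenience wrappers from the project's logger module. A minimal sketch of compatible wrappers, assuming they simply join their arguments and forward to a shared logger:

import logging

_log = logging.getLogger('sim')

def INFO(*parts):
    # Hypothetical wrapper; the real module may add custom levels or prefixes.
    _log.info(' '.join(str(p) for p in parts))

def DEBUG(*parts):
    _log.debug(' '.join(str(p) for p in parts))

def WARNING(*parts):
    _log.warning(' '.join(str(p) for p in parts))

def ERROR(*parts):
    _log.error(' '.join(str(p) for p in parts))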
Example #4
def main(args):
    template_loader = JinjaLoader(
        os.path.join(os.path.dirname(__file__), 'templates/'))
    application = BusinessIssues(get(),
                                 template_loader=template_loader,
                                 static_path=os.path.join(
                                     os.path.dirname(__file__), "static"),
                                 xsrf_cookies=False,
                                 debug=args.debug)
    server = HTTPServer(application, xheaders=True)
    server.listen(args.port, '0.0.0.0')
    logger.setup()
    install_tornado_shutdown_handler(tornado.ioloop.IOLoop.instance(), server,
                                     logging.getLogger())
    logging.info('start service at ' + time.ctime() + '\n')
    tornado.ioloop.IOLoop.current().start()
Example #5
def main():
    global requestSize
    global serverWithoutFile

    appPath = os.path.dirname(os.path.abspath(__file__))

    logger.setup(appPath + "/Logs/log.txt")

    # file descriptor (arrived over the socket from the server with the request)
    fd = int(sys.argv[1])
    print("--------")

    # the request (decode the raw bytes instead of str(), which would
    # produce a "b'...'" literal)
    requestData = os.read(fd, requestSize).decode('utf-8')

    # parse the request
    request = requester.getHeaders(requestData)
    requester.parseParams(request)

    # the response
    response = zetypes.ZeResponse()
    response.setAppPath(appPath)    # root path (from main)
    response.setRequest(request)

    # content
    responser.createResponseHtml(serverWithoutFile, response, request)
    content = response.buildHttpResponse()

    try:
        with open(fd, 'w') as outf:
            outf.write(content)
    except IOError as e:
        if e.errno == errno.EPIPE:
            print("pipe err")

    print("ok")
Example #6
    def __init__(self,
                 cluster=None,
                 ipaddress=None,
                 username=None,
                 password=None,
                 source_dir=None,
                 target_dir=None,
                 copyback_files=None,
                 copyback_dir=None,
                 command=None,
                 logfile=None,
                 start_clean=True,
                 cleanup=True,
                 timeout=None,
                 all=None,
                 source_files=None,
                 apikey=None):

        self.cluster = cluster
        if isinstance(cluster, str):
            self.cluster = picluster.PiBoardTable(cluster, apikey)
        self.ipaddress = ipaddress
        self.username = username
        self.password = password
        self.source_dir = source_dir
        self.source_files = source_files
        self.target_dir = target_dir
        self.copyback_files = copyback_files
        self.copyback_dir = copyback_dir
        self.command = command
        self.start_clean = start_clean
        self.cleanup = cleanup
        self.logfile = logfile
        self.timeout = timeout

        self.all = all
        self.machine = None
        self.ssh = None
        self.buffer = None

        # global logger is hooked up to parent modules by module name and this
        # logger can see all the remote command output from all commands, which
        # will be formatted differently with "ThreadId: " prefix so user can
        # make sense of the combined output when remote commands are running in
        # parallel.
        if self.logfile:
            self.logger = logger.setup(self.logfile)
        else:
            self.logger = logger.get()

        if not cluster and not ipaddress:
            raise Exception("Error: an ipaddress or a cluster (or both) is required")

        # Sanity-check parameters
        if self.target_dir and os.path.pathsep in self.target_dir:
            raise Exception(
                "Error: multilevel target directories not supported")
Example #7
def main():
    """ MAIN APP """
    master_config = get_start_arguments()
    logger.setup(master_config['log_file'], 'w')
    config.init(master_config['config_file'])
    config.set_section('GLOBAL')
    _version = config.key['version']
    _port = config.key['port']
    logger.level(config.key['log_level'])
    app = manager.App(camera_data=config.set_section('CAMERA'),
                      display_data=config.set_section('DISPLAY'),
                      led_data=config.set_section('LED'),
                      buzzer_data=config.set_section('BUZZER'),
                      random_data=config.set_section('STATS'),
                      version=_version,
                      port=get_mac_address(_port))
    while app.run():
        pass
    app.close()
Example #8
    def __init__(self, address, interface, timeout=None,
                       encoding=None, logging=None):
        if (not isinstance(interface, type) or
            not issubclass(interface, JsonRpcIface)):
            raise TypeError('Interface must be JsonRpcIface subclass')

        self.interface = interface
        self.timeout = timeout
        self.encoding = encoding or 'utf-8'
        logger.setup(logging)

        try:
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.bind(address)
            self.listen(0)
        except Exception:
            logger.exception('Server run error')
            raise
Example #9
def main():
    parser = argparse.ArgumentParser(
        description="Converts ONNX model to ELL model\n"
        "Example:\n"
        "onnx_import.py model.onnx\n"
        "This outputs 'model.ell' which can be compiled with ELL's 'wrap' tool\n"
    )
    parser.add_argument("input",
                        type=Text,
                        help="Input model file (onnx or protobuf)")
    parser.add_argument("--zip_ell_model",
                        help="zips the output ELL model if set",
                        action="store_true")
    parser.add_argument(
        "--verbose",
        help="print verbose output during the import. Helps to diagnose ",
        action="store_true")
    parser.add_argument(
        '-o',
        '--output_directory',
        help='Path to output directory (default: input file directory)',
        default=None)

    model_options = parser.add_argument_group('model_options')
    model_options.add_argument(
        "--step_interval",
        help="produce a steppable ELL model for a millisecond interval",
        default=None)
    model_options.add_argument(
        "--lag_threshold",
        help=
        "number of step intervals to fall behind before notifying the caller.\n"
        "used when step_interval is set\n",
        default=None)

    logger.add_logging_args(parser)
    args = parser.parse_args()
    logger.setup(args)

    convert(args.input, args.output_directory, args.zip_ell_model,
            args.step_interval, args.lag_threshold)
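Several of the ELL-derived examples in this list share the logger.add_logging_args(parser) / logger.setup(args) pair. A hedged sketch of that contract, assuming it merely wires an argparse flag to the logging level (the real ELL helper is richer):

import logging

def add_logging_args(parser):
    # Hypothetical flag; the real helper defines its own options.
    parser.add_argument('--verbosity', default='INFO',
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'])

def setup(args):
    logging.basicConfig(level=getattr(logging, args.verbosity))
    return logging.getLogger(__name__)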
Example #10
    def __init__(self, *args, **kwargs):
        self.args = list(args)
        self.kwargs = dict(kwargs)
        self.logger_kwargs = {
            'directory': kwargs.get('logsdir', os.path.join('..', 'logs')),
            'name': 'maioget.%s' % (kwargs.get('name', 'UNNAMED'),),
            'daemon': kwargs.get('daemon', False),
            'loglevel': kwargs.get('loglevel', 'INFO'),
        }
        # LogDirectoryException from logger.setup propagates to the caller;
        # the original try/except that only re-raised it was a no-op.
        self.set_logger(logger.setup(**self.logger_kwargs))
Example #11
def main(argv):
    global log
    parser = argparse.ArgumentParser()
    parser.add_argument("src", help="The Bitcasa base64 path for file source")
    parser.add_argument("dst", help="The final destination root dir or your files")
    parser.add_argument("token", help="The access token from Bitcasa. To get one navigate to https://rose-llc.com/bitcasafilelist")
    parser.add_argument("-t", "--temp", help="The temp dir to store downloaded files. (Should be a local folder)")
    parser.add_argument("-l", "--log", help="Full path to log file")
    parser.add_argument("--depth", type=int, help="Specify depth of folder traverse. 0 is same as --norecursion")
    parser.add_argument("-m", "--threads", type=int, help="Specify the max number of threads to use for downloading. Default is 5")
    parser.add_argument("--local", help="Only store file locally. Do not use temp dir", action="store_true")
    parser.add_argument("--norecursion", help="Do not go below the src folder. (Same as --depth=0)", action="store_true")
    parser.add_argument("--verbose", help="increase output verbosity", action="store_true")
    args = parser.parse_args()


    _log = ""
    if (args.log == None or args.log == "") and not args.local:
        _log = os.path.join(args.temp, "runlog.txt")
    elif (args.log == None or args.log == "") and args.local:
        _log = os.path.join(args.dst, "runlog.txt")
    elif args.log != None and args.log != "":
        _log = args.log

    rec = not args.norecursion
    if (args.temp == "" and not args.local) or args.dst == "" or args.src == "" or args.token == "":
        sys.stderr.write("Please supply access token, temp, source, and destination locations. If this is a local copy, then specify -l or --local\n")
        sys.exit(2)
    elif args.temp != None and args.temp != "" and args.local:
        sys.stdout.write("Local specified. Ignoring temp\n")
        args.temp = args.dst
    elif args.local:
        args.temp = args.dst

    # initialize temp dir
    try:
        if not os.path.isdir(args.temp):
            os.makedirs(args.temp)
    except OSError:
        sys.stderr.write("Error creating temp directory\n")
        raise
    # initialize logging
    log = logger.setup(logfile=_log, debug=args.verbose)

    if args.depth and args.norecursion:
        log.info("Note: non-zero depth and --norecursion both given; assuming recursion")
        rec = True
    log.debug("Initializing Bitcasa")
    bitc = BitcasaDownload(args.depth, args.temp, args.src, args.dst, rec, args.local, args.token, args.threads)
    bitc.process()
    log.info("done")
Example #12
import click
import logging
import logger
from deploy import run, js_code_snippet, preflight_checks, dashboard_url

logger = logger.setup()


@click.group()
@click.option('--debug', is_flag=True)
def cli(debug):
    if debug:
        logger.setLevel(logging.DEBUG)


@cli.command()
def preflight():
    logger.info('running preflight checks')
    preflight_checks()


@cli.command()
@click.option('--preflight/--no-preflight', default=True)
def deploy(preflight):
    if preflight:
        logger.info('running preflight checks')
        if not preflight_checks():
            return
    logger.info('deploying')
    run()
    js_code_snippet()
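Note that logger = logger.setup() rebinds the name of the imported logger module to the logger object it returns, so the module itself is no longer reachable under that name. If the module is needed again later, an alias avoids the shadowing:

import logger as logger_module

log = logger_module.setup()  # module stays importable under its own name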
Example #13
    def __init__(self, url, timeout=None, encoding=None, logging=None):
        self.url = url
        self.timeout = timeout
        self.encoding = encoding or 'utf-8'
        logger.setup(logging)
Example #14
#!/usr/bin/env python
import os
import time
from PIL import Image
from ConfigParser import NoOptionError

import s3
import server
import settings
import db
import logger
l = logger.setup('hook')
cfg = settings.config()

sender = 0

def generate_key(fileName):
    """
    Generate a unique filename based on the current timestamp.
    """
    timestamp = str(time.time()).replace(".", "")
    file_extension = fileName.split(".")[-1]  # last component handles names with multiple dots
    return timestamp + "." + file_extension

def generate_thumb(src, dest):
    """
    Create a thumbnail of the file using Pillow and return
    the path to the created thumbnail.
    """
    try:
        quality = cfg.getint('thumbs', 'quality')
Example #15
botwtools is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with botwtools.  If not, see <https://www.gnu.org/licenses/>.
"""

import sys
if sys.version_info < (3, 0):
    raise RuntimeError("This program requires Python 3.")

import logger
logger.setup('botwtools')
log = logger.logging.getLogger()
import argparse
from app import App
import codec

arg_parser = None  # declare global


def _setupArgs():
    global arg_parser
    parser = argparse.ArgumentParser(
        description="Breath of the Wild modding tools.")
    arg_parser = parser

    parser.add_argument('--extract',
Example #16
    # wrap it up and make a sendmany call
    Args = namedtuple(
        'args', 'dummy amounts minconf comment \
    subtractfeefrom replaceable conf_target estimate_mod output_assets')
    params = Args("", addr2amt, 1, "wallet cleanup", pay_fee_addr, False, 6,
                  'UNSET', addr2asset)
    if (input(
            f"please confirm you want to do sendmany with the following arguments: \n{params}\n"
    )) in ['y', 'yes']:
        txid = r.call("sendmany", *params)
        return txid
    else:
        logging.info("swipe aborted by user\n")
        exit()


if __name__ == "__main__":
    import logger
    logger.setup(logger.Args('debug'))

    # wipe all utxos to new addresses
    txid = swipe_assets()
    if not txid:
        print("Something is wrong\n")
    else:
        print(txid)
    # make a dump of the wallet.dat
    # stop the elements service
    # move wallet.dat to wallet.$m.$d
    # restart elements service
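The logger.Args('debug') call suggests this logger module accepts the same kind of namespace that argparse would produce. A hypothetical stand-in consistent with that usage:

from collections import namedtuple
import logging

Args = namedtuple('Args', 'loglevel')  # hypothetical one-field namespace

def setup(args):
    logging.basicConfig(level=getattr(logging, args.loglevel.upper()))
    return logging.getLogger()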
Example #17
from gevent import monkey; monkey.patch_all()
import env; env.setup()
import logger; logger.setup()

import logging
log = logging.getLogger()

# An example gevent/flask app
import flask
app = flask.Flask(__name__)

@app.route('/get', methods=['GET'])
def get():
    # flask.request.args for GET
    # flask.request.form for POST
    return "Hello World %s" % flask.request.args
    
if __name__ == '__main__':
    from gevent.pywsgi import WSGIServer  # gevent.wsgi was renamed gevent.pywsgi
    http_server = WSGIServer(('127.0.0.1', 5000), app)
    http_server.serve_forever()
Example #18
def main(args):
    arg_parser = argparse.ArgumentParser(
        description="Converts CNTK model to ELL model\n"
        "Example:\n"
        "    cntk_import.py model.cntk\n"
        "This outputs 'model.ell' which can be compiled with ELL's 'wrap' tool\n")

    arg_parser.add_argument(
        "cntk_model_file",
        help="path to a CNTK model file, or a zip archive of a CNTK model file")
    arg_parser.add_argument(
        "--zip_ell_model",
        help="zips the output ELL model if set", action="store_true")
    arg_parser.add_argument(
        "--use_legacy_importer",
        help="specifies whether to use the new importer engine or the legacy importer", action="store_true")
    arg_parser.add_argument(
        "--plot_model",
        help="specifies whether to plot the model using SVG to cntk_model_file.svg", action="store_true")
    arg_parser.add_argument(
        "--verify_vision_model",
        help="verifies the imported vision ELL model produces the same output as the original CNTK model",
        action="store_true")
    arg_parser.add_argument(
        "--verify_audio_model",
        help="verifies the imported audio ELL model produces the same output as the original CNTK model",
        action="store_true")

    model_options = arg_parser.add_argument_group('model_options')
    model_options.add_argument(
        "--step_interval",
        help="produce a steppable ELL model for a millisecond interval",
        type=float)
    model_options.add_argument(
        "--lag_threshold",
        help="millisecond time lag before notifying the caller.\n"
             "used when step_interval is set\n",
        type=float)

    logger.add_logging_args(arg_parser)
    args = arg_parser.parse_args(args)
    _logger = logger.setup(args)

    args = vars(args)

    step_interval = args['step_interval']
    lag_threshold = args['lag_threshold']
    if step_interval is not None and lag_threshold is None:
        lag_threshold = step_interval * 2
    plot_model = args["plot_model"]
    verify_model = {"vision": args["verify_vision_model"],
                    "audio": args["verify_audio_model"]}

    # extract the model if it's in an archive
    unzip = ziptools.Extractor(args['cntk_model_file'])
    success, filename = unzip.extract_file(".cntk")
    if success:
        _logger.info("Extracted: " + filename)
    else:
        # not a zip archive
        filename = args['cntk_model_file']

    if not args["use_legacy_importer"]:
        _logger.info("-- Using new importer engine --")
        ell_map = cntk_to_ell.map_from_cntk_model_using_new_engine(filename, step_interval, lag_threshold, plot_model,
                                                                   verify_model)
    else:
        _logger.info("-- Using legacy importer --")
        predictor = cntk_to_ell.predictor_from_cntk_model(filename)
        ell_map = ell.neural.utilities.ell_map_from_predictor(predictor, step_interval, lag_threshold)

    model_file_name = os.path.splitext(filename)[0] + ".ell"

    _logger.info("\nSaving model file: '" + model_file_name + "'")
    ell_map.Save(model_file_name)

    if args["zip_ell_model"]:
        _logger.info("Zipping model file: '" + model_file_name + ".zip'")
        zipper = ziptools.Zipper()
        zipper.zip_file(model_file_name, model_file_name + ".zip")
        os.remove(model_file_name)
Example #19
    parser.add_argument(
        '--granularity',
        type=int,
        default=60,
        choices=[60, 300, 900, 3600, 21600, 86400],
        help=
        'granularity of candle data in seconds. choices: 1m, 5m, 15m, 6h, 24h')
    args = parser.parse_args()

    # use formatted datetime for filenames
    curr_datetime_str = dt.today().strftime('%Y-%m-%d-%H-%M-%S')

    # setup logging
    log_name = 'exchanges.cb'
    log_filename = 'logs/{:s}-{:s}'.format(args.market, curr_datetime_str)
    log = logger.setup(log_name, log_filename, logger.levels[args.loglevel])

    # setup pystore for storing time series data
    ps_store = pystore.store('coinbase')
    ps_collection = ps_store.collection('candles.minute')
    ps_item = '{:s}'.format(args.market)

    ps_item_exists = ps_item in ps_collection.list_items()

    # track execution time to monitor avg request time
    exec_time = time.time()

    start_date = get_start_date(args.market)
    dates = _gen_date_frames(start_date)

    for count, (start, end) in enumerate(dates):
Example #20
    def parse_command_line(self, args=None):
        arg_parser = _PassArgsParser(prog="wrap", description="""This tool wraps a given ELL model in a CMake buildable \
project that builds a language specific module that can call the ELL model on a given target platform.
The supported languages are:
    python   (default)
    cpp
The supported target platforms are:
    pi0       Raspberry Pi 0
    pi3       Raspberry Pi 3
    orangepi0 Orange Pi Zero
    aarch64   arm64 Linux, works on Qualcomm DragonBoards
    host      (default) your host computer architecture""")

        for arg in self.arguments.keys():
            argdef = self.arguments[arg]
            if "required" in argdef.keys():
                arg_parser.add_argument("--" + arg, "-" + argdef["short"],
                                        help=argdef["help"], required=True)
            elif "choices" in argdef.keys():
                arg_parser.add_argument("--" + arg, "-" + argdef["short"],
                                        help=argdef["help"], default=argdef["default"],
                                        choices=argdef["choices"])
            elif type(argdef["default"]) is bool and not argdef["default"]:
                arg_parser.add_argument("--" + arg, "-" + argdef["short"],
                                        help=argdef["help"], action="store_true", default=False)
            else:
                arg_parser.add_argument("--" + arg, "-" + argdef["short"],
                                        help=argdef["help"], default=argdef["default"])

        compile_args = []
        if '--' in args:
            index = args.index('--')
            compile_args = args[index + 1:]
            args = args[:index]

        logger.add_logging_args(arg_parser)
        args = arg_parser.parse_args(args)
        self.logger = logger.setup(args)

        self.model_file = args.model_file
        _, tail = os.path.split(self.model_file)
        self.model_file_base = os.path.splitext(tail)[0]
        self.model_name = args.module_name
        if not self.model_name:
            self.model_name = self.model_file_base.replace('-', '_')
        self.language = args.language
        self.target = args.target
        self.objext = self.get_objext(self.target)
        self.output_dir = args.outdir
        if self.output_dir is None:
            self.output_dir = self.target
        if os.path.isfile(self.output_dir + ".py"):
            raise Exception("You have a python module named '{}', which will conflict with the --outdir of '{}'. \
Please specify a different outdir.".format(self.output_dir + ".py", self.output_dir))
        self.profile = args.profile
        self.verbose = self.logger.getVerbose() or args.verbose
        self.llvm_format = args.llvm_format
        self.optimization_level = args.optimization_level
        self.no_opt_tool = args.no_opt_tool or self.optimization_level in ['0', 'g']
        self.no_llc_tool = args.no_llc_tool
        self.optimize = not args.no_optimize
        self.fuse_linear_ops = not args.no_fuse_linear_ops
        self.optimize_reorder = not args.no_optimize_reorder
        self.debug = args.debug
        self.blas = self.str2bool(args.blas)
        self.swig = self.language != "cpp"
        self.cpp_header = self.language == "cpp"
        self.compile_args = compile_args
Example #21
        "--auto_scale",
        help=
        "Whether to auto-scale audio input to range [-1, 1] (default false).",
        action='store_true')
    parser.add_argument(
        "--output",
        help="Name of text file to contain list of test results.",
        default="results.json")
    parser.add_argument("--prediction_algorithm",
                        "-pa",
                        help="Specify prediction algorithm (max or mean).",
                        default="max")

    logger.add_logging_args(parser)
    args = parser.parse_args()
    log = logger.setup(args)

    sample_rate = args.sample_rate

    if args.list_file:
        verify_file_exists("list_file", args.list_file)
    elif args.dataset:
        verify_file_exists("dataset", args.dataset)
    else:
        log.error("Expecting one of --list_file or --dataset")
        sys.exit(1)

    test = AudioModelTester(args.reset)
    test.run_test(args.featurizer, args.classifier, args.list_file,
                  args.max_tests, args.dataset, args.categories, sample_rate,
                  args.auto_scale, args.output, args.prediction_algorithm,
Example #22
    def run(self):
        model_id_string = "{} ({})".format(
            self.profiler_data.model_name,
            self.profiler_data.profile_options.to_long_string())

        global_logger = logger.setup(
            format="{} %(message)s".format(model_id_string))
        global_logger.info("Compiling model {} ...".format(model_id_string))
        profile_src_path = os.path.join(self.ell_build_root, "tools",
                                        "utilities", "profile")

        # Take all .cpp, .h, .cmd, .sh, and .cmake files from the build/tools/utilities/profile directory; only the CMakeLists file is hard-coded below
        all_profile_files = os.listdir(profile_src_path)
        profile_extensions_to_deploy = [".cpp", ".h", ".cmd", ".sh", ".cmake"]
        profile_src_files = [
            filename for filename in all_profile_files
            if any(filename.endswith(ext)
                   for ext in profile_extensions_to_deploy)
        ]

        cmakelists_file_to_deploy = "CMakeLists-device-parallel.txt.in"
        profile_src_files.append(cmakelists_file_to_deploy)
        profile_src_file_renames = {
            cmakelists_file_to_deploy: "CMakeLists.txt"
        }

        base_wrap_args = [
            "--model_file",
            self.profiler_data.model_path,
            "--module_name",
            "ELL",  # Profiler C++ code assumes the module name is ELL
            "--target",
            self.target,
            "--language",
            self.language,
            "--outdir",
            self.profiler_data.built_profilers_path,
        ]

        base_wrap_args.extend(
            self.profiler_data.profile_options.base_wrap_args())

        opt_wrap_args = ["--llvm_format", "ir"]
        no_opt_wrap_args = [
            "--no_opt_tool", "--no_llc_tool", "--llvm_format", "obj"
        ]

        profile_wrap_args = ["--profile"]

        profile_options_additional_compile_args = (
            self.profiler_data.profile_options.additional_compile_args())
        if profile_options_additional_compile_args:
            additional_compile_args = [
                "--"
            ] + profile_options_additional_compile_args
        else:
            additional_compile_args = []

        no_opt_noprofile_builder = wrap.ModuleBuilder()
        no_opt_noprofile_wrap_args = base_wrap_args + no_opt_wrap_args + additional_compile_args
        no_opt_noprofile_builder.parse_command_line(no_opt_noprofile_wrap_args)

        no_opt_profile_builder = wrap.ModuleBuilder()
        no_opt_profile_wrap_args = base_wrap_args + no_opt_wrap_args + profile_wrap_args + additional_compile_args
        no_opt_profile_builder.parse_command_line(no_opt_profile_wrap_args)

        opt_noprofile_builder = wrap.ModuleBuilder()
        opt_noprofile_wrap_args = base_wrap_args + opt_wrap_args + additional_compile_args
        opt_noprofile_builder.parse_command_line(opt_noprofile_wrap_args)

        opt_profile_builder = wrap.ModuleBuilder()
        opt_profile_wrap_args = base_wrap_args + opt_wrap_args + profile_wrap_args + additional_compile_args
        opt_profile_builder.parse_command_line(opt_profile_wrap_args)

        # Profiler and ExecuteModel binaries expect to find the following header and object files, built with different compile args:
        # compiled_model.o
        # compiled_model.h
        # compiled_model_opt.o
        # compiled_model_opt.h
        # compiled_model_noprofile.o
        # compiled_model_noprofile.h
        # compiled_model_noprofile_opt.o
        # compiled_model_noprofile_opt.h
        built_name_prefix = "compiled_model"
        noprofile_suffix = "_noprofile"
        opt_suffix = "_opt"
        obj_suffix = ".o"
        header_suffix = ".h"
        file_suffixes = [obj_suffix, header_suffix]
        base_model_filename = os.path.basename(self.profiler_data.model_path)
        base_model_name = os.path.splitext(base_model_filename)[0]

        no_opt_noprofile_renames = {
            (base_model_name + file_suffix):
            (built_name_prefix + noprofile_suffix + file_suffix)
            for file_suffix in file_suffixes
        }
        no_opt_profile_renames = {(base_model_name + file_suffix):
                                  (built_name_prefix + file_suffix)
                                  for file_suffix in file_suffixes}
        opt_noprofile_renames = {
            (base_model_name + file_suffix):
            (built_name_prefix + noprofile_suffix + opt_suffix + file_suffix)
            for file_suffix in file_suffixes
        }
        opt_profile_renames = {(base_model_name + file_suffix):
                               (built_name_prefix + opt_suffix + file_suffix)
                               for file_suffix in file_suffixes}

        builders_and_rename_maps = [(no_opt_profile_builder,
                                     no_opt_profile_renames),
                                    (opt_profile_builder, opt_profile_renames)]
        if self.include_exercise_models:
            builders_and_rename_maps.extend([
                (no_opt_noprofile_builder, no_opt_noprofile_renames),
                (opt_noprofile_builder, opt_noprofile_renames)
            ])

        target_files = []
        for builder_and_rename_map in builders_and_rename_maps:
            target_files.extend([
                builder_and_rename_map[1][filename]
                for filename in builder_and_rename_map[1]
            ])

        existing_files = os.listdir(self.profiler_data.built_profilers_path)
        need_to_build = not all(
            filename in existing_files for filename in target_files)

        if need_to_build:
            try:
                for builder_and_rename_map in builders_and_rename_maps:
                    builder = builder_and_rename_map[0]
                    rename_map = builder_and_rename_map[1]
                    builder.run()
                    for filename in rename_map:
                        src_path = os.path.join(
                            self.profiler_data.built_profilers_path, filename)
                        dst_path = os.path.join(
                            self.profiler_data.built_profilers_path,
                            rename_map[filename])
                        shutil.copy(src_path, dst_path)
            except Exception:
                errorType, value, traceback = sys.exc_info()
                msg = "### WrapException: %s: %s" % (str(errorType),
                                                     str(value))
                global_logger.error(msg)
                sys.exit(1)
        else:
            global_logger.info(
                "Target files already exist in path {}, skipping build".format(
                    self.profiler_data.built_profilers_path))

        keep_files = target_files

        # Copy profile source code into place
        for filename in profile_src_files:
            src_path = os.path.join(profile_src_path, filename)
            dst_name = profile_src_file_renames[
                filename] if filename in profile_src_file_renames else filename
            dst_path = os.path.join(self.profiler_data.built_profilers_path,
                                    dst_name)
            keep_files.append(dst_name)
            shutil.copy(src_path, dst_path)

        all_built_files = os.listdir(self.profiler_data.built_profilers_path)
        delete_files = [
            filename for filename in all_built_files
            if filename not in keep_files
        ]
        for filename in delete_files:
            full_path = os.path.join(self.profiler_data.built_profilers_path,
                                     filename)
            if os.path.isfile(full_path):
                global_logger.info(
                    "Cleaning temporary build file {}".format(full_path))
                os.remove(full_path)

        global_logger.info("Done compiling model {}.".format(model_id_string))
Example #23
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     fromfile_prefix_chars='@')
    parser.add_argument('--temp', type=str, default='/Volumes/RAM Disk/',
                        help='temporary filesystem')
    parser.add_argument('--no-s3', action='store_true', default=True,
                        help='do not modify S3 objects')
    parser.add_argument('--no-fs', action='store_true', default=True,
                        help='do not modify source files')
    parser.add_argument('--workers', type=int, default=2, metavar='#',
                        help='sorta like threads')
    parser.add_argument('--debug', type=str, default='DEBUG', metavar='LEVEL',
                        help='debug level or module1=level,module2=level')
    parser.add_argument('files', nargs='*', metavar='FILE',
                        help='filenames to process')

    if 'xterm' in os.environ.get('TERM', ''):
        print "\033]2;{}\007".format(title)

    def debug_level(option):
        if '=' in option:
            return option.split('=')
        if option.upper() in ('DEBUG', 'INFO', 'WARN', 'ERROR'):
            return ('', option)
        return (option, 'DEBUG')

    cliargs = parser.parse_args()
    debuglevels = dict([debug_level(level) for level in cliargs.debug.split(',')])
    logger.setup(debuglevels.get('', 'WARN'), **debuglevels)

    main(**cliargs.__dict__)
Example #24
    APP_PATH = construct_path(sys.argv[2])

    # Install settings if not yet installed
    if not os.path.exists(CONFIG_PATH + 'bot_settings.py'):
        shutil.move(os.path.join(APP_PATH, 'settings.py'),
                    CONFIG_PATH + 'bot_settings.py')
        print('Edit ' + CONFIG_PATH + 'bot_settings.py with your settings.')
        exit(1)
    sys.path.insert(0, CONFIG_PATH)

    # Set up logger
    LOG_PATH = os.path.join(CONFIG_PATH, 'logs/')
    if not os.path.exists(LOG_PATH):
        os.makedirs(LOG_PATH)
    import logger
    logger.setup(LOG_PATH)

    # local imports
    import bot_settings as settings
    from db_manager import DbManager

    # Create twitter instance
    twitter = get_twython_instance(settings.API)

    # Initialize the Database
    following = twitter.get_friends_ids(
        screen_name=settings.API['TWITTER_HANDLE'])['ids']
    db = DbManager(following, CONFIG_PATH)
    unfollow_users(twitter, db.delete_user_check())

    while True:
Example #25
def main(argv):
    requests.packages.urllib3.disable_warnings()
    global loggerMigrator
    loggerMigrator = logger.setup('migrator', 'myapp.log')

    parser = argparse.ArgumentParser()
    parser.add_argument("apikey", type=str, help="Mashery V2 API Key")
    parser.add_argument("secret", type=str, help="Mashery V2 API Secret")
    parser.add_argument("env", type=str, help="Is this a production area or a sandbox area")
    parser.add_argument("area_name", type=str, help="Mashery Area Name")
    parser.add_argument("config_file", type=str, help="Full path to config file containing json blobs")
    parser.add_argument("file_with_service_ids", type=str, help="Full path to file containging list of service ids for which to create packages", nargs='?')
    
    args = parser.parse_args()

    apikey = args.apikey
    secret = args.secret
    env = args.env
    area_name = args.area_name
    config_file = args.config_file

    print('APIKey: ' + apikey)
    print('Secret: ' + secret)
    print('Production? ' + env)
    print('Area: ' + area_name)

    service_ids = []
    if args.file_with_service_ids is not None:  # optionally pass in a list of service ids for which packages should be created
        service_ids = [line.rstrip() for line in open(args.file_with_service_ids)]

    apis = fetch(env, area_name, apikey, secret, 'service_definitions', '*, service, service_definition_endpoints, service.service_classes, service.service_classes.developer_class', '')
    api_configs = fetch_service_configs(config_file)
    packages = fetch(env, area_name, apikey, secret, 'packages', '*, plans', '')
    email_template_sets = fetch(env, area_name, apikey, secret, 'email_template_sets', '*', '')

    for api in apis:
        if args.file_with_service_ids is not None:  # service ids were passed in
            if api['service_key'] not in service_ids:  # skip apis that are not in the list
                print("Skipping " + api['service_key'])
                continue

        print(api['service_key'])
        api_config = []
        if api['service_key'] in api_configs:
            api_config = api_configs[api['service_key']]
        else:
            print ""
            print "Need to create the package for this service manually: "
            print api
            print ""
            continue

        package = getPackageForService(api, packages)
        if package is not None:
            print('Deleting Package...' + package['name'])
            delete(env, area_name, apikey, secret, 'package', package)

        email_template_set = getEmailTemplateForService(api, email_template_sets)
        if email_template_set is not None:
            print('Deleting Email Template Set..' + email_template_set['name'])
            delete(env, area_name, apikey, secret, 'email_template_set', email_template_set)

        email_template_set = getEmailTemplate(api_config)
        created_email_template_set = create(env, area_name, apikey, secret, 'email_template_set', email_template_set)
        package = buildPackagedApi(api, api_config['notifications'])
        print('Creating Package...' + package['name'])

        created_package = create(env, area_name, apikey, secret, 'package', package)
        created_plans = fetch(env, area_name, apikey, secret, 'plans', '*', 'REQUIRE RELATED package WITH id = ' + str(created_package['result']['id']))

        for plan in created_plans:
            if created_email_template_set is not None:
                email_template_set_t = {}
                email_template_set_t['id'] = created_email_template_set['result']['id']
                plan['email_template_set'] = email_template_set_t

            update(env, area_name, apikey, secret, 'plan', plan)
            plan_service = buildPlanService(api, plan)
            created_plan_service = create(env, area_name, apikey, secret, 'plan_service', plan_service)

            plan_endpoints = buildPlanEndpoints(api, plan)
            created_plan_endpoints = create(env, area_name, apikey, secret, 'plan_endpoint', plan_endpoints)

    return
Example #27
####################################################################################################

import sys
import argparse
import os
import shutil
from dask import compute, delayed
import dask.multiprocessing

import optimizer_util
import remoterunner
import picluster

import logger

global_logger = logger.setup()

remote_working_directory = "/home/pi/profilers"
default_platform_regex = {
    "pi3": r"ARMv7 Processor rev 4 \(v7l\) \(4 cores\) BCM2835",
    "pi0": r"ARMv6-compatible processor rev 7 \(v6l\) \(1 cores\) BCM2835"
}


class RemoteProfileRunner:
    def __init__(self, profiler_data, cluster, fallback_ip_address, username,
                 password, output_path, platform_re=None,
                 ignore_extensions=[".ll", ".bc"], logging_args=None):
        self.profiler_data = profiler_data
        self.cluster = cluster
        self.fallback_ip_address = fallback_ip_address
        self.username = username
        self.password = password
Example #28
#!/usr/bin/env python3
"""This is a much more complex server that needs to exist solely to do ZLIB
decompression because we can't do it client-side because lol.
"""
import logger
logger.setup("WebServer", logfile=None)
log = logger.logging.getLogger(__name__)

import io
import os
import os.path
import sys
import time
import json
import asyncio
import aiohttp
from aiohttp import web
import datetime
import subprocess
import zlib
import struct
from texture import ImageFormat, BITS_PER_PIXEL, decode_image, encode_image


class ZlibView(aiohttp.web.View):
    """A view that decompresses ZLB.

    Given a request path like: /zlb/path?offset=x&length=y
    attempts to decompress the ZLB data at offset `x` with length `y`
    from the specified file.
    If decompression fails, returns error 500.
Example #29
    def test_write_msg(self):
        class target():
            pass

        err = Exception('Test error')
        test_context = {"ctx-key1": 'val1', "ctx-key2": 'val2'}
        test_data = {"key1": 'val1', "key2": 'val2'}
        now = '2020-01-02T03:04:05Z'

        got_messages = []

        def mock_transport(message):
            got_messages.append(message)

        logger.setup(
            target=target,
            transport=mock_transport,
            now_fn=lambda: now,
        )

        target.error('Hello Error',
                     context=test_context,
                     data=test_data,
                     err=err)
        target.warn('Hello Warn',
                    context=test_context,
                    data=test_data,
                    err=err)
        target.info('Hello Info',
                    context=test_context,
                    data=test_data,
                    err=err)
        target.debug('Hello Debug',
                     context=test_context,
                     data=test_data,
                     err=err)

        self.assertEqual(got_messages, [
            logger.json_formatter(msg='Hello Error',
                                  level='error',
                                  now=now,
                                  context=test_context,
                                  data=test_data,
                                  err=err),
            logger.json_formatter(msg='Hello Warn',
                                  level='warn',
                                  now=now,
                                  context=test_context,
                                  data=test_data,
                                  err=err),
            logger.json_formatter(msg='Hello Info',
                                  level='info',
                                  now=now,
                                  context=test_context,
                                  data=test_data,
                                  err=err),
            logger.json_formatter(msg='Hello Debug',
                                  level='debug',
                                  now=now,
                                  context=test_context,
                                  data=test_data,
                                  err=err),
        ])
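Because the test injects both transport and now_fn, json_formatter only has to turn the structured fields into a message. A hypothetical formatter consistent with the assertions above:

import json

def json_formatter(msg, level, now, context=None, data=None, err=None):
    # Serialize one record to a single JSON line; the exception is
    # stringified because it is not JSON-serializable.
    return json.dumps({
        'msg': msg,
        'level': level,
        'now': now,
        'context': context or {},
        'data': data or {},
        'err': repr(err) if err is not None else None,
    }, sort_keys=True)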
Example #30
import app
import logger

config = app.config.load()
logger.setup(transport=app.setup.logger_transport(config['log']))

server = app.create_server(config)


def startServer():
    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()
    except BaseException as err:
        logger.error("Failed to start server\n", err=err)


if config["server"]["enabled"] == True:
    startServer()
Example #31
#!/usr/bin/env python
import json

import s3
import server
import logger
import db
l = logger.setup('background')

sender = 0

def upload_full_image(fileId):
    """
    Upload a full-resolution image to Amazon S3.
    """
    l.debug("Transferring full-resolution photo %s to Amazon S3.", fileId)
    dest = str(sender) + '/full/' + fileId
    src = 'full/' + fileId
    saved = s3.save(src, dest, verbose=True)
    if saved:
        l.info("Full-resolution photo %s transferred to Amazon S3.", fileId)
        return saved
    else:
        l.error("Failed to transfer full-resolution photo %s to Amazon S3.", fileId)
        return False

def post_to_server(saved, fileId):
    """
    Notify photostreamer-server that a new full-resolution photo is available
    on Amazon S3.
    """

Example #32
import os
import json
from glob import glob
from functools import partial  # for rawincount function which counts lines in a file
import urllib.parse
from updateSources import update_all_sources
from timeit import default_timer as timer
import logging
import logger

# setup logger
logger.setup(__name__)
log = logging.getLogger(__name__)

# Project Settings
BASEDIR_PATH = os.path.dirname(os.path.realpath(__file__))


def get_default():
    return {
        "destinationpath": os.path.join(BASEDIR_PATH, "rulegroups"),
        "sourcespath": os.path.join(BASEDIR_PATH, "sources"),
        "sourcedatafilename": "update.json",
        "sources": [],
    }


def get_default_hostnames():
    return {"localhost", "localhost.localdomain", "local", "broadcasthost", "localhost4",
Example #33
from Map import Point, Segment, Graph, Stop, Route, Car, Map
from Compute import get_closest_segment_np
from matplotlib import pyplot as plt
import pandas as pd
from datetime import datetime
import numpy as np
import geojson
from dbhelper import DBhelper
import time
import logger

logging = logger.setup('main')
logging.info('Started Main.py')

db = DBhelper()
gps_log = db.get_all_gps()

# test 3
# gps_log = db.get_gps("0TU0007 (3)",
#                      datetime(2019, 11, 19, 3, 27, 0),
#                      datetime(2019, 11, 19, 4, 0, 0))
# test 1b
# gps_log = db.get_gps()

# test 4
# gps_log = db.get_gps("0TU0008 (4)",
#                      datetime(2019, 11, 19, 0, 3, 0),
#                      datetime(2019, 11, 19, 3, 0, 0))

# test 5 changed from 2
# gps_log = db.get_gps("0TU0021 (2)",
Example #34
    arg_parser.add_argument("--apikey",
                            help="API key for picluster server",
                            default=None)
    arg_parser.add_argument("--username",
                            help="Username for logon to remote machine",
                            default=None)
    arg_parser.add_argument("--password",
                            help="Password for logon to remote machine",
                            default=None)
    arg_parser.add_argument("--command",
                            help="The command to run on the remote machine",
                            default=None)
    arg_parser.add_argument(
        "--timeout",
        type=int,
        default=300,
        help="Timeout for the command in seconds (default 300 seconds)")

    logger.add_logging_args(arg_parser)
    args = arg_parser.parse_args()
    logger.setup(args)

    runner = RemoteRunner(ipaddress=args.ipaddress,
                          cluster=args.cluster,
                          username=args.username,
                          password=args.password,
                          command=args.command,
                          timeout=args.timeout,
                          apikey=args.apikey)
    runner.run_command()
Example #35
from dbhelper import DBhelper
from Compute import get_closest_segment_np
from Map import Point, Segment, Graph, Stop, Route, Car, Map
import pandas as pd
import numpy as np
import multiprocessing as mp
import time
import logger

logging = logger.setup('build-map')
logging.info('START build-map.py')
st = time.time()
db = DBhelper()

# create Graph and build
G = Graph()
G.load()

# create MAP
M = Map()
M.set_graph(G)

#  build Route
route_no_list = ['1a', '1a_b', '1b', '2', '3', '4', '5']
for i in route_no_list:
    route_df = pd.read_csv(f'data/route-{i}.csv')
    route = Route(f'route-{i}')
    # load route
    for index, way in route_df.iterrows():
        route.add_way(G, way['from'], way['to'], way['stop'], way['dist'],
                      way['change'])
Example #36
from kafka import KafkaConsumer, KafkaProducer
import time
from Map import Map, Car
import pandas as pd
import re

import json
import logger

logging = logger.setup('estimator')
logging.info('Started estimator.py')

# load Map
M = Map()
M.load()


def init_car(cid):
    match = re.match(r'.*TU\d{2}(\d{2}).*\((.{1,2})\).*', cid)
    if not match:
        # logging.error(f"wrong cid format {cid}")
        return
    rid = f'route-{match.group(2).lower()}'
    r = M.R[rid]
    car = Car(cid, r, match.group(1))
    M.add_car(car)
    logging.info(f'init new car {cid}')


def format_processed_data(car):
    # [alias, route_id, lat, lon, direction, status]
Example #37
import numpy as np
import logger as log
import logging
from enum import Enum
from pathlib import Path
import tkinter as tk

logger = log.setup(name=str(Path(__file__).stem))
logger.setLevel(logging.DEBUG)


class CubeVals(Enum):
    """
    Each CubeVal contains its RGB values
    """
    RED = (255, 0, 0)
    GREEN = (0, 255, 0)
    ORANGE = (255, 153, 51)
    BLUE = (0, 0, 255)
    YELLOW = (255, 255, 0)
    WHITE = (255, 255, 255)


class Face:
    def __init__(self, ndarray: np.ndarray = None):
        if ndarray is None:
            self.values = np.zeros((3, 3))
        else:
            if not isinstance(ndarray[0][0], CubeVals) or ndarray.shape != (3, 3):
                raise ValueError("Face class takes a 3x3 array of CubeVals")
            else:
Example #38
def main():
    home_dir = os.path.expanduser("~")

    parser = argparse.ArgumentParser()

    parser.add_argument("--package_directory",
                        "-p",
                        default=os.path.join(home_dir, ".litepkg/packages"),
                        help="Set directory for package installer directory." +
                        " (default: ~/.litepkg/packages)")

    parser.add_argument("--artifacts_directory",
                        "-a",
                        default=os.path.join(home_dir, ".litepkg/artifacts"),
                        help="Directory for any artifacts. (default: " +
                        "~/.litepkg/artifacts")

    parser.add_argument("--binaries_directory",
                        "-b",
                        default=os.path.join(home_dir, "bin"),
                        help="Directory where executables should be " +
                        "symlinked. (default: ~/bin)")

    parser.add_argument("--console_log_level",
                        "-l",
                        default="INFO",
                        help="Log level for console output." +
                        " (default: INFO)")

    parser.add_argument("--file_log_level",
                        "-L",
                        default="DEBUG",
                        help="Log level for log file." + " (default: DEBUG)")

    parser.add_argument("--log_file",
                        "-f",
                        default="/dev/null",
                        help="Log file path. (default: /dev/null)")

    parser.add_argument("--verbose",
                        "-v",
                        default=False,
                        action="store_true",
                        help="Show verbose output on the console, implies" +
                        " --console_log_level DEBUG.")

    parser.add_argument("verb", help="litepkg verb")

    parser.add_argument("targets", nargs="*", help="verb targets")

    config.args = parser.parse_args()
    config.start_cwd = os.getcwd()

    if config.args.verbose:
        config.args.console_log_level = "DEBUG"

    logger.setup(logPath=config.args.log_file,
                 fileLevel=config.args.file_log_level,
                 consoleLevel=config.args.console_log_level)

    logging.debug("Application started.")
    logger.prettyLog(config.args, msg="args")


    config.args.package_directory = \
        os.path.realpath(config.args.package_directory)
    config.args.artifacts_directory = \
        os.path.realpath(config.args.artifacts_directory)
    config.args.binaries_directory = \
        os.path.realpath(config.args.binaries_directory)

    validate_dirs()

    handle_verb()
Example #39
#!/usr/bin/env python
import logger
logger.setup("python-glsl")
log = logger.logging.getLogger(__name__)

import gi
try:
    gi.require_version('Gtk', '3.0')
except AttributeError:
    raise RuntimeError("python-gobject is not installed")
from gi.repository import Gtk, GLib

from app import MainWindow
from MyRenderer import MyRenderer

win = MainWindow(MyRenderer())


def redraw():
    win.queue_draw()
    return True


GLib.timeout_add(100, redraw)

Gtk.main()  # won't return until quit event (ie window is closed)
print("Bye!")
Example #40
import ConfigParser
import logger
l = logger.setup(__name__)

def config():
	config = ConfigParser.ConfigParser()
	try:
		config.readfp(open('config.cfg'))
		return config
	except IOError:
		l.exception("Error reading config.cfg file.")
		raise
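This example targets Python 2 (ConfigParser, readfp). On Python 3 the module was renamed configparser and readfp() was replaced by read_file(); the equivalent reads:

import configparser

def config():
    config = configparser.ConfigParser()
    try:
        with open('config.cfg') as fh:
            config.read_file(fh)
        return config
    except IOError:
        l.exception("Error reading config.cfg file.")
        raise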
Example #41
  sys.exit("Couldn't find credentials file auth.json")

action = " ".join(
  without(
    without(argv, "-p"), "--silent"
  )
).lower()

# read authentication data
auth_data = json.load(open("auth.json", "r"))
ACCESS_ID = auth_data["access_id"]
SECRET_KEY = auth_data["secret_key"]

settings_filename = "settings.json"

logger.setup()

with open(settings_filename, "r") as f:
  lines = f.readlines()
  # remove comments in json
  lines = map(lambda line: re.sub(r"/\*.*\*/", "", line), lines)
  settings_file_contents = "".join(lines)
settings_raw = json.loads(settings_file_contents)
#settings_log_text = None
# for line in log:
#   if line['Activity'] in ['Create', 'Update']:
#     settings_log_text = line['Data']
# settings_log_raw = json.loads(settings_log_text) if settings_log_text else settings_raw
# settings_in_log = settings.parse(settings_log_raw)
settings_in_file = settings.parse(settings_raw)
# settings_modified = (action is not "show status" and settings_in_log is not settings_in_file)
Example #42
    # direct message to correct consumer
    if module == 'SENSOR_TEMPERATURE':
        cf.toggle_fan()


def start_services(q_in, q_out):
    try:
        Thread(target=ts.setup, args=(q_in, q_out)).start()
    except Exception:
        logger.write_e(MODULE, "unable to start thread")
        sys.exit(-1)


if __name__ == '__main__':
    logger.setup()

    q_in = []
    q_out = []

    start_services(q_in, q_out)
    logger.write(MODULE, 'booted')

    while True:
        try:
            msg = q_in.pop(0)
            handle_reading(msg)
            time.sleep(READ_TIMEOUT)
        except IndexError:
            # no readings yet
            pass
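The bare lists shared between the reader thread and this loop rely on list.pop(0) raising IndexError when empty. queue.Queue is the thread-safe idiom and can block instead of busy-waiting; a sketch (READ_TIMEOUT is assumed, matching the constant used above):

import queue

READ_TIMEOUT = 1.0  # assumed; the original module defines its own value

q_in = queue.Queue()

def consume_forever(handle_reading):
    while True:
        try:
            # Block up to READ_TIMEOUT seconds instead of sleeping and polling.
            msg = q_in.get(timeout=READ_TIMEOUT)
        except queue.Empty:
            continue  # no readings yet
        handle_reading(msg)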
Example #43
    parser.add_argument('config_file',
                        help='Path to darknet configuration file')
    parser.add_argument('weights_file', help='Path to darknet weights file')

    parser.add_argument(
        '-o',
        '--output_directory',
        help='Path to output directory (default: input weights file directory)'
    )

    model_options = parser.add_argument_group('model_options')
    model_options.add_argument(
        "--step_interval",
        help="produce a steppable ELL model for a millisecond interval",
        type=float)
    model_options.add_argument(
        "--lag_threshold",
        help=
        "number of step intervals to fall behind before notifying the caller.\n"
        "used when step_interval is set\n",
        type=float)

    logger.add_logging_args(parser)
    args = parser.parse_args()
    _logger = logger.setup(args)

    parser_args = vars(args)

    importer = DarknetImporter(parser_args)
    importer.run()