def run(self, incoming, internal, outgoing):
        g.PHASE = self.phase
        g.SERVER_SEQ = self.server_seq

        config.load()
        logger.init('play')
        master_data.load()

        # test
        g.MST[1] = 2

        while self.is_running:
            time.sleep(0.001)

            self.curr_time = time.time()
            self.delta_time = (self.curr_time - self.last_time) * 1000

            i = 0
            while i < self.get_max and not incoming.empty():
                i += 1

                print('incoming_get')
                (conn_id, req_msg_type, req_msg_body) = incoming.get()
                print(conn_id)
                print(req_msg_type)
                print(req_msg_body)
                if req_msg_type == e.PLAYER_CREATE:
                    print('add_player')
                    area_id = 0
                    self.players[conn_id] = Player(area_id)
                    self.areas[area_id].player_conn_ids.append(conn_id)

                elif req_msg_type == e.PLAYER_DELETE:
                    print('remove_player')
                    area_id = self.players[conn_id].area_id
                    self.areas[area_id].player_conn_ids.remove(conn_id)
                    del self.players[conn_id]

                elif req_msg_type in g.PLAY_HANDLERS:
                    (conn_id, ack_msg_type, ack_msg_body, rcpt) = \
                        g.PLAY_HANDLERS[req_msg_type](conn_id, req_msg_body)

                    if rcpt == e.TO_ME:
                        outgoing.put([conn_id, ack_msg_type, ack_msg_body])

                    elif rcpt == e.TO_ALL:
                        area_id = self.players[conn_id].area_id
                        for player_conn_id in self.areas[area_id].player_conn_ids:
                            outgoing.put([player_conn_id, ack_msg_type, ack_msg_body])

                    elif rcpt == e.TO_DATA:
                        internal.put([conn_id, ack_msg_type, ack_msg_body])

            for player in self.players.values():
                player.run(self.delta_time)

            for area in self.areas.values():
                area.run(self.delta_time)

            self.last_time = self.curr_time
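
The dispatch above assumes that every entry in g.PLAY_HANDLERS is a callable taking (conn_id, req_msg_body) and returning a (conn_id, ack_msg_type, ack_msg_body, rcpt) tuple. A minimal sketch of registering such a handler follows; e.PLAYER_MOVE and the echoed body are hypothetical placeholders.

def on_player_move(conn_id, req_msg_body):
    # echo the move to everyone in the sender's area (hypothetical message type)
    return (conn_id, e.PLAYER_MOVE, req_msg_body, e.TO_ALL)

g.PLAY_HANDLERS[e.PLAYER_MOVE] = on_player_move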
def _append_data(analysis):
    """append source scores across subjects"""
    try:
        stcs, connectivity = load('score_source', subject='fsaverage',
                                  analysis=analysis['name'])
    except Exception:
        # cache miss: recompute from single subjects. Note that `connectivity`
        # is only bound by the cached load above; this branch assumes it is
        # defined elsewhere before the save below.
        stcs = list()
        for meg_subject, subject in zip(range(1, 21), subjects_id):
            if subject in bad_mri:
                continue
            # load
            stc, _, _ = load('evoked_source', subject=meg_subject,
                             analysis=analysis['name'])
            morph = load('morph', subject=meg_subject)
            vertices_to = [np.arange(10242)] * 2
            # fix angle error scale
            if 'circAngle' in analysis['name']:
                stc._data /= 2.

            # apply morph
            stc_morph = morph_data_precomputed(subject, 'fsaverage', stc,
                                               vertices_to, morph)
            stcs.append(stc_morph.data)
        stcs = np.array(stcs)
        save([stcs, connectivity], 'score_source', subject='fsaverage',
             analysis=analysis['name'], overwrite=True, upload=True)
    return stcs, connectivity
Example #3
def main(args):
    from argparse import ArgumentParser

    parser = ArgumentParser(description="")
    parser.add_argument(
        "-f", "--config-file", dest="cf", help="Read config from FILE", metavar="FILE", default=config.DEFAULT_PATH
    )
    parser.add_argument("-d", "--debug", help="Turn on debugging info.", type=bool, default=False)
    args = parser.parse_args(args)

    # Set up debugging state.
    if args.debug:
        debug.enable()

    # Load config file.
    if os.path.isfile(args.cf):
        try:
            c = config.load(args.cf)
        except config.ParseError as ve:
            print('Could not decode the config file: "%s":\n%s.' % (args.cf, ve.message))
            sys.exit(-1)
    else:
        c = config.load()
        c.save()

    Game(c).run()
Example #4
def _init_env():
	import os, sys

	_dirs = ['lib', 'libs']
	_d_ins = [os.path.join(sys.path[0], _d) for _d in _dirs if \
			os.path.exists(os.path.join(sys.path[0], _d))]
	sys.path = [sys.path[0]] + _d_ins + sys.path[1:]

	_opts = _usage()

	import config
	config.load(_opts.config)

	if not config.cfg.has_section('conf'):
		config.cfg.add_section('conf')

	if not _opts.logging and 'output' in _opts:
		_opts.logging = os.path.join(_opts.output, 'log.txt')

	for _k, _v in _opts.__dict__.items():
		if _v is not None:
			config.cfg.set('conf', _k, str(_v))

	import logging_util
	logging_util.init(_opts.logging)

	import file_unzip as fz
	fz.clean(fz.default_dir(_opts.temp))

	return _opts
    def Run(self, extra_arg=None):
        """ Figure out which game to play based on the contents of the 
        vp_game_map_file. """

        import config

        if extra_arg is not None:
            logging.getLogger("vpcom").info("Run received extra arg!?")
            logging.getLogger("vpcom").info("Arg was {0}".format(extra_arg))

        vp_game_map_file = config.value_for_key_path(keypath="vp_game_map_file", default="/.")
        vp_game_map = yaml.load(open(vp_game_map_file, "r"))
        game_class = vp_game_map[self.GameName]["kls"]
        game_path = vp_game_map[self.GameName]["path"]
        yamlpath = vp_game_map[self.GameName]["yaml"]
        logging.getLogger("vpcom").info("S11 is ..." + str(self.Sys11))

        try:
            # switch to the directory of the current game
            curr_file_path = os.path.dirname(os.path.abspath(__file__))
            newpath = os.path.realpath(curr_file_path + game_path)
            os.chdir(newpath)

            # add the path to the system path; this lets game relative procgames
            # be found if needed
            sys.path.insert(0, newpath)

            # re-import and re-load config to find game relative config.yaml if existing
            from procgame import config

            config.load()

            # now load procgame --this will be game relative if present, or system-wide if not
            from procgame import *

            # find the class of the game instance
            klass = util.get_class(game_class, game_path)
            self.game = klass()

            self.game.yamlpath = yamlpath
            self.game.log("GameName: " + str(self.GameName))
            self.game.log("SplashInfoLine: " + str(self.SplashInfoLine))

        except Exception as e:
            import traceback

            exc_type, exc_value, exc_traceback = sys.exc_info()

            formatted_lines = traceback.format_exc().splitlines()
            exceptionName = formatted_lines[-1]

            logging.getLogger("vpcom").info("game instantiation error({0})".format(exceptionName))
            logger = logging.getLogger("vpcom")
            logger.info("PYTHON FAILURE (Visual Pinball Bridge is now broken)")
            logger.info("Exception Name {0}".format(e))
            for l in formatted_lines:
                logger.info("{0}".format(l))
            if len(formatted_lines) > 2:
                self.ErrorMsg += "\n" + formatted_lines[-3] + "\n" + formatted_lines[-2] + "\n" + formatted_lines[-1]
            raise
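
For reference, Run() above reads three keys per game from the vp_game_map file. A hedged sketch of what one entry of that YAML might look like (the game name, class and paths are hypothetical):

import yaml

vp_game_map = yaml.safe_load("""
MyGame:
  kls: my_game.MyGame
  path: /games/mygame/
  yaml: /games/mygame/config.yaml
""")
entry = vp_game_map["MyGame"]
print(entry["kls"], entry["path"], entry["yaml"])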
Example #6
 def test_loadConfig(self):
     config.load(self.configFile.getFilename())
     self.assertEqual(config.DEFAULT_LANGUAGE, 'cz')
     self.assertEqual(config.DATA_FOLDER, os.path.normpath(os.path.join(os.path.dirname(__file__), '../../data')))
     self.assertEqual(config.LOG_FILE, os.path.normpath('C:\\ProgramData\\Artshow\\artshow.log'))
     self.assertListEqual(config.CURRENCY, ['czk', 'eur', 'usd'])
     self.assertEqual(len(config.SESSION_KEY), 24)
Example #7
def resetConfiguration(factoryDefaults=False):
	"""Loads the configuration, installs the correct language support and initialises audio so that it will use the configured synth and speech settings.
	"""
	import config
	import braille
	import speech
	import languageHandler
	import inputCore
	log.debug("Terminating braille")
	braille.terminate()
	log.debug("terminating speech")
	speech.terminate()
	log.debug("terminating addonHandler")
	addonHandler.terminate()
	log.debug("Reloading config")
	config.load(factoryDefaults=factoryDefaults)
	logHandler.setLogLevelFromConfig()
	#Language
	lang = config.conf["general"]["language"]
	log.debug("setting language to %s"%lang)
	languageHandler.setLanguage(lang)
	# Addons
	addonHandler.initialize()
	#Speech
	log.debug("initializing speech")
	speech.initialize()
	#braille
	log.debug("Initializing braille")
	braille.initialize()
	log.debug("Reloading user and locale input gesture maps")
	inputCore.manager.loadUserGestureMap()
	inputCore.manager.loadLocaleGestureMap()
	log.info("Reverted to saved configuration")
def _analyze_toi(analysis):
    """Subscore each analysis as a function of the reported visibility"""
    ana_name = analysis['name'] + '-toi'

    # don't recompute if not necessary
    fname = paths('score', analysis=ana_name)
    if os.path.exists(fname):
        return load('score', analysis=ana_name)

    # gather data
    n_subject = 20
    scores = dict(visibility=np.zeros((n_subject, len(tois), 4)),
                  contrast=np.zeros((n_subject, len(tois), 3)))
    R = dict(visibility=np.zeros((n_subject, len(tois))),
             contrast=np.zeros((n_subject, len(tois))),)
    for s, subject in enumerate(subjects):
        gat, _, events_sel, events = load('decod', subject=subject,
                                          analysis=analysis['name'])
        events = events.iloc[events_sel].reset_index()
        for t, toi in enumerate(tois):
            # Average predictions on single trials across time points
            y_pred = _average_ypred_toi(gat, toi, analysis)
            # visibility
            for factor in ['visibility', 'contrast']:
                # subscore per condition (e.g. each visibility rating)
                scores[factor][s, t, :] = _subscore(y_pred, events,
                                                    analysis, factor)
                # correlate residuals with factor
                R[factor][s, t] = _subregress(y_pred, events,
                                              analysis, factor, True)

    save([scores, R], 'score', analysis=ana_name, overwrite=True, upload=True)
    return [scores, R]
Example #9
 def test_load_calls_create(self, create_config, isfile, mkpath):
         # if the config folder doesn't exist
         isfile.return_value = False
         # and we try to load the configuration
         config.load()
         # we try to create the configuration
         create_config.assert_called_once_with()
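
The mock arguments above imply mock.patch decorators that this listing does not show. A hedged reconstruction (the patch targets are hypothetical; decorators apply bottom-up, so the mocks arrive as create_config, isfile, mkpath):

from unittest import mock

@mock.patch('config.mkpath')          # hypothetical target
@mock.patch('config.os.path.isfile')  # hypothetical target
@mock.patch('config.create_config')   # hypothetical target
def test_load_calls_create(self, create_config, isfile, mkpath):
    ...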
Example #10
File: ppre.py Project: PPAlpha/PPRE
 def openProjectOf(self, projFile):
     self.projFile = projFile
     config.load(open(projFile, "r"), config.qtSetter, self.projectinfo)
     config.project = {"directory": 
         self.projectinfo["location_directory_value"].text()}
     config.project["versioninfo"] = pokeversion.get()
     return
def _analyze_continuous(analysis):
    """Regress prediction error as a function of visibility and contrast for
    each time point"""
    ana_name = analysis['name'] + '-continuous'

    # don't recompute if not necessary
    fname = paths('score', analysis=ana_name)
    if os.path.exists(fname):
        return load('score', analysis=ana_name)

    # gather data
    n_subject = 20
    n_time = 151
    scores = dict(visibility=np.zeros((n_subject, n_time, 4)),
                  contrast=np.zeros((n_subject, n_time, 3)))
    R = dict(visibility=np.zeros((n_subject, n_time)),
             contrast=np.zeros((n_subject, n_time)),)
    for s, subject in enumerate(subjects):
        gat, _, events_sel, events = load('decod', subject=subject,
                                          analysis=analysis['name'])
        events = events.iloc[events_sel].reset_index()
        y_pred = np.transpose(get_diagonal_ypred(gat), [1, 0, 2])[..., 0]
        for factor in ['visibility', 'contrast']:
            # subscore per condition (e.g. each visibility rating)
            scores[factor][s, :, :] = _subscore(y_pred, events,
                                                analysis, factor)
            # correlate residuals with factor
            R[factor][s, :] = _subregress(y_pred, events,
                                          analysis, factor, True)

    times = gat.train_times_['times']
    save([scores, R, times], 'score', analysis=ana_name,
         overwrite=True, upload=True)
    return [scores, R, times]
Example #12
def load_command(file_path):
    """
  open
  load          Opens a new file and loads its contents into memory, where it can
                be worked with.
                Usage: load FILE"""
    config.load(file_path, __sort)
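
Given the usage string above, a call would look like this (the file name is hypothetical):

load_command('notes.txt')  # passes notes.txt to config.load with the __sort callback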
Example #13
def main(argv): # argv should be a list of config files, parsed in that order
    config_files = argv[1:] # chop off actual program
    
    if len(config_files) < 1:
        config_files.append('default.cfg')
    if len(config_files) < 2: 
        config_files.append('gghost.cfg')
    config.load(config_files)
    
    # Set up the environment
    os.environ.update(config.config['environ'])
    
    # Get eventlet loaded, so hook.py can detect it
    
    import eventlet
    from eventlet import backdoor
    
    # Initialise backdoor
    if int(config.config['launch'].get('backdoor', '1')):
        eventlet.spawn(backdoor.backdoor_server,
                       eventlet.listen((config.config['launch'].get('backdoor_bind', '127.0.0.1'),
                       int(config.config['launch'].get('backdoor_port', '3000')))))
    
    
    # Initialise plugins
    import hook
    for plugin in config.config['launch']['plugins'].split(','):
        hook.install(plugin)
    
    # Let's run
    core_module = __import__(config.config['launch']['core'])
    eventlet.spawn(core_module.launch).wait()
    
    return 0
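
For reference, a hedged sketch of the config.config mapping the code above reads (section and key names are taken from the lookups above; the values are hypothetical):

expected = {
    'environ': {'TZ': 'UTC'},                # copied into os.environ
    'launch': {
        'backdoor': '1',                     # enable the eventlet backdoor
        'backdoor_bind': '127.0.0.1',
        'backdoor_port': '3000',
        'plugins': 'logger,autojoin',        # comma-separated hook plugins
        'core': 'gghost_core',               # module whose launch() is spawned
    },
}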
Example #14
    def req_init_module_handler(self):
        req = urlparse.urlparse(self.path).query
        reqs = urlparse.parse_qs(req, keep_blank_values=True)
        data = ""

        try:
            module = reqs["module"][0]
            config.load()

            if reqs["cmd"] == ["start"]:
                result = module_init.start(module)
                data = '{ "module": "%s", "cmd": "start", "result": "%s" }' % (module, result)
            elif reqs["cmd"] == ["stop"]:
                result = module_init.stop(module)
                data = '{ "module": "%s", "cmd": "stop", "result": "%s" }' % (module, result)
            elif reqs["cmd"] == ["restart"]:
                result_stop = module_init.stop(module)
                result_start = module_init.start(module)
                data = '{ "module": "%s", "cmd": "restart", "stop_result": "%s", "start_result": "%s" }' % (
                    module,
                    result_stop,
                    result_start,
                )
        except Exception as e:
            logging.exception("init_module except:%s", e)

        self.send_response("text/html", data)
def _stats(analysis):
    """2nd order stats across subjects"""

    # if already computed lets just load it
    ana_name = 'stats_' + analysis['name'] + '_vhp'
    if op.exists(paths('score', analysis=ana_name)):
        return load('score', analysis=ana_name)

    # gather scores across subjects
    scores = list()
    for subject in range(1, 21):
        kwargs = dict(subject=subject, analysis=analysis['name'] + '_vhp')
        fname = paths('score', **kwargs)
        if op.exists(fname):
            score, times = load(**kwargs)
        else:
            score, times = _decod(subject, analysis)
        scores.append(score)
    scores = np.array(scores)

    # compute stats across subjects
    p_values = stats(scores - analysis['chance'])
    diag_offdiag = scores - np.tile([np.diag(sc) for sc in scores],
                                    [len(times), 1, 1]).transpose(1, 0, 2)
    p_values_off = stats(diag_offdiag)

    # Save stats results
    out = dict(scores=scores, p_values=p_values, p_values_off=p_values_off,
               times=times, analysis=analysis)
    save(out, 'score',  analysis=ana_name)
    return out
def main():
    server_type = 'play'

    if len(sys.argv) < 3:
        print('Usage: sudo python3 ./PlayServer.py develop 00')
        sys.exit()

    g.PHASE = sys.argv[1]
    g.SERVER_SEQ = sys.argv[2]

    config.load()
    logger.init(server_type)
    master_data.load()

    # test
    g.MST[1] = 2

    pool_size = g.CFG[server_type + '_thread_pool_size']
    port = g.CFG[server_type + g.SERVER_SEQ]

    # queue
    g.INCOMING = multiprocessing.Queue()
    g.INTERNAL = multiprocessing.Queue()
    g.OUTGOING = multiprocessing.Queue()

    # play
    play_loop = PlayLoop(g.PHASE, g.SERVER_SEQ)
    g.PROCESS = multiprocessing.Process(target=play_loop.run, args=(g.INCOMING, g.INTERNAL, g.OUTGOING))
    g.PROCESS.start()

    # play_server
    g.LOOP = asyncio.get_event_loop()

    # pool
    # Pool.create is a coroutine; `yield from` is invalid in a plain function,
    # so drive it to completion on the loop instead.
    g.REDIS_POOL = g.LOOP.run_until_complete(
        asyncio_redis.Pool.create(host='127.0.0.1', port=6379, poolsize=10))
    g.THREAD_POOL = concurrent.futures.ThreadPoolExecutor(pool_size)

    try:
        g.LOOP.add_signal_handler(signal.SIGINT, shutdown)
        g.LOOP.add_signal_handler(signal.SIGTERM, shutdown)

    except NotImplementedError:
        pass

    g.TASK_INTERNAL = g.LOOP.create_task(handle_internal())
    g.TASK_OUTGOING = g.LOOP.create_task(handle_outgoing())

    coro = g.LOOP.create_server(PlayConnection, port=port)
    g.SERVER = g.LOOP.run_until_complete(coro)

    for sock in g.SERVER.sockets:
        print('{}_server_{} starting.. {}'.format(server_type, g.SERVER_SEQ, sock.getsockname()))

    try:
        g.LOG.info('%s_server_%s starting.. port %s', server_type, g.SERVER_SEQ, port)
        g.LOOP.run_forever()

    except KeyboardInterrupt:
        shutdown()
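
shutdown() is referenced above but not shown in this listing; a minimal sketch, assuming it only has to stop the play process and the event loop:

def shutdown():
    g.PROCESS.terminate()
    g.SERVER.close()
    g.LOOP.stop()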
Example #17
 def robotInit(self):
     try:
         ttl = cfg.current_milli_time()
         cfg.load("ignore")
         cfgdly = cfg.current_milli_time() - ttl
         print("Config Load Time: " + str(cfgdly))
     except Exception:
         sys.exit("Robot initialization did not complete successfully.")
Example #18
 def __init__(self) :
     self.current = ''
     config.load()
     self.config = config.config
     self.buttons = {}
     self.frames = {}
     self.service = None
     self.main()
Example #19
    def req_config_handler(self):
        req = urlparse.urlparse(self.path).query
        reqs = urlparse.parse_qs(req, keep_blank_values=True)
        data = ''

        if reqs['cmd'] == ['get_config']:
            config.load()
            data = '{ "check_update": "%d", "popup_webui": %d, "auto_start": %d, "php_enable": %d }' %\
                   (config.get(["update", "check_update"], 1)
                    , config.get(["modules", "launcher", "popup_webui"], 1)
                    , config.get(["modules", "launcher", "auto_start"], 0)
                    , config.get(["modules", "php_proxy", "auto_start"], 0))
        elif reqs['cmd'] == ['set_config']:
            if 'check_update' in reqs:
                check_update = int(reqs['check_update'][0])
                if check_update != 0 and check_update != 1:
                    data = '{"res":"fail, check_update:%s"}' % check_update
                else:
                    config.set(["update", "check_update"], int(check_update))
                    config.save()

                    data = '{"res":"success"}'

            elif 'popup_webui' in reqs:
                popup_webui = int(reqs['popup_webui'][0])
                if popup_webui != 0 and popup_webui != 1:
                    data = '{"res":"fail, popup_webui:%s"}' % popup_webui
                else:
                    config.set(["modules", "launcher", "popup_webui"], popup_webui)
                    config.save()

                    data = '{"res":"success"}'
            elif 'auto_start' in reqs:
                auto_start = int(reqs['auto_start'][0])
                if auto_start != 0 and auto_start != 1:
                    data = '{"res":"fail, auto_start:%s"}' % auto_start
                else:
                    if auto_start:
                        autorun.enable()
                    else:
                        autorun.disable()

                    config.set(["modules", "launcher", "auto_start"], auto_start)
                    config.save()

                    data = '{"res":"success"}'
            elif 'php_enable' in reqs:
                php_enable = int(reqs['php_enable'][0])
                if php_enable != 0 and php_enable != 1:
                    data = '{"res":"fail, php_enable:%s"}' % php_enable
                else:
                    config.set(["modules", "php_proxy", "auto_start"], php_enable)
                    config.save()
                    data = '{"res":"success"}'
            else:
                data = '{"res":"fail"}'

        self.send_response('text/html', data)
	def setUp(self):
		yaml_file = os.path.join(os.path.dirname(__file__), '..', '..', 'config.yml')
		config.load(yaml_file)
		config.bootstrap(['-vv'])
		# make sure we're not mocking out google reader
		app_globals.OPTIONS['test'] = False
		config.parse_options(['--output-path=/tmp/gris-test', '--num-items=1'])
		config.check()
		self.reader = app_globals.READER = Reader()
Example #21
def main():
    app = QApplication(sys.argv)
    app.connect(app, SIGNAL("lastWindowClosed()"), app, SLOT("quit()"))
    w = MainWindow()
    w.show()
    config.load()
    editor.setup_pisi()
    w.browser.collect_pspecs(config.pspec_folder)
    app.exec_loop()
Example #22
  def run_resume(self):
    config.load(os.path.join(config.latest_link_file, config.config_file_name))
    fridge.get_all_from_disk(config.work_dir)

    with open(config.failure_file) as failure_f:
      last_stage = int(failure_f.readline())
    config.dynamic('stage', last_stage)

    self.run_stages()
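
run_resume assumes a failure file whose first line holds the number of the stage that failed. A hedged sketch of producing one (the path is hypothetical; config.failure_file supplies the real one):

with open('failure.txt', 'w') as failure_f:
    failure_f.write('3\n')  # stage 3 was the last (failed) stage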
Example #23
def read_conf(config_file):
    global local, qfile_set, other_set, remote_set, all_set
    global master_base_path, host_base_path
    global ant_path, arc_path, phutil_path, code_path, report_path, host_code_path, ivy_path

    if config_file is not None:
        config.load(config_file)
    else:
        config.load()

    local = config.local
    qfile_set = config.qfile_set
    other_set = config.other_set
    remote_set = config.remote_set
    all_set = config.all_set

    master_base_path = config.master_base_path
    host_base_path = config.host_base_path

    if "HIVE_PTEST_SUFFIX" in os.environ:
        suffix = os.environ["HIVE_PTEST_SUFFIX"]
        master_base_path += "-" + suffix
        host_base_path += "-" + suffix

    ant_path = master_base_path + "/apache-ant-1.8.4"
    arc_path = master_base_path + "/arcanist"
    phutil_path = master_base_path + "/libphutil"
    code_path = master_base_path + "/trunk"
    report_path = master_base_path + "/report/" + time.strftime("%m.%d.%Y_%H:%M:%S")
    host_code_path = host_base_path + "/trunk-{host}"
    ivy_path = master_base_path + "/.ivy2"

    # Setup of needed environmental variables and paths

    # Proxy
    if args.http_proxy is not None:
        all_set.export("http_proxy", args.http_proxy + ":" + args.http_proxy_port)
        all_set.export("https_proxy", args.http_proxy + ":" + args.http_proxy_port)
        all_set.export("ANT_OPTS", get_ant_opts_proxy())

    # Ant
    all_set.export("ANT_HOME", ant_path)
    all_set.add_path(ant_path + "/bin")

    # Arcanist
    all_set.add_path(arc_path + "/bin")

    # Java
    all_set.export("JAVA_HOME", config.java_home)
    all_set.add_path(config.java_home + "/bin")

    # Hive
    remote_set.export("HIVE_HOME", host_code_path + "/build/dist")
    remote_set.add_path(host_code_path + "/build/dist/bin")
Example #24
    def __init__(self, config_file):
        load(config_file)
        MysqlConnectionManager.create_and_initialise()
        MysqlConnectionManager.connect()

        handlers = [
            (r'/recommendation/item/([\w\d\-]+)', ItemRecommendationController),
            (r'/recommendation/user/([\w\d\-]+)', UserRecommendationController),
            ('/recommendations/_build', RecommendationsBuildController),
        ]
        Application.__init__(self, handlers)
Example #25
def main():
    load('bookshelf.conf')
    if config('LOCALE') is not None and config('LOCALE') != '':
        Locale.setDefault(Locale(config('LOCALE')))
    from window import BookshelfView
    from logic import Bookshelf
    UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName())
    view = BookshelfView(Bookshelf())
    screenSize = Toolkit.getDefaultToolkit().getScreenSize()
    view.setLocation(screenSize.width/5,  screenSize.height/5)
    view.setVisible(1)
Example #26
def read_conf(config_file):
    global local, qfile_set, other_set, remote_set, all_set
    global master_base_path, host_base_path
    global ant_path, arc_path, phutil_path, code_path, report_path, host_code_path, ivy_path

    if config_file is not None:
        config.load(config_file)
    else:
        config.load()

    local = config.local
    qfile_set = config.qfile_set
    other_set = config.other_set
    remote_set = config.remote_set
    all_set = config.all_set

    master_base_path = config.master_base_path
    host_base_path = config.host_base_path

    if 'HIVE_PTEST_SUFFIX' in os.environ:
        suffix = os.environ['HIVE_PTEST_SUFFIX']
        master_base_path += '-' + suffix
        host_base_path += '-' + suffix

    ant_path = master_base_path + '/apache-ant-1.8.4'
    arc_path = master_base_path + '/arcanist'
    phutil_path = master_base_path + '/libphutil'
    code_path = master_base_path + '/trunk'
    report_path = master_base_path + '/report/' + time.strftime('%m.%d.%Y_%H:%M:%S')
    host_code_path = host_base_path + '/trunk-{host}'
    ivy_path = master_base_path + '/.ivy2'

    # Setup of needed environmental variables and paths

    # Proxy
    if args.http_proxy is not None:
        all_set.export('http_proxy', args.http_proxy + ':' + args.http_proxy_port)
        all_set.export('https_proxy', args.http_proxy + ':' + args.http_proxy_port)
        all_set.export('ANT_OPTS', get_ant_opts_proxy())

    # Ant
    all_set.export('ANT_HOME', ant_path)
    all_set.add_path(ant_path + '/bin')

    # Arcanist
    all_set.add_path(arc_path + '/bin')

    # Java
    all_set.export('JAVA_HOME', config.java_home)
    all_set.add_path(config.java_home + '/bin')

    # Hive
    remote_set.export('HIVE_HOME', host_code_path + '/build/dist')
    remote_set.add_path(host_code_path + '/build/dist/bin')
	def test_should_load_plist(self):
		write_file(self.plist_file, """<?xml version="1.0" encoding="UTF-8"?>
			<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
			<plist version="1.0">
				<dict>
					<key>num_items</key>
					<integer>5</integer>
				</dict> 
			</plist>
			 """)
		config.load(self.plist_file)
		self.assertEqual(config.app_globals.OPTIONS['num_items'], 5) 
Example #28
def bot_REHASH(bot, source, args, receive):

	nick = source.split('!')[0]

	# only bot admins may rehash the configuration
	if nick not in bot.admins:
		bot.msg(receive, "You are not authorized.")
		return

	config.load(main.runtime['config'])

	# announce rehash to admin channel
	modules.channels.notify(bot, "info", "Configuration rehashed by %s" % nick)
Example #29
    def req_config_handler(self):
        req = urlparse.urlparse(self.path).query
        reqs = urlparse.parse_qs(req, keep_blank_values=True)
        data = ""

        if reqs["cmd"] == ["get_config"]:
            config.load()
            data = '{ "check_update": "%d", "popup_webui": %d, "auto_start": %d }' % (
                config.get(["update", "check_update"], 1),
                config.get(["modules", "launcher", "popup_webui"], 1),
                config.get(["modules", "launcher", "auto_start"], 0),
            )
        elif reqs["cmd"] == ["set_config"]:
            if "check_update" in reqs:
                check_update = int(reqs["check_update"][0])
                if check_update != 0 and check_update != 1:
                    data = '{"res":"fail, check_update:%s"}' % check_update
                else:
                    config.set(["update", "check_update"], int(check_update))
                    config.save()

                    data = '{"res":"success"}'

            elif "popup_webui" in reqs:
                popup_webui = int(reqs["popup_webui"][0])
                if popup_webui != 0 and popup_webui != 1:
                    data = '{"res":"fail, popup_webui:%s"}' % popup_webui
                else:
                    config.set(["modules", "launcher", "popup_webui"], popup_webui)
                    config.save()

                    data = '{"res":"success"}'
            elif "auto_start" in reqs:
                auto_start = int(reqs["auto_start"][0])
                if auto_start != 0 and auto_start != 1:
                    data = '{"res":"fail, auto_start:%s"}' % auto_start
                else:
                    if auto_start:
                        autorun.enable()
                    else:
                        autorun.disable()

                    config.set(["modules", "launcher", "auto_start"], auto_start)
                    config.save()

                    data = '{"res":"success"}'
            else:
                data = '{"res":"fail"}'

        self.send_response("application/json", data)
Example #30
def read_conf(config_file):
    global local, qfile_set, other_set, remote_set, all_set
    global master_base_path, host_base_path
    global ant_path, arc_path, phutil_path, code_path, report_path, host_code_path

    if config_file is not None:
        config.load(config_file)
    else:
        config.load()

    local = config.local
    qfile_set = config.qfile_set
    other_set = config.other_set
    remote_set = config.remote_set
    all_set = config.all_set

    master_base_path = config.master_base_path
    host_base_path = config.host_base_path

    if 'HIVE_PTEST_SUFFIX' in os.environ:
        suffix = os.environ['HIVE_PTEST_SUFFIX']
        master_base_path += '-' + suffix
        host_base_path += '-' + suffix

    ant_path = master_base_path + '/apache-ant-1.8.2'
    arc_path = master_base_path + '/arcanist'
    phutil_path = master_base_path + '/libphutil'
    code_path = master_base_path + '/trunk'
    report_path = master_base_path + '/report/' + time.strftime('%m.%d.%Y_%H:%M:%S')
    host_code_path = host_base_path + '/trunk-{host}'

    # Setup of needed environmental variables and paths

    # Ant
    all_set.add_path(ant_path + '/bin')

    # Arcanist
    all_set.add_path(arc_path + '/bin')

    # Java
    all_set.export('JAVA_HOME', config.java_home)
    all_set.add_path(config.java_home + '/bin')

    # Hive
    remote_set.export('HIVE_HOME', host_code_path + '/build/dist')
    remote_set.add_path(host_code_path + '/build/dist/bin')

    # Hadoop
    remote_set.export('HADOOP_HOME', host_code_path +
            '/build/hadoopcore/hadoop-0.20.1')
Example #31
    parser.add_argument("--verbose",
                        "-v",
                        dest="verbose",
                        action="store_true",
                        default=False,
                        help="Verbose output to.")

    input = parser.parse_args()

    if not input.command:
        parser.print_usage(sys.stderr)
        sys.exit(1)

    # basic sub-command validation.
    if input.command not in commands:
        print("{} not a valid command.".format(input.command))
        parser.print_usage(sys.stderr)
        sys.exit(1)

    # load in the config file
    CONFIG = config.load(input.config)

    if not CONFIG:
        print("no valid config file.")
        sys.exit(1)

    judo = Judo(CONFIG)

    # call the function that is mapped in commands.
    commands[input.command](judo, input)
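
The dispatch above assumes a commands dict mapping sub-command names to callables taking (judo, input). A minimal hedged sketch (the command name and handler are hypothetical):

def cmd_status(judo, args):
    # hypothetical handler matching the commands[...](judo, input) call above
    print("status of", judo)

commands = {"status": cmd_status}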
Example #32
                        default="60")

    parser.add_argument('-p',
                        '--persist',
                        help="persist in db for change notification",
                        type=str,
                        default="false")

    parser.add_argument('-l',
                        '--log',
                        help="log level",
                        type=str,
                        default="INFO")

    args = parser.parse_args()
    c = config.load(vars(args))

    logging.basicConfig(
        level=loglevel.get(c['log'].upper(), logging.INFO),
        format="%(asctime)s,%(msecs)d %(levelname)s: %(message)s",
        datefmt="%H:%M:%S",
    )

else:
    c = config.load({"configuration": './config.json'})


runner = AsyncRunner(
    ft.partial(core.checkall, c),
    frequency=c['frequency'])
Example #33
    def req_config_handler(self):
        req = urlparse.urlparse(self.path).query
        reqs = urlparse.parse_qs(req, keep_blank_values=True)
        data = ''

        current_version = update_from_github.current_version()

        if reqs['cmd'] == ['get_config']:
            config.load()
            check_update = config.get(["update", "check_update"], 1)
            if check_update == 0:
                check_update = "dont-check"
            elif check_update == 1:
                check_update = "long-term-stable"

            data = '{ "check_update": "%s", "popup_webui": %d, "show_systray": %d, "auto_start": %d, "php_enable": %d, "goagent_enable": %d }' %\
                   (check_update
                    , config.get(["modules", "launcher", "popup_webui"], 1)
                    , config.get(["modules", "launcher", "show_systray"], 1)
                    , config.get(["modules", "launcher", "auto_start"], 0)
                    , config.get(["modules", "php_proxy", "auto_start"], 0)
                    , config.get(["modules", "goagent", "auto_start"], 0))
        elif reqs['cmd'] == ['set_config']:
            if 'check_update' in reqs:
                check_update = reqs['check_update'][0]
                if check_update not in [
                        "dont-check", "long-term-stable", "stable", "test"
                ]:
                    data = '{"res":"fail, check_update:%s"}' % check_update
                else:
                    config.set(["update", "check_update"], check_update)
                    config.save()

                    data = '{"res":"success"}'

            elif 'popup_webui' in reqs:
                popup_webui = int(reqs['popup_webui'][0])
                if popup_webui != 0 and popup_webui != 1:
                    data = '{"res":"fail, popup_webui:%s"}' % popup_webui
                else:
                    config.set(["modules", "launcher", "popup_webui"],
                               popup_webui)
                    config.save()

                    data = '{"res":"success"}'
            elif 'show_systray' in reqs:
                show_systray = int(reqs['show_systray'][0])
                if show_systray != 0 and show_systray != 1:
                    data = '{"res":"fail, show_systray:%s"}' % show_systray
                else:
                    config.set(["modules", "launcher", "show_systray"],
                               show_systray)
                    config.save()

                    data = '{"res":"success"}'
            elif 'auto_start' in reqs:
                auto_start = int(reqs['auto_start'][0])
                if auto_start != 0 and auto_start != 1:
                    data = '{"res":"fail, auto_start:%s"}' % auto_start
                else:
                    if auto_start:
                        autorun.enable()
                    else:
                        autorun.disable()

                    config.set(["modules", "launcher", "auto_start"],
                               auto_start)
                    config.save()

                    data = '{"res":"success"}'
            elif 'goagent_enable' in reqs:
                goagent_enable = int(reqs['goagent_enable'][0])
                if goagent_enable != 0 and goagent_enable != 1:
                    data = '{"res":"fail, goagent_enable:%s"}' % goagent_enable
                else:
                    config.set(["modules", "goagent", "auto_start"],
                               goagent_enable)
                    config.save()
                    if goagent_enable:
                        module_init.start("goagent")
                    else:
                        module_init.stop("goagent")
                    self.load_module_menus()
                    data = '{"res":"success"}'
            elif 'php_enable' in reqs:
                php_enable = int(reqs['php_enable'][0])
                if php_enable != 0 and php_enable != 1:
                    data = '{"res":"fail, php_enable:%s"}' % php_enable
                else:
                    config.set(["modules", "php_proxy", "auto_start"],
                               php_enable)
                    config.save()
                    if php_enable:
                        module_init.start("php_proxy")
                    else:
                        module_init.stop("php_proxy")
                    self.load_module_menus()
                    data = '{"res":"success"}'
            else:
                data = '{"res":"fail"}'
        elif reqs['cmd'] == ['get_new_version']:
            versions = update_from_github.get_github_versions()
            data = '{"res":"success", "test_version":"%s", "stable_version":"%s", "current_version":"%s"}' % (
                versions[0][1], versions[1][1], current_version)
            logging.info("%s", data)
        elif reqs['cmd'] == ['update_version']:
            version = reqs['version'][0]
            try:
                update_from_github.update_version(version)
                data = '{"res":"success"}'
            except Exception as e:
                logging.info("update_test_version fail:%r", e)
                data = '{"res":"fail", "error":"%s"}' % e

        self.send_response('text/html', data)
Example #34
    def CreateWindow(self, parent):
        panel = wx.Panel(parent)
        panel.SetMinSize((600, 300))

        # BEE Uninstall Warning | checkbox
        self.BUWC = wx.CheckBox(parent=panel,
                                name='BUWC',
                                label=loc('settings.tab.general.buwc.text'),
                                pos=wx.Point(0, -60))
        self.BUWC.SetToolTip(
            wx.ToolTip(loc('settings.tab.general.buwc.tooltip')))
        self.BUWC.SetValue(config.load('showUninstallDialog', default=True))

        # Verify Game Cache warning | checkbox
        self.VGFC = wx.CheckBox(parent=panel,
                                name='VGFC',
                                label=loc('settings.tab.general.vgfc.text'),
                                pos=wx.Point(0, -40))
        self.VGFC.SetToolTip(
            wx.ToolTip(loc('settings.tab.general.vgfc.tooltip')))
        self.VGFC.SetValue(config.load('showVerifyDialog', default=True))

        # Startup Update Check | checkbox
        self.SUCC = wx.CheckBox(parent=panel,
                                name='SUCC',
                                label=loc('settings.tab.general.succ.text'),
                                pos=wx.Point(0, -20))
        self.SUCC.SetToolTip(
            wx.ToolTip(loc('settings.tab.general.succ.tooltip')))
        self.SUCC.SetValue(config.load('startupUpdateCheck', default=True))

        # splash screen | checkbox
        self.SSC = wx.CheckBox(parent=panel,
                               name='SSC',
                               label=loc('settings.tab.general.ssc.text'),
                               pos=wx.Point(0, 0))
        self.SSC.SetToolTip(wx.ToolTip(
            loc('settings.tab.general.ssc.tooltip')))
        self.SSC.SetValue(config.load('showSplashScreen', default=True))

        # language | static text | dropdown list
        self.LDST = wx.StaticText(parent=panel,
                                  label=loc('settings.tab.general.lddl.text'),
                                  pos=wx.Point(300, -60))
        self.LDDL = wx.Choice(parent=panel,
                              name='LDDL',
                              choices=list(localizeObj.localizations.keys()),
                              pos=wx.Point(300, -40))
        self.LDDL.SetToolTip(
            wx.ToolTip(loc('settings.tab.general.lddl.tooltip')))
        self.LDDL.SetSelection(self.LDDL.FindString(localizeObj.lang))

        # reload lang files | button
        self.RLFB = wx.Button(parent=panel,
                              label=loc('settings.tab.general.rlfb.text'),
                              pos=wx.Point(299, -10))
        self.RLFB.SetToolTip(
            wx.ToolTip(loc('settings.tab.general.rlfb.tooltip')))

        # bind everything
        self.BUWC.Bind(wx.EVT_CHECKBOX, self.save)
        self.VGFC.Bind(wx.EVT_CHECKBOX, self.save)
        self.SUCC.Bind(wx.EVT_CHECKBOX, self.save)
        self.SSC.Bind(wx.EVT_CHECKBOX, self.save)
        self.LDDL.Bind(wx.EVT_CHOICE, self.save)
        self.RLFB.Bind(wx.EVT_BUTTON, self.reloadLangFiles)

        return panel
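
Note that this project's config.load reads a single key with a default, unlike the file-loading variants elsewhere on this page. A hedged sketch of such an accessor (the JSON backing file and its path are hypothetical):

import json

def load(key, default=None, path='config.json'):
    try:
        with open(path) as f:
            return json.load(f).get(key, default)
    except FileNotFoundError:
        return default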
Example #35
 def setUp(self):
     config.load("dev", "../config.yml")
Example #36
from mne.minimum_norm import apply_inverse
from conditions import analyses
from config import load, save, bad_mri, subjects_id
from base import nested_analysis

# params
inv_params = dict(lambda2=1.0 / (2 ** 3.0),
                  method='dSPM',
                  pick_ori='normal',
                  verbose=False)

for meg_subject, subject in zip(range(1, 21), subjects_id):
    # load single subject effects (across trials)
    if subject in bad_mri:
        continue
    epochs = load('epochs_decim', subject=meg_subject, preload=True)
    events = load('behavior', subject=meg_subject)
    epochs.apply_baseline((None, 0))
    epochs.pick_types(meg=True, eeg=False, eog=False)

    # Setup source data container
    evoked = epochs.average()
    inv = load('inv', subject=meg_subject)
    stc = apply_inverse(evoked, inv, **inv_params)

    # run each analysis within subject
    for analysis in analyses:
        # source transforming should be applied as early as possible,
        # but memory constraints prevent that here
        coefs = list()
        n_chunk = 20
Example #37
def create_parser(product_choices=None):
    from mozlog import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(description="Runner for web-platform-tests tests.")
    parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
                        help="Path to the folder containing test metadata"),
    parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
                        help="Path to test files"),
    parser.add_argument("--run-info", action="store", type=abs_path,
                        help="Path to directory containing extra json files to add to run info")
    parser.add_argument("--config", action="store", type=abs_path, dest="config",
                        help="Path to config file")

    parser.add_argument("--manifest-update", action="store_true", default=False,
                        help="Force regeneration of the test manifest")

    parser.add_argument("--binary", action="store",
                        type=abs_path, help="Binary to run tests against")
    parser.add_argument("--webdriver-binary", action="store", metavar="BINARY",
                        type=abs_path, help="WebDriver server binary to use")
    parser.add_argument("--processes", action="store", type=int, default=None,
                        help="Number of simultaneous processes to use")

    parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
                        help="Split run into groups by directories. With a parameter,"
                        "limit the depth of splits e.g. --run-by-dir=1 to split by top-level"
                        "directory")

    parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
                        help="Multiplier relative to standard test timeout to use")
    parser.add_argument("--repeat", action="store", type=int, default=1,
                        help="Number of times to run the tests")

    parser.add_argument("--no-capture-stdio", action="store_true", default=False,
                        help="Don't capture stdio and write to logging")

    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None, help="Browser against which to run tests")

    parser.add_argument("--list-test-groups", action="store_true",
                        default=False,
                        help="List the top level directories containing tests that will run.")
    parser.add_argument("--list-disabled", action="store_true",
                        default=False,
                        help="List the tests that are disabled on the current platform")

    build_type = parser.add_mutually_exclusive_group()
    build_type.add_argument("--debug-build", dest="debug", action="store_true",
                            default=None,
                            help="Build is a debug build (overrides any mozinfo file)")
    build_type.add_argument("--release-build", dest="debug", action="store_false",
                            default=None,
                            help="Build is a release (overrides any mozinfo file)")

    test_selection_group = parser.add_argument_group("Test Selection")
    test_selection_group.add_argument("--test-types", action="store",
                                      nargs="*", default=["testharness", "reftest"],
                                      choices=["testharness", "reftest"],
                                      help="Test types to run")
    test_selection_group.add_argument("--include", action="append",
                                      help="URL prefix to include")
    test_selection_group.add_argument("--exclude", action="append",
                                      help="URL prefix to exclude")
    test_selection_group.add_argument("--include-manifest", type=abs_path,
                                      help="Path to manifest listing tests to include")
    test_selection_group.add_argument("--tag", action="append", dest="tags",
                                      help="Labels applied to tests to include in the run. Labels starting dir: are equivalent to top-level directories.")

    debugging_group = parser.add_argument_group("Debugging")
    debugging_group.add_argument('--debugger', const="__default__", nargs="?",
                                 help="run under a debugger, e.g. gdb or valgrind")
    debugging_group.add_argument('--debugger-args', help="arguments to the debugger")

    debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
                                 help="Halt the test runner after each test (this happens by default if only a single test is run)")
    debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test", action="store_false",
                                 help="Don't halt the test runner irrespective of the number of tests run")

    debugging_group.add_argument('--pause-on-unexpected', action="store_true",
                                 help="Halt the test runner when an unexpected result is encountered")

    debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
                                 help="Path or url to symbols file used to analyse crash minidumps.")
    debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
                                 help="Path to stackwalker program used to analyse minidumps.")

    chunking_group = parser.add_argument_group("Test Chunking")
    chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
                                help="Total number of chunks to use")
    chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
                                help="Chunk number to run")
    chunking_group.add_argument("--chunk-type", action="store", choices=["none", "equal_time", "hash"],
                                default=None, help="Chunking type to use")

    ssl_group = parser.add_argument_group("SSL/TLS")
    ssl_group.add_argument("--ssl-type", action="store", default=None,
                        choices=["openssl", "pregenerated", "none"],
                        help="Type of ssl support to enable (running without ssl may lead to spurious errors)")

    ssl_group.add_argument("--openssl-binary", action="store",
                        help="Path to openssl binary", default="openssl")
    ssl_group.add_argument("--certutil-binary", action="store",
                        help="Path to certutil binary for use with Firefox + ssl")

    ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
                        help="Path to ca certificate when using pregenerated ssl certificates")
    ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
                        help="Path to host private key when using pregenerated ssl certificates")
    ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
                        help="Path to host certificate when using pregenerated ssl certificates")

    gecko_group = parser.add_argument_group("Gecko-specific")
    gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store", type=abs_path,
                             help="Path to the folder containing browser prefs")
    gecko_group.add_argument("--e10s", dest="gecko_e10s", action="store_true",
                             help="Run tests with electrolysis preferences")

    b2g_group = parser.add_argument_group("B2G-specific")
    b2g_group.add_argument("--b2g-no-backup", action="store_true", default=False,
                           help="Don't backup device before testrun with --product=b2g")

    servo_group = parser.add_argument_group("Servo-specific")
    servo_group.add_argument("--user-stylesheet",
                             default=[], action="append", dest="user_stylesheets",
                             help="Inject a user CSS stylesheet into every test.")

    parser.add_argument("test_list", nargs="*",
                        help="List of URLs for tests to run, or paths including tests to run. "
                             "(equivalent to --include)")

    commandline.add_logging_group(parser)
    return parser
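
Typical use of the parser above (the arguments are hypothetical; with product_choices=None the choices come from the loaded config):

parser = create_parser()
opts = parser.parse_args(["--repeat", "3", "--list-test-groups"])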
Example #38
    def req_config_handler(self):
        req = urlparse.urlparse(self.path).query
        reqs = urlparse.parse_qs(req, keep_blank_values=True)
        data = ''

        if reqs['cmd'] == ['get_config']:
            config.load()

            if module_init.xargs.get("allow_remote", 0):
                allow_remote_connect = 1
            else:
                allow_remote_connect = config.get(
                    ["modules", "launcher", "allow_remote_connect"], 0)

            dat = {
                "check_update":
                config.get(["update", "check_update"], "notice-stable"),
                "language":
                config.get(["language"], i18n_translator.lang),
                "popup_webui":
                config.get(["modules", "launcher", "popup_webui"], 1),
                "allow_remote_connect":
                allow_remote_connect,
                "allow_remote_switch":
                config.get(["modules", "launcher", "allow_remote_connect"], 0),
                "show_systray":
                config.get(["modules", "launcher", "show_systray"], 1),
                "auto_start":
                config.get(["modules", "launcher", "auto_start"], 0),
                "show_detail":
                config.get(["modules", "gae_proxy", "show_detail"], 0),
                "gae_proxy_enable":
                config.get(["modules", "gae_proxy", "auto_start"], 0),
                "x_tunnel_enable":
                config.get(["modules", "x_tunnel", "auto_start"], 0),
                "smart_router_enable":
                config.get(["modules", "smart_router", "auto_start"], 0),
                "system-proxy":
                config.get(["modules", "launcher", "proxy"], "smart_router"),
                "no_mess_system":
                config.get(["no_mess_system"], 0),
                "keep_old_ver_num":
                config.get(["modules", "launcher", "keep_old_ver_num"],
                           -1),  # -1 means not set yet
                "postUpdateStat":
                config.get(["update", "postUpdateStat"], "noChange"),
            }
            data = json.dumps(dat)
        elif reqs['cmd'] == ['set_config']:
            if 'skip_version' in reqs:
                skip_version = reqs['skip_version'][0]
                skip_version_type = reqs['skip_version_type'][0]
                if skip_version_type not in ["stable", "test"]:
                    data = '{"res":"fail"}'
                else:
                    config.set(
                        ["update",
                         "skip_%s_version" % skip_version_type], skip_version)
                    config.save()
                    if skip_version in update_from_github.update_info:
                        update_from_github.update_info = ''
                    data = '{"res":"success"}'
            elif 'check_update' in reqs:
                check_update = reqs['check_update'][0]
                if check_update not in [
                        "dont-check", "stable", "notice-stable", "test",
                        "notice-test"
                ]:
                    data = '{"res":"fail, check_update:%s"}' % check_update
                else:
                    if config.get(["update", "check_update"]) != check_update:
                        update_from_github.init_update_info(check_update)
                        config.set(["update", "check_update"], check_update)
                        config.save()

                    data = '{"res":"success"}'
            elif 'language' in reqs:
                language = reqs['language'][0]

                if language not in i18n_translator.get_valid_languages():
                    data = '{"res":"fail, language:%s"}' % language
                else:
                    config.set(["language"], language)
                    config.save()

                    i18n_translator.lang = language
                    self.load_module_menus()

                    data = '{"res":"success"}'
            elif 'popup_webui' in reqs:
                popup_webui = int(reqs['popup_webui'][0])
                if popup_webui != 0 and popup_webui != 1:
                    data = '{"res":"fail, popup_webui:%s"}' % popup_webui
                else:
                    config.set(["modules", "launcher", "popup_webui"],
                               popup_webui)
                    config.save()

                    data = '{"res":"success"}'
            elif 'allow_remote_switch' in reqs:
                allow_remote_switch = int(reqs['allow_remote_switch'][0])
                if allow_remote_switch != 0 and allow_remote_switch != 1:
                    data = '{"res":"fail, allow_remote_connect:%s"}' % allow_remote_switch
                else:
                    config.set(["modules", "launcher", "allow_remote_connect"],
                               allow_remote_switch)
                    config.save()

                    try:
                        del module_init.xargs["allow_remote"]
                    except KeyError:
                        pass

                    if allow_remote_switch:
                        module_init.call_each_module("set_bind_ip",
                                                     {"ip": "0.0.0.0"})
                    else:
                        module_init.call_each_module("set_bind_ip",
                                                     {"ip": "127.0.0.1"})

                    data = '{"res":"success"}'

                    xlog.debug("restart web control.")
                    stop()
                    module_init.stop_all()
                    time.sleep(1)
                    start()
                    module_init.start_all_auto()
                    xlog.debug("launcher web control restarted.")
            elif 'show_systray' in reqs:
                show_systray = int(reqs['show_systray'][0])
                if show_systray != 0 and show_systray != 1:
                    data = '{"res":"fail, show_systray:%s"}' % show_systray
                else:
                    config.set(["modules", "launcher", "show_systray"],
                               show_systray)
                    config.save()

                    data = '{"res":"success"}'
            elif 'no_mess_system' in reqs:
                no_mess_system = int(reqs['no_mess_system'][0])
                if no_mess_system != 0 and no_mess_system != 1:
                    data = '{"res":"fail, show_systray:%s"}' % no_mess_system
                else:
                    config.set(["no_mess_system"], no_mess_system)
                    config.save()

                    data = '{"res":"success"}'
            elif 'keep_old_ver_num' in reqs:
                keep_old_ver_num = int(reqs['keep_old_ver_num'][0])
                if keep_old_ver_num < 0 or keep_old_ver_num > 99:
                    data = '{"res":"fail, keep_old_ver_num:%s not in range 0 to 99"}' % keep_old_ver_num
                else:
                    config.set(["modules", "launcher", "keep_old_ver_num"],
                               keep_old_ver_num)
                    config.save()

                    data = '{"res":"success"}'
            elif 'auto_start' in reqs:
                auto_start = int(reqs['auto_start'][0])
                if auto_start != 0 and auto_start != 1:
                    data = '{"res":"fail, auto_start:%s"}' % auto_start
                else:
                    if auto_start:
                        autorun.enable()
                    else:
                        autorun.disable()

                    config.set(["modules", "launcher", "auto_start"],
                               auto_start)
                    config.save()

                    data = '{"res":"success"}'
            elif 'show_detail' in reqs:
                show_detail = int(reqs['show_detail'][0])
                if show_detail != 0 and show_detail != 1:
                    data = '{"res":"fail, show_detail:%s"}' % show_detail
                else:
                    config.set(["modules", "gae_proxy", "show_detail"],
                               show_detail)
                    config.save()

                    data = '{"res":"success"}'
            elif 'gae_proxy_enable' in reqs:
                gae_proxy_enable = int(reqs['gae_proxy_enable'][0])
                if gae_proxy_enable != 0 and gae_proxy_enable != 1:
                    data = '{"res":"fail, gae_proxy_enable:%s"}' % gae_proxy_enable
                else:
                    config.set(["modules", "gae_proxy", "auto_start"],
                               gae_proxy_enable)
                    config.save()
                    if gae_proxy_enable:
                        module_init.start("gae_proxy")
                    else:
                        module_init.stop("gae_proxy")
                    self.load_module_menus()
                    data = '{"res":"success"}'
            elif 'x_tunnel_enable' in reqs:
                x_tunnel_enable = int(reqs['x_tunnel_enable'][0])
                if x_tunnel_enable != 0 and x_tunnel_enable != 1:
                    data = '{"res":"fail, x_tunnel_enable:%s"}' % x_tunnel_enable
                else:
                    config.set(["modules", "x_tunnel", "auto_start"],
                               x_tunnel_enable)
                    config.save()
                    if x_tunnel_enable:
                        module_init.start("x_tunnel")
                    else:
                        module_init.stop("x_tunnel")
                    self.load_module_menus()
                    data = '{"res":"success"}'
            elif 'smart_router_enable' in reqs:
                smart_router_enable = int(reqs['smart_router_enable'][0])
                if smart_router_enable != 0 and smart_router_enable != 1:
                    data = '{"res":"fail, smart_router_enable:%s"}' % smart_router_enable
                else:
                    config.set(["modules", "smart_router", "auto_start"],
                               smart_router_enable)
                    config.save()
                    if smart_router_enable:
                        module_init.start("smart_router")
                    else:
                        module_init.stop("smart_router")
                    self.load_module_menus()
                    data = '{"res":"success"}'
            elif 'postUpdateStat' in reqs:
                postUpdateStat = reqs['postUpdateStat'][0]
                if postUpdateStat not in ["noChange", "isNew", "isPostUpdate"]:
                    data = '{"res":"fail, postUpdateStat:%s"}' % postUpdateStat
                else:
                    config.set(["update", "postUpdateStat"], postUpdateStat)
                    config.save()
                    data = '{"res":"success"}'
            else:
                data = '{"res":"fail"}'
        elif reqs['cmd'] == ['get_version']:
            current_version = update_from_github.current_version()
            data = '{"current_version":"%s"}' % current_version

        self.send_response('text/html', data)
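# A minimal sketch, not part of the original handler: the responses above
# are hand-rolled JSON strings, which break as soon as a value contains a
# quote. Helpers like these (hypothetical names) built on json.dumps are
# safer and behave identically for the values used here.
import json

def ok_response():
    return json.dumps({"res": "success"})

def fail_response(field=None, value=None):
    msg = "fail" if field is None else "fail, %s:%s" % (field, value)
    return json.dumps({"res": msg})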
Example n. 39
        return None

    return {'url': url, 'response': response}


async def fetch_urls(loop, manifest):
    tasks = [loop.create_task(fetch(url)) for url in manifest]
    await asyncio.wait(tasks)
    return [task.result() for task in tasks]


def get(manifest):
    loop = asyncio.get_event_loop()
    responses = loop.run_until_complete(fetch_urls(loop, manifest))
    loop.close()
    return responses


def post_responses(url, responses):
    requests.post(url, data={'responses': responses})
    return


if __name__ == '__main__':
    start = time.time()
    app_config = config.load()
    responses = get(app_config['crawler']['manifest'])
    post_responses(app_config['data_store']['ip'], responses)
    end = time.time()
    print(f'total time {end - start}')
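# fetch() is referenced above but truncated in this excerpt; only its two
# return statements survive (see the fragment at the top of this example).
# A minimal sketch of such a coroutine, assuming the aiohttp client
# library (an assumption -- the original HTTP client is not shown):
import aiohttp

async def fetch(url):
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                response = await resp.text()
    except aiohttp.ClientError:
        return None
    return {'url': url, 'response': response}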
Example n. 40
def create_parser(product_choices=None):
    from mozlog import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        description="""Runner for web-platform-tests tests.""",
        usage="""%(prog)s [OPTION]... [TEST]...

TEST is either the full path to a test file to run, or the URL of a test excluding
scheme, host and port.""")
    parser.add_argument("--manifest-update",
                        action="store_true",
                        default=None,
                        help="Regenerate the test manifest.")
    parser.add_argument("--no-manifest-update",
                        action="store_false",
                        dest="manifest_update",
                        help="Prevent regeneration of the test manifest.")
    parser.add_argument(
        "--manifest-download",
        action="store_true",
        default=None,
        help="Attempt to download a preexisting manifest when updating.")

    parser.add_argument(
        "--timeout-multiplier",
        action="store",
        type=float,
        default=None,
        help="Multiplier relative to standard test timeout to use")
    parser.add_argument(
        "--run-by-dir",
        type=int,
        nargs="?",
        default=False,
        help="Split run into groups by directories. With a parameter,"
        "limit the depth of splits e.g. --run-by-dir=1 to split by top-level"
        "directory")
    parser.add_argument("--processes",
                        action="store",
                        type=int,
                        default=None,
                        help="Number of simultaneous processes to use")

    parser.add_argument("--no-capture-stdio",
                        action="store_true",
                        default=False,
                        help="Don't capture stdio and write to logging")

    mode_group = parser.add_argument_group("Mode")
    mode_group.add_argument(
        "--list-test-groups",
        action="store_true",
        default=False,
        help="List the top level directories containing tests that will run.")
    mode_group.add_argument(
        "--list-disabled",
        action="store_true",
        default=False,
        help="List the tests that are disabled on the current platform")
    mode_group.add_argument("--list-tests",
                            action="store_true",
                            default=False,
                            help="List all tests that will run")
    mode_group.add_argument("--verify",
                            action="store_true",
                            default=False,
                            help="Run a stability check on the selected tests")
    mode_group.add_argument(
        "--verify-log-full",
        action="store_true",
        default=False,
        help="Output per-iteration test results when running verify")

    test_selection_group = parser.add_argument_group("Test Selection")
    test_selection_group.add_argument("--test-types",
                                      action="store",
                                      nargs="*",
                                      default=wpttest.enabled_tests,
                                      choices=wpttest.enabled_tests,
                                      help="Test types to run")
    test_selection_group.add_argument("--include",
                                      action="append",
                                      help="URL prefix to include")
    test_selection_group.add_argument("--exclude",
                                      action="append",
                                      help="URL prefix to exclude")
    test_selection_group.add_argument(
        "--include-manifest",
        type=abs_path,
        help="Path to manifest listing tests to include")
    test_selection_group.add_argument(
        "--skip-timeout",
        action="store_true",
        help="Skip tests that are expected to time out")
    test_selection_group.add_argument(
        "--tag",
        action="append",
        dest="tags",
        help="Labels applied to tests to include in the run. "
        "Labels starting dir: are equivalent to top-level directories.")

    debugging_group = parser.add_argument_group("Debugging")
    debugging_group.add_argument(
        '--debugger',
        const="__default__",
        nargs="?",
        help="run under a debugger, e.g. gdb or valgrind")
    debugging_group.add_argument('--debugger-args',
                                 help="arguments to the debugger")
    debugging_group.add_argument(
        "--rerun",
        action="store",
        type=int,
        default=1,
        help="Number of times to re run each test without restarts")
    debugging_group.add_argument(
        "--repeat",
        action="store",
        type=int,
        default=1,
        help="Number of times to run the tests, restarting between each run")
    debugging_group.add_argument(
        "--repeat-until-unexpected",
        action="store_true",
        default=None,
        help="Run tests in a loop until one returns an unexpected result")
    debugging_group.add_argument(
        '--pause-after-test',
        action="store_true",
        default=None,
        help=
        "Halt the test runner after each test (this happens by default if only a single test is run)"
    )
    debugging_group.add_argument(
        '--no-pause-after-test',
        dest="pause_after_test",
        action="store_false",
        help=
        "Don't halt the test runner irrespective of the number of tests run")

    debugging_group.add_argument(
        '--pause-on-unexpected',
        action="store_true",
        help="Halt the test runner when an unexpected result is encountered")
    debugging_group.add_argument('--no-restart-on-unexpected',
                                 dest="restart_on_unexpected",
                                 default=True,
                                 action="store_false",
                                 help="Don't restart on an unexpected result")

    debugging_group.add_argument(
        "--symbols-path",
        action="store",
        type=url_or_path,
        help="Path or url to symbols file used to analyse crash minidumps.")
    debugging_group.add_argument(
        "--stackwalk-binary",
        action="store",
        type=abs_path,
        help="Path to stackwalker program used to analyse minidumps.")

    debugging_group.add_argument("--pdb",
                                 action="store_true",
                                 help="Drop into pdb on python exception")

    config_group = parser.add_argument_group("Configuration")
    config_group.add_argument("--binary",
                              action="store",
                              type=abs_path,
                              help="Binary to run tests against")
    config_group.add_argument('--binary-arg',
                              default=[],
                              action="append",
                              dest="binary_args",
                              help="Extra argument for the binary")
    config_group.add_argument("--webdriver-binary",
                              action="store",
                              metavar="BINARY",
                              type=abs_path,
                              help="WebDriver server binary to use")
    config_group.add_argument('--webdriver-arg',
                              default=[],
                              action="append",
                              dest="webdriver_args",
                              help="Extra argument for the WebDriver binary")

    config_group.add_argument(
        "--metadata",
        action="store",
        type=abs_path,
        dest="metadata_root",
        help="Path to root directory containing test metadata"),
    config_group.add_argument(
        "--tests",
        action="store",
        type=abs_path,
        dest="tests_root",
        help="Path to root directory containing test files"),
    config_group.add_argument(
        "--manifest",
        action="store",
        type=abs_path,
        dest="manifest_path",
        help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)"
    )
    config_group.add_argument(
        "--run-info",
        action="store",
        type=abs_path,
        help="Path to directory containing extra json files to add to run info"
    )
    config_group.add_argument("--product",
                              action="store",
                              choices=product_choices,
                              default=None,
                              help="Browser against which to run tests")
    config_group.add_argument("--config",
                              action="store",
                              type=abs_path,
                              dest="config",
                              help="Path to config file")
    config_group.add_argument(
        "--install-fonts",
        action="store_true",
        default=None,
        help="Allow the wptrunner to install fonts on your system")
    config_group.add_argument("--font-dir",
                              action="store",
                              type=abs_path,
                              dest="font_dir",
                              help="Path to local font installation directory",
                              default=None)

    build_type = parser.add_mutually_exclusive_group()
    build_type.add_argument(
        "--debug-build",
        dest="debug",
        action="store_true",
        default=None,
        help="Build is a debug build (overrides any mozinfo file)")
    build_type.add_argument(
        "--release-build",
        dest="debug",
        action="store_false",
        default=None,
        help="Build is a release (overrides any mozinfo file)")

    chunking_group = parser.add_argument_group("Test Chunking")
    chunking_group.add_argument("--total-chunks",
                                action="store",
                                type=int,
                                default=1,
                                help="Total number of chunks to use")
    chunking_group.add_argument("--this-chunk",
                                action="store",
                                type=int,
                                default=1,
                                help="Chunk number to run")
    chunking_group.add_argument(
        "--chunk-type",
        action="store",
        choices=["none", "equal_time", "hash", "dir_hash"],
        default=None,
        help="Chunking type to use")

    ssl_group = parser.add_argument_group("SSL/TLS")
    ssl_group.add_argument(
        "--ssl-type",
        action="store",
        default=None,
        choices=["openssl", "pregenerated", "none"],
        help=
        "Type of ssl support to enable (running without ssl may lead to spurious errors)"
    )

    ssl_group.add_argument("--openssl-binary",
                           action="store",
                           help="Path to openssl binary",
                           default="openssl")
    ssl_group.add_argument(
        "--certutil-binary",
        action="store",
        help="Path to certutil binary for use with Firefox + ssl")

    ssl_group.add_argument(
        "--ca-cert-path",
        action="store",
        type=abs_path,
        help="Path to ca certificate when using pregenerated ssl certificates")
    ssl_group.add_argument(
        "--host-key-path",
        action="store",
        type=abs_path,
        help="Path to host private key when using pregenerated ssl certificates"
    )
    ssl_group.add_argument(
        "--host-cert-path",
        action="store",
        type=abs_path,
        help="Path to host certificate when using pregenerated ssl certificates"
    )

    gecko_group = parser.add_argument_group("Gecko-specific")
    gecko_group.add_argument(
        "--prefs-root",
        dest="prefs_root",
        action="store",
        type=abs_path,
        help="Path to the folder containing browser prefs")
    gecko_group.add_argument("--disable-e10s",
                             dest="gecko_e10s",
                             action="store_false",
                             default=True,
                             help="Run tests without electrolysis preferences")
    gecko_group.add_argument(
        "--stackfix-dir",
        dest="stackfix_dir",
        action="store",
        help="Path to directory containing assertion stack fixing scripts")
    gecko_group.add_argument(
        "--setpref",
        dest="extra_prefs",
        action='append',
        default=[],
        metavar="PREF=VALUE",
        help="Defines an extra user preference (overrides those in prefs_root)"
    )
    gecko_group.add_argument("--leak-check",
                             dest="leak_check",
                             action="store_true",
                             help="Enable leak checking")
    gecko_group.add_argument(
        "--stylo-threads",
        action="store",
        type=int,
        default=1,
        help="Number of parallel threads to use for stylo")
    gecko_group.add_argument(
        "--reftest-internal",
        dest="reftest_internal",
        action="store_true",
        default=None,
        help="Enable reftest runner implemented inside Marionette")
    gecko_group.add_argument(
        "--reftest-external",
        dest="reftest_internal",
        action="store_false",
        help="Disable reftest runner implemented inside Marionette")
    gecko_group.add_argument(
        "--reftest-screenshot",
        dest="reftest_screenshot",
        action="store",
        choices=["always", "fail", "unexpected"],
        default="unexpected",
        help="With --reftest-internal, when to take a screenshot")
    gecko_group.add_argument(
        "--chaos",
        dest="chaos_mode_flags",
        action="store",
        nargs="?",
        const=0xFFFFFFFF,
        type=int,
        help="Enable chaos mode with the specified feature flag "
        "(see http://searchfox.org/mozilla-central/source/mfbt/ChaosMode.h for "
        "details). If no value is supplied, all features are activated")

    servo_group = parser.add_argument_group("Servo-specific")
    servo_group.add_argument(
        "--user-stylesheet",
        default=[],
        action="append",
        dest="user_stylesheets",
        help="Inject a user CSS stylesheet into every test.")

    sauce_group = parser.add_argument_group("Sauce Labs-specific")
    sauce_group.add_argument("--sauce-browser",
                             dest="sauce_browser",
                             help="Sauce Labs browser name")
    sauce_group.add_argument("--sauce-platform",
                             dest="sauce_platform",
                             help="Sauce Labs OS platform")
    sauce_group.add_argument("--sauce-version",
                             dest="sauce_version",
                             help="Sauce Labs browser version")
    sauce_group.add_argument("--sauce-build",
                             dest="sauce_build",
                             help="Sauce Labs build identifier")
    sauce_group.add_argument("--sauce-tags",
                             dest="sauce_tags",
                             nargs="*",
                             help="Sauce Labs identifying tag",
                             default=[])
    sauce_group.add_argument("--sauce-tunnel-id",
                             dest="sauce_tunnel_id",
                             help="Sauce Connect tunnel identifier")
    sauce_group.add_argument("--sauce-user",
                             dest="sauce_user",
                             help="Sauce Labs user name")
    sauce_group.add_argument("--sauce-key",
                             dest="sauce_key",
                             default=os.environ.get("SAUCE_ACCESS_KEY"),
                             help="Sauce Labs access key")
    sauce_group.add_argument("--sauce-connect-binary",
                             dest="sauce_connect_binary",
                             help="Path to Sauce Connect binary")

    webkit_group = parser.add_argument_group("WebKit-specific")
    webkit_group.add_argument("--webkit-port",
                              dest="webkit_port",
                              help="WebKit port")

    parser.add_argument(
        "test_list",
        nargs="*",
        help="List of URLs for tests to run, or paths including tests to run. "
        "(equivalent to --include)")

    commandline.log_formatters["wptreport"] = (formatters.WptreportFormatter,
                                               "wptreport format")

    commandline.add_logging_group(parser)
    return parser
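# The paired flags above (e.g. --pause-after-test / --no-pause-after-test)
# use default=None so downstream code can tell "flag not given" apart from
# an explicit choice. A standalone sketch of that tri-state pattern:
import argparse

p = argparse.ArgumentParser()
p.add_argument("--pause-after-test", action="store_true", default=None)
p.add_argument("--no-pause-after-test", dest="pause_after_test",
               action="store_false")
assert p.parse_args([]).pause_after_test is None            # neither flag
assert p.parse_args(["--pause-after-test"]).pause_after_test is True
assert p.parse_args(["--no-pause-after-test"]).pause_after_test is False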
Example n. 41
def create_parser_update(product_choices=None):
    from mozlog.structured import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        "web-platform-tests-update",
        description="Update script for web-platform-tests tests.")
    parser.add_argument("--product",
                        action="store",
                        choices=product_choices,
                        default=None,
                        help="Browser for which metadata is being updated")
    parser.add_argument("--config",
                        action="store",
                        type=abs_path,
                        help="Path to config file")
    parser.add_argument("--metadata",
                        action="store",
                        type=abs_path,
                        dest="metadata_root",
                        help="Path to the folder containing test metadata"),
    parser.add_argument("--tests",
                        action="store",
                        type=abs_path,
                        dest="tests_root",
                        help="Path to web-platform-tests"),
    parser.add_argument(
        "--manifest",
        action="store",
        type=abs_path,
        dest="manifest_path",
        help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)"
    )
    parser.add_argument(
        "--sync-path",
        action="store",
        type=abs_path,
        help="Path to store git checkout of web-platform-tests during update"),
    parser.add_argument(
        "--remote_url",
        action="store",
        help="URL of web-platfrom-tests repository to sync against"),
    parser.add_argument("--branch",
                        action="store",
                        type=abs_path,
                        help="Remote branch to sync against")
    parser.add_argument("--rev", action="store", help="Revision to sync to")
    parser.add_argument("--patch",
                        action="store_true",
                        dest="patch",
                        default=None,
                        help="Create a VCS commit containing the changes.")
    parser.add_argument(
        "--no-patch",
        action="store_false",
        dest="patch",
        help="Don't create a VCS commit containing the changes.")
    parser.add_argument(
        "--sync",
        dest="sync",
        action="store_true",
        default=False,
        help="Sync the tests with the latest from upstream (implies --patch)")
    parser.add_argument(
        "--ignore-existing",
        action="store_true",
        help=
        "When updating test results only consider results from the logfiles provided, not existing expectations."
    )
    parser.add_argument(
        "--stability",
        nargs="?",
        action="store",
        const="unstable",
        default=None,
        help=
        ("Reason for disabling tests. When updating test results, disable tests that have "
         "inconsistent results across many runs with the given reason."))
    parser.add_argument(
        "--continue",
        action="store_true",
        help="Continue a previously started run of the update script")
    parser.add_argument(
        "--abort",
        action="store_true",
        help="Clear state from a previous incomplete run of the update script")
    parser.add_argument(
        "--exclude",
        action="store",
        nargs="*",
        help="List of glob-style paths to exclude when syncing tests")
    parser.add_argument(
        "--include",
        action="store",
        nargs="*",
        help=
        "List of glob-style paths to include which would otherwise be excluded when syncing tests"
    )
    parser.add_argument(
        "--extra-property",
        action="append",
        default=[],
        help="Extra property from run_info.json to use in metadata update")
    # Should make this required iff run=logfile
    parser.add_argument("run_log",
                        nargs="*",
                        type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
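# Note on "--continue" above: argparse derives dest="continue", which is a
# Python keyword, so the value cannot be read as args.continue and needs
# getattr. A standalone sketch:
import argparse

p = argparse.ArgumentParser()
p.add_argument("--continue", action="store_true")
assert getattr(p.parse_args(["--continue"]), "continue") is True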
Example n. 42
PARSER.add_argument('--dry', action='store_true')
PARSER.add_argument('--chord-mode', action='store_true')
PARSER.add_argument('--test-mode', action='store_true')
PARSER.add_argument('--test-volume', action='store_true')
PARSER.add_argument('--fix-audiolink', action='store_true')
PARSER.add_argument('--ignore-clip', action='store_true')
PARSER.add_argument('--continue-rip', action='store_true')

PARSER.add_argument('--instrument', default="unknown")
PARSER.add_argument('--patch-name', default="unknown")

ARGS = PARSER.parse_args()
print(ARGS)

# Load settings from the config file
if not config.load(ARGS.config_file):
    print("Couldn't load config file [%s]" % ARGS.config_file)
    exit(1)

# Load logger
if not logger.load():
    print("Couldn't create logger file")
    exit(2)

# Set SOUND DEVICE
if audio.load():
    logger.LOGGER.info("SOUND-DEVICE: {}".format(audio.DEVICE.default.device))
else:
    logger.LOGGER.error(audio.DEVICES_LIST)
    exit(1)
Example n. 43
def create_parser(product_choices=None):
    from mozlog.structured import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        "web-platform-tests",
        description="Runner for web-platform-tests tests.")
    parser.add_argument("--metadata",
                        action="store",
                        type=abs_path,
                        dest="metadata_root",
                        help="Path to the folder containing test metadata"),
    parser.add_argument("--tests",
                        action="store",
                        type=abs_path,
                        dest="tests_root",
                        help="Path to web-platform-tests"),
    parser.add_argument("--prefs-root",
                        dest="prefs_root",
                        action="store",
                        type=abs_path,
                        help="Path to the folder containing browser prefs"),
    parser.add_argument("--config",
                        action="store",
                        type=abs_path,
                        help="Path to config file")
    parser.add_argument("--binary",
                        action="store",
                        type=abs_path,
                        help="Binary to run tests against")
    parser.add_argument("--test-types",
                        action="store",
                        nargs="*",
                        default=["testharness", "reftest"],
                        choices=["testharness", "reftest"],
                        help="Test types to run")
    parser.add_argument("--processes",
                        action="store",
                        type=int,
                        default=1,
                        help="Number of simultaneous processes to use")
    parser.add_argument("--include",
                        action="append",
                        type=slash_prefixed,
                        help="URL prefix to include")
    parser.add_argument("--exclude",
                        action="append",
                        type=slash_prefixed,
                        help="URL prefix to exclude")
    parser.add_argument("--include-manifest",
                        type=abs_path,
                        help="Path to manifest listing tests to include")

    parser.add_argument("--total-chunks",
                        action="store",
                        type=int,
                        default=1,
                        help="Total number of chunks to use")
    parser.add_argument("--this-chunk",
                        action="store",
                        type=int,
                        default=1,
                        help="Chunk number to run")
    parser.add_argument("--chunk-type",
                        action="store",
                        choices=["none", "equal_time", "hash"],
                        default=None,
                        help="Chunking type to use")

    parser.add_argument(
        "--list-test-groups",
        action="store_true",
        default=False,
        help="List the top level directories containing tests that will run.")
    parser.add_argument(
        "--list-disabled",
        action="store_true",
        default=False,
        help="List the tests that are disabled on the current platform")

    parser.add_argument(
        "--timeout-multiplier",
        action="store",
        type=float,
        default=None,
        help="Multiplier relative to standard test timeout to use")
    parser.add_argument("--repeat",
                        action="store",
                        type=int,
                        default=1,
                        help="Number of times to run the tests")

    parser.add_argument("--no-capture-stdio",
                        action="store_true",
                        default=False,
                        help="Don't capture stdio and write to logging")

    parser.add_argument("--product",
                        action="store",
                        choices=product_choices,
                        default="firefox",
                        help="Browser against which to run tests")

    parser.add_argument('--debugger',
                        help="run under a debugger, e.g. gdb or valgrind")
    parser.add_argument('--debugger-args', help="arguments to the debugger")
    parser.add_argument(
        '--pause-on-unexpected',
        action="store_true",
        help="Halt the test runner when an unexpected result is encountered")

    parser.add_argument(
        "--symbols-path",
        action="store",
        type=url_or_path,
        help="Path to symbols file used to analyse crash minidumps.")
    parser.add_argument(
        "--stackwalk-binary",
        action="store",
        type=abs_path,
        help="Path to stackwalker program used to analyse minidumps.")

    parser.add_argument(
        "--b2g-no-backup",
        action="store_true",
        default=False,
        help="Don't backup device before testrun with --product=b2g")

    commandline.add_logging_group(parser)
    return parser
Example n. 44
__DEBUG__ = 0

if __name__ == '__main__':

    if __DEBUG__:
        logging.basicConfig(filename='/fuzzy_match.log', level=logging.INFO)

    # Step 1 - Intro
    logging.info('Started Step 1 - Intro')
    intro()
    logging.info('Finished Step 1 - Intro')

    # Step 2 - Load Data
    logging.info('Started Step 2 - Data Load')
    a_c, dfl, dfr = load()
    logging.info('Finished Step 2 - Data Load')

    # Step 3 - Format Data
    logging.info('Started Step 3 - Data Setup')
    dfl = setup(dfl, a_c)
    dfr = setup(dfr, a_c)

    df_clavis = dfl.copy()
    df_clavis = df_clavis[['Id', 'Name', 'Address', 'Opps']]
    df_ocr = dfr.copy()
    df_ocr = df_ocr[['Id', 'Name', 'Address']]
    logging.info('Finished Step 3 - Data Setup')

    # Step 4 - Summarise Data
    logging.info('Started Step 4 - Data Summary')
Example n. 45
            v = v.get()
    return v


if __name__ == '__main__':
    # Argument
    parser = argparse.ArgumentParser(description='HyperFace training script')
    parser.add_argument('--config', '-c', default='config.json',
                        help='Load config from given json file')
    parser.add_argument('--model', required=True, help='Trained model path')
    args = parser.parse_args()

    logger.info('HyperFace Evaluation')

    # Load config
    config.load(args.config)

    # Define a model
    logger.info('Define a HyperFace model')
    model = models.HyperFaceModel()
    model.train = False
    model.report = False
    model.backward = False

    # Initialize model
    logger.info('Initialize a model using model "{}"'.format(args.model))
    chainer.serializers.load_npz(args.model, model)

    # Setup GPU
    if config.gpu >= 0:
        chainer.cuda.check_cuda_available()
Example n. 46
      # These citations only work for 'members of the MIT community'.
      return True
   else:
      return False

URLTYPES=[ "pdf", "ps", "txt", "ps_gz", "html" ]

if __name__ == '__main__':
   # First download the bibliography file.
   import BibTeX
   suggest = False
   if sys.argv[1] == 'suggest':
      suggest = True
      del sys.argv[1]

   config.load(sys.argv[1])
   if config.CACHE_UMASK is not None:
      os.umask(config.CACHE_UMASK)
   bib = BibTeX.parseFile(config.MASTER_BIB)
   remove_old()

   print "Downloading missing ranks."
   for ent in bib.entries:
      getCite(ent['title'], cache=True, update=True)

   if suggest:
      for ent in bib.entries:
         haveOne = False
         for utype in URLTYPES:
            if ent.has_key("www_%s_url"%utype):
               haveOne = True
Example n. 47
def _get_epochs(subject):
    # if already computed, let's load it from disk
    epo_fname = paths('epochs_vhp', subject=subject)
    if op.exists(epo_fname):
        return load('epochs_vhp', subject=subject, preload=True)

    # high pass filter and epoch
    for block in range(1, 6):

        raw = load('sss', subject=subject, block=block, preload=True)

        # Explicitly pick channels to ensure the same channels across subjects
        picks = [
            'STI101', 'EEG060', 'EOG061', 'EOG062', 'ECG063', 'EEG064',
            'MISC004'
        ]

        # Potentially add forgotten channels
        ch_type = dict(STI='stim', EEG='eeg', EOG='eog', ECG='ecg', MIS='misc')
        missing_chans = list()
        for channel in picks:
            if channel not in raw.ch_names:
                missing_chans.append(channel)
        if missing_chans:
            info = create_info(missing_chans, raw.info['sfreq'],
                               [ch_type[ch[:3]] for ch in missing_chans])
            raw.add_channels([
                RawArray(np.zeros(
                    (len(missing_chans), raw.n_times)), info, raw.first_samp)
            ],
                             force_update_info=True)

        # Select same channels order across subjects
        picks = [np.where(np.array(raw.ch_names) == ch)[0][0] for ch in picks]
        picks = np.r_[np.arange(306), picks]

        # Filtered
        raw.filter(2, 30, l_trans_bandwidth=.5, filter_length='30s', n_jobs=1)

        # Ensure same sampling rate
        if raw.info['sfreq'] != 1000.0:
            raw.resample(1000.0)

        # Select events
        events = find_events(raw, stim_channel='STI101', shortest_event=1)
        sel = np.where(events[:, 2] <= 255)[0]
        events = events[sel, :]

        # Compensate for delay (as measured manually with a photodiode)
        events[:, 0] += int(.050 * raw.info['sfreq'])

        # Epoch continuous data
        this_epochs = Epochs(raw,
                             events,
                             reject=None,
                             tmin=-.200,
                             tmax=1.6,
                             picks=picks,
                             baseline=None,
                             decim=10)
        save(this_epochs, 'epo_block', subject=subject, block=block)
        this_epochs._data = None
        raw._data = None  # free memory (Raw keeps its array in _data)
        del this_epochs, raw

    epochs = list()
    for block in range(1, 6):
        this_epochs = load('epo_block', subject=subject, block=block)
        epochs.append(this_epochs)
    epochs = concatenate_epochs(epochs)

    # save for faster retest
    save(epochs, 'epochs_vhp', subject=subject, overwrite=True, upload=False)

    return epochs
Example n. 48
    +str(volRat)
    )
  cmdHdr = ["amixer", "-c",hw['Speaker'],"cget","numid=3"]
  try:
    cmd = cmdHdr[:]
    output = check_output(cmd)
    lines = output.split("\n");
    for l in lines:
      if l.find(": values=") != -1: 
        var = l.split("=")
        var = var[1].split(",")
        vol=int(round(float(var[0])/volRat))

  except CalledProcessError as e:
    print(e.output)

  return vol

if __name__ == '__main__':
  os.chdir(os.path.dirname(sys.argv[0]))
  parser = argparse.ArgumentParser()
  parser.add_argument('-d', '--debug', action='store_true', help='set debug')
  parser.add_argument('-c', '--config', nargs=1, type=str,
                      default=[config.defaultSpecPath],
                      help='specify different config file')
  args = parser.parse_args()
  if args.debug:
    print("config path: " + args.config[0])
  config.load(args.config[0])
  makeRc()
  #setVolume(sys.argv[1])
  #print getVolume()

Example n. 49
import sys
import config
import comm
from MyAlgorithm import MyAlgorithm
from gui.threadGUI import ThreadGUI
from gui.GUI import MainWindow
from PyQt5.QtWidgets import QApplication
from sensors.cameraFilter import CameraFilter

import signal

signal.signal(signal.SIGINT, signal.SIG_DFL)

if __name__ == '__main__':

    cfg = config.load(sys.argv[1])

    #starting comm
    jdrc = comm.init(cfg, 'Introrob')

    cameraCli = jdrc.getCameraClient("Introrob.Camera")
    camera = CameraFilter(cameraCli)
    navdata = jdrc.getNavdataClient("Introrob.Navdata")
    pose = jdrc.getPose3dClient("Introrob.Pose3D")
    cmdvel = jdrc.getCMDVelClient("Introrob.CMDVel")
    extra = jdrc.getArDroneExtraClient("Introrob.Extra")

    algorithm = MyAlgorithm(camera, navdata, pose, cmdvel, extra)

    app = QApplication(sys.argv)
    frame = MainWindow()
Example n. 50
import asyncio
import config
import api
import progressbar
import time
import random

cfg = config.load()
cookies = config.read_cookies()


API = None

info = {}  # populated by get_user_info() below


async def get_user_info():
    r = await API.header_ajax()
    global info
    if r["islogin"] == 0:
        return False
    info["user_id"] = str(r["userInfo"]["user_id"])  # 用户id
    info["real_name"] = str(r["userInfo"]["real_name"])  # 真实姓名
    info["user_number"] = str(r["userInfo"]["user_number"])  # 学号
    info["plat_id"] = str(r["plat_id"])  # 平台id
    return True


async def get_term():
    r = await API.plat_term(info["plat_id"])
    info["term_id_list"] = []
Example n. 51
                    ],
                    "fabric_subnets": [
                        "30.1.1.1/24"
                    ],
                    "loopback_subnets": [
                        "20.1.1.1/24"
                    ],
                    "management_subnets": [
                        { "cidr": "192.168.10.1/24", "gateway": "192.168.10.1" }
                    ],
                    "node_profiles": [
                        {
                            "node_profile_name": "juniper-qfx5100"
                        },
                        {
                            "node_profile_name": "juniper-qfx10002"
                        }
                    ],
                    "device_count": 1
                }
            )
        except Exception as ex:
            self._exit_with_error(
                "Test failed due to unexpected error: %s" % str(ex))
    # end test


if __name__ == "__main__":
    SanityTestZtpWorkflow(config.load('config/test_config.yml')).test()
# end __main__
Example n. 52
# Licence: BSD 3-clause
"""Performs stats across subjects of decoding scores fitted within subjects"""
import numpy as np
from base import stats
from config import subjects, load, save
from conditions import analyses

# For each analysis of interest
for analysis in analyses:
    # Load decoding results
    print('load', analysis['name'])
    scores = list()
    for subject in subjects:
        # define path to file to be loaded
        score, times = load('score',
                            subject=subject,
                            analysis=analysis['name'])
        scores.append(score)

    scores = [sc for sc in scores if not np.isnan(sc[0][0])]
    if len(scores) < 7:
        print('%s: not enough subjects' % analysis['name'])
        continue
    chance = analysis['chance']
    alpha = 0.05

    # Compute stats: is decoding different from theoretical chance level (using
    # permutations across subjects)
    print('stats', analysis['name'])
    p_values = stats(np.array(scores) - chance)
    diag_offdiag = scores - np.tile([np.diag(sc) for sc in scores],
Example n. 53
    args.add_argument('--sample-rate', dest='sample_rate', default=44100)
    args.add_argument('--sample-enc', dest='sample_encoding', default='FLAC')
    args.add_argument('--interval', dest='interval', default='')
    args.add_argument('--lang-code', dest='language_code', default=None)
    args.add_argument('--file-ext', dest='file_ext', default='.flac')
    args.add_argument('--async', dest='async', action='store_true')  # NB: 'async' is a reserved word from Python 3.7 on
    args.add_argument('--async-sleep', dest='async_sleep', type=int, default=5)
    args.add_argument('--async-retry',
                      dest='async_retry',
                      type=int,
                      default=10)
    args.add_argument('--max-alts', dest='max_alts', type=int, default=2)

    args = args.parse_args()

    config = config.load(args.config_filename)
    a2t = AudioToText(config,
                      args.audio_filename,
                      sample_rate=args.sample_rate,
                      sample_encoding=args.sample_encoding,
                      language_code=args.language_code,
                      interval=args.interval,
                      file_ext=args.file_ext,
                      async=args.async,
                      async_sleep=args.async_sleep,
                      async_retry=args.async_retry,
                      debug=not args.quiet)
    if args.force:
        a2t.delete_compat_file()
        a2t.delete_remote_blob()
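# Note: 'async' became a reserved word in Python 3.7, so the attribute
# access args.async and the async= keyword above only parse on older
# interpreters. The same values stay reachable dynamically; a
# self-contained sketch (AudioToText itself is not involved):
import argparse

p = argparse.ArgumentParser()
p.add_argument('--async', dest='async', action='store_true')
ns = p.parse_args(['--async'])
use_async = getattr(ns, 'async')   # works on any Python 3
kwargs = {'async': use_async}      # forwardable as SomeClass(**kwargs)
assert use_async is True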
Example n. 54
accuracy_values = list()
precision_values = list()
precision_sum = 0.0
precision_value = 0.0
recall_values = list()
recall_sum = 0.0
recall_value = 0.0
f1_values = list()
f1_sum = 0.0
f1_value = 0.0
training_time_seconds = list()

args = ""

if __name__ == '__main__':

    args = config.load()
    preprocess_manager = Preprocess_Manager(args)

    # k-fold cross validation
    if args.cross_validation:

        # iterate folds
        for iteration_cross_validation in range(0, args.num_folds):
            preprocess_manager.iteration_cross_validation = iteration_cross_validation
            training_time_seconds.append(train.train(args, preprocess_manager))
            args.iteration_cross_validation = iteration_cross_validation
            test.test(args, preprocess_manager)
            accuracy_value, precision_value, recall_value, f1_value = metric.calc_metrics(args)
            accuracy_values.append(accuracy_value)
            precision_values.append(precision_value)
            recall_values.append(recall_value)
Example n. 55
import logging

import wx
import srctools.logger

import config
import utilities
from utilities import wxStyles

# The visibility of the log window is initially set to the value saved in the config file.

visible: bool = config.load('logWindowVisibility')
logger = srctools.logger.get_logger()


class LogHandler(logging.Handler):
    """
	this class represents the log handler, this will
	receive, format and send the log message to the window
	using the same BEE2.4 log format people are familiar with
	"""
    def __init__(self, wxDest=None):
        logger.debug('initialised log handler with level NOTSET')
        super().__init__(logging.NOTSET)
        self.setLevel(logging.NOTSET)

    def emit(self, record: logging.LogRecord):
        """
		receive, format, colorize and display a log message
		:param record: logging.LogRecord object
		"""
Example n. 56
def create_parser_update(product_choices=None):
    from mozlog.structured import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        "web-platform-tests-update",
        description="Update script for web-platform-tests tests.")
    parser.add_argument("--product",
                        action="store",
                        choices=product_choices,
                        default=None,
                        help="Browser for which metadata is being updated")
    parser.add_argument("--config",
                        action="store",
                        type=abs_path,
                        help="Path to config file")
    parser.add_argument("--metadata",
                        action="store",
                        type=abs_path,
                        dest="metadata_root",
                        help="Path to the folder containing test metadata"),
    parser.add_argument("--tests",
                        action="store",
                        type=abs_path,
                        dest="tests_root",
                        help="Path to web-platform-tests"),
    parser.add_argument(
        "--sync-path",
        action="store",
        type=abs_path,
        help="Path to store git checkout of web-platform-tests during update"),
    parser.add_argument(
        "--remote_url",
        action="store",
        help="URL of web-platfrom-tests repository to sync against"),
    parser.add_argument("--branch",
                        action="store",
                        type=abs_path,
                        help="Remote branch to sync against")
    parser.add_argument("--rev", action="store", help="Revision to sync to")
    parser.add_argument(
        "--no-patch",
        action="store_true",
        help="Don't create an mq patch or git commit containing the changes.")
    parser.add_argument("--sync",
                        dest="sync",
                        action="store_true",
                        default=False,
                        help="Sync the tests with the latest from upstream")
    parser.add_argument(
        "--ignore-existing",
        action="store_true",
        help=
        "When updating test results only consider results from the logfiles provided, not existing expectations."
    )
    parser.add_argument(
        "--continue",
        action="store_true",
        help="Continue a previously started run of the update script")
    parser.add_argument(
        "--abort",
        action="store_true",
        help="Clear state from a previous incomplete run of the update script")
    # Should make this required iff run=logfile
    parser.add_argument("run_log",
                        nargs="*",
                        type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
Example n. 57
 def __init__(self, parent: wx.Window):
     super().__init__(
         parent,  # parent
         title=f'Logs - {str(config.version)}',  # window title
         # these styles prevent the window from minimizing, maximizing or resizing, and keep it off the taskbar
         style=wx.FRAME_NO_TASKBAR | wxStyles.TITLEBAR_ONLY_BUTTON_CLOSE
         ^ wx.RESIZE_BORDER)  # init the window
     LogWindow.instance = self
     self.SetIcon(utilities.icon)
     self.SetSize(0, 0, 500, 365)
     sizer = wx.FlexGridSizer(rows=2, cols=1, gap=wx.Size(0, 0))
     try:
         pos = config.load('logWindowPos')
         if pos is not None:
             self.SetPosition(wx.Point(pos))
         else:
             self.SetPosition(wx.Point(100, 100))
     except config.ConfigError as e:
         logger.warning(e)  # not a problem if it fails
     self.text = wx.TextCtrl(
         self,
         style=wx.TE_MULTILINE | wx.TE_READONLY | wx.VSCROLL | wx.TE_RICH,
         size=wx.Size(self.GetSize()[0], 300))  # make the textbox
     self.logHandler = LogHandler()
     # set the log message format
     self.logHandler.setFormatter(
         logging.Formatter(
             # One letter for level name
             '[{levelname[0]}] {module}.{funcName}(): {message}\n',
             style='{',
         ))
     self.logHandler.setLevel(getLevel())
     logging.getLogger().addHandler(self.logHandler)
     # create bottom bar
     self.bottomBar = wx.Panel(self, size=wx.Size(
         self.GetSize()[0], 30))  # makes the bottom "menu" bar
     self.clearBtn = wx.Button(  # makes the clear button
         self.bottomBar,
         label=loc('window.log.btn.clear.name'),
         size=wx.Size(52, 22),
         pos=wx.Point(10, 3))
     self.clearBtn.SetToolTip(wx.ToolTip(loc('window.log.btn.clear.name')))
     self.copyBtn = wx.Button(self.bottomBar,
                              label=loc('window.log.btn.copy.name'),
                              size=wx.Size(52, 22),
                              pos=wx.Point(70, 3))
     self.copyBtn.SetToolTip(wx.ToolTip(loc('window.log.btn.copy.tooltip')))
     self.levelChoice = wx.Choice(
         parent=self.bottomBar,
         size=wx.Size(80, 22),
         pos=wx.Point(self.GetSize()[0] - 100, 3),
         choices=['Debug', 'Info', 'Warning', 'Error'])
      self.levelChoice.SetSelection((getLevel() // 10) - 1)  # int division: DEBUG=10..ERROR=40 map to 0..3
     self.levelChoice.Refresh()
      sizer.Add(self.text, flag=wx.BOTTOM)  # wx.Bottom does not exist; border flags belong in 'flag'
     sizer.Add(self.bottomBar)
     self.SetSizer(sizer)
     self.Bind(wx.EVT_CLOSE, self.OnClose, self)
     self.Bind(wx.EVT_MOVE_END, self.OnMoveEnd, self)
     self.Bind(wx.EVT_BUTTON, self.OnClearButtonPressed, self.clearBtn)
     self.Bind(wx.EVT_BUTTON, self.OnCopyButtonPressed, self.copyBtn)
     self.Bind(wx.EVT_CHOICE, self.OnLevelChoice, self.levelChoice)
     updateVisibility()
     self.levelChoice.Refresh()
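# The SetSelection call above leans on the numeric logging levels
# (DEBUG=10, INFO=20, WARNING=30, ERROR=40): level // 10 - 1 yields the
# Choice indices 0..3. A quick standalone check:
import logging

for lvl in (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR):
    assert 0 <= lvl // 10 - 1 <= 3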
Example n. 58
def main():
    logging.basicConfig()
    p = argparse.ArgumentParser(description="Basic DC-net relay")
    p.add_argument("-p",
                   "--port",
                   type=int,
                   help="Port to listen for \
                   connections on",
                   required=True,
                   dest="port")
    p.add_argument("config_dir")
    p.add_argument("-s",
                   "--socks",
                   type=str,
                   metavar="host:port",
                   help="SOCKS proxy address",
                   default="localhost:8080",
                   dest="socks_addr")
    p.add_argument("-v",
                   type=str,
                   help="display more output (default: WARN)",
                   choices=verbosity.keys(),
                   default="WARN",
                   dest="verbose")
    opts = p.parse_args()
    logger.setLevel(verbosity[opts.verbose])

    global socks_address
    saddr, sport = opts.socks_addr.split(":")
    socks_address = saddr, int(sport)

    system_config = config.load(config.SystemConfig,
                                os.path.join(opts.config_dir, "system.json"))
    nclients = len(system_config.clients.ids)
    ntrustees = len(system_config.trustees.ids)
    naps = len(system_config.aps.ids)

    # start up a new relay
    relay = dcnet.Relay(ntrustees, NullAccumulator(), NullDecoder())
    relay.add_nyms(nclients)
    relay.sync(None)

    # server socket
    print("Starting relay on {}".format(opts.port))
    ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ssock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    ssock.bind(("0.0.0.0", opts.port))
    ssock.listen(1024)

    # make sure everybody connects
    print(("Waiting for {} clients, {} trustees, and {} access points").format(
        nclients, ntrustees, naps))
    ccli, ctru, caps = 0, 0, 0
    crsocks = [None] * nclients
    cwsocks = []
    tsocks = [None] * ntrustees
    apsocks = [None] * naps
    while ccli < nclients or ctru < ntrustees or caps < naps:
        conn, addr = ssock.accept()
        buf = bytearray(1)
        buf[0] = kind = bytes_to_long(conn.recv(1))
        buf.extend(conn.recv(m.sizes[kind] - 1))
        reg = {}
        m.unpack(buf, reg)
        conn.setblocking(0)

        if reg['kind'] == m.TRUSTEE_CONNECT and ctru < ntrustees:
            if tsocks[reg['node']] is not None:
                sys.exit("Trustee connected twice")
            tsocks[reg['node']] = conn
            ctru += 1
        elif reg['kind'] == m.CLIENT_CONNECT and ccli < nclients:
            if crsocks[reg['node']] is not None:
                sys.exit("Clients connected twice")
            crsocks[reg['node']] = conn
            if reg['ap'] == -1:
                cwsocks.append(conn)
            ccli += 1
        elif reg['kind'] == m.AP_CONNECT and caps < naps:
            if apsocks[reg['node']] is not None:
                sys.exit("Access point connected twice")
            apsocks[reg['node']] = conn
            cwsocks.append(conn)
            caps += 1
            global downcellmax
            # Leave room for IP/UDP headers and AP headers
            downcellmax = downmax - 28 - m.overhead[m.AP_DOWNSTREAM] - \
                                        m.overhead[m.RELAY_DOWNSTREAM]
        else:
            sys.exit("Illegal node number or connection type")
    print("All clients, trustees, and access points connected")

    upstreams = {}
    downstream = asyncio.Queue()
    scheduler = itertools.cycle(range(nclients))

    # start the main relay loop
    asyncio.ensure_future(main_loop(relay, tsocks, crsocks, cwsocks, upstreams,
                                    downstream, scheduler))
    loop = asyncio.get_event_loop()
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    loop.close()
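# The registration handshake above reads one byte to learn the message
# kind, then m.sizes[kind] - 1 more bytes to complete the fixed-size
# message. The same logic as a helper (a sketch; read_message is a
# hypothetical name, m is the message module used in main()):
def read_message(conn, m):
    # NB: like the original, this assumes recv() returns the full count
    buf = bytearray(conn.recv(1))
    kind = buf[0]
    buf.extend(conn.recv(m.sizes[kind] - 1))
    reg = {}
    m.unpack(buf, reg)
    return reg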
Example n. 59
def main(config_file: str):
    config = cfg.load(config_file, cfg.Configuration)
    print(config)

    indexing_strategy = pymia_extr.SliceIndexing()  # slice-wise extraction
    extraction_transform = None  # we do not want to apply any transformation on the slices after extraction
    # define an extractor for training, i.e. what information we would like to extract per sample
    train_extractor = pymia_extr.ComposeExtractor([pymia_extr.NamesExtractor(),
                                                   pymia_extr.DataExtractor(),
                                                   pymia_extr.SelectiveDataExtractor()])

    # define an extractor for testing, i.e. what information we would like to extract per sample
    # note that usually we don't use labels for testing, i.e. the SelectiveDataExtractor is only used for this example
    test_extractor = pymia_extr.ComposeExtractor([pymia_extr.NamesExtractor(),
                                                  pymia_extr.IndexingExtractor(),
                                                  pymia_extr.DataExtractor(),
                                                  pymia_extr.SelectiveDataExtractor(),
                                                  pymia_extr.ImageShapeExtractor()])

    # define an extractor for evaluation, i.e. what information we would like to extract per sample
    eval_extractor = pymia_extr.ComposeExtractor([pymia_extr.NamesExtractor(),
                                                  pymia_extr.SubjectExtractor(),
                                                  pymia_extr.SelectiveDataExtractor(),
                                                  pymia_extr.ImagePropertiesExtractor()])

    # define the data set
    dataset = pymia_extr.ParameterizableDataset(config.database_file,
                                                indexing_strategy,
                                                pymia_extr.SubjectExtractor(),  # for select_indices() below
                                                extraction_transform)

    # generate train / test split for data set
    # we use Subject_0, Subject_1 and Subject_2 for training and Subject_3 for testing
    sampler_ids_train = pymia_extr.select_indices(dataset,
                                                  pymia_extr.SubjectSelection(('Subject_0', 'Subject_1', 'Subject_2')))
    sampler_ids_test = pymia_extr.select_indices(dataset,
                                                 pymia_extr.SubjectSelection(('Subject_3',)))

    # set up training data loader
    training_sampler = pymia_extr.SubsetRandomSampler(sampler_ids_train)
    training_loader = pymia_extr.DataLoader(dataset, config.batch_size_training, sampler=training_sampler,
                                            collate_fn=collate_batch, num_workers=1)

    # set up testing data loader
    testing_sampler = pymia_extr.SubsetSequentialSampler(sampler_ids_test)
    testing_loader = pymia_extr.DataLoader(dataset, config.batch_size_testing, sampler=testing_sampler,
                                           collate_fn=collate_batch, num_workers=1)

    sample = dataset.direct_extract(train_extractor, 0)  # extract a subject

    evaluator = init_evaluator()  # initialize evaluator

    for epoch in range(config.epochs):  # epochs loop
        dataset.set_extractor(train_extractor)
        for batch in training_loader:  # batches for training
            # feed_dict = batch_to_feed_dict(x, y, batch, True)  # e.g. for TensorFlow
            # train model, e.g.:
            # sess.run([train_op, loss], feed_dict=feed_dict)
            pass

        # subject assembler for testing
        subject_assembler = pymia_asmbl.SubjectAssembler()

        dataset.set_extractor(test_extractor)
        for batch in testing_loader:  # batches for testing
            # feed_dict = batch_to_feed_dict(x, y, batch, False)  # e.g. for TensorFlow
            # test model, e.g.:
            # prediction = sess.run(y_model, feed_dict=feed_dict)
            # we use the labels as predictions such that we can validate the assembler
            prediction = np.stack(batch['labels'], axis=0)
            subject_assembler.add_batch(prediction, batch)

        # evaluate all test images
        for subject_idx in list(subject_assembler.predictions.keys()):
            # convert prediction and labels back to SimpleITK images
            sample = dataset.direct_extract(eval_extractor, subject_idx)
            label_image = pymia_conv.NumpySimpleITKImageBridge.convert(sample['labels'],
                                                                       sample['properties'])

            assembled = subject_assembler.get_assembled_subject(sample['subject_index'])
            prediction_image = pymia_conv.NumpySimpleITKImageBridge.convert(assembled, sample['properties'])
            evaluator.evaluate(prediction_image, label_image, sample['subject'])  # evaluate prediction
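
# hypothetical usage, assuming a JSON config that defines database_file,
# batch_size_training, batch_size_testing, and epochs:
#   main('config.json')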
import os
import logging
import json
import consul

import config

conf = config.load()


def hostname():
    import socket
    return socket.gethostname()


def get_platform_pattern():
    import glob
    import yaml

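    # each pattern lives in its own directory under PATTERNS_DIR alongside a
    # metadata.yml; e.g. a (hypothetical) webstack/metadata.yml declaring
    # 'type: platform' would make this return ('webstack', data)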
    for path in glob.glob(config.PATTERNS_DIR + '/**'):
        f = os.path.join(path, 'metadata.yml')
        if os.path.isfile(f):
            with open(f) as fh:
                data = yaml.safe_load(fh)
            if data and data.get('type') == 'platform':
                return os.path.basename(path), data

    return None, None


def node_address(hostname):