Example #1
    def run(tasks, worker_scheduler_factory=None, override_defaults={}):

        if worker_scheduler_factory is None:
            worker_scheduler_factory = WorkerSchedulerFactory()

        env_params = EnvironmentParamsContainer.env_params(override_defaults)
        # search for logging configuration path first on the command line, then
        # in the application config file
        logging_conf = env_params.logging_conf_file or \
            configuration.get_config().get('core', 'logging_conf_file', None)
        if logging_conf is not None and not os.path.exists(logging_conf):
            raise Exception("Error: Unable to locate specified logging configuration file!")

        if not configuration.get_config().getboolean('core', 'no_configure_logging', False):
            setup_interface_logging(logging_conf)

        if env_params.lock and not(lock.acquire_for(env_params.lock_pid_dir)):
            sys.exit(1)

        if env_params.local_scheduler:
            sch = worker_scheduler_factory.create_local_scheduler()
        else:
            sch = worker_scheduler_factory.create_remote_scheduler(host=env_params.scheduler_host, port=env_params.scheduler_port)

        w = worker_scheduler_factory.create_worker(scheduler=sch, worker_processes=env_params.workers)

        for task in tasks:
            w.add(task)
        logger = logging.getLogger('luigi-interface')
        logger.info('Done scheduling tasks')
        w.run()
        w.stop()
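The logging options consulted above come from Luigi's ini-style configuration file (the exact filename, e.g. client.cfg, varies by Luigi version). A minimal sketch of the relevant [core] section, with a purely illustrative path:

    [core]
    logging_conf_file: /etc/luigi/logging.cfg
    no_configure_logging: false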
Example #2
File: s3.py Project: grubino/luigi
 def __init__(self, aws_access_key_id=None, aws_secret_access_key=None):
     if not aws_access_key_id:
         aws_access_key_id = configuration.get_config().get('s3', 'aws_access_key_id')
     if not aws_secret_access_key:
         aws_secret_access_key = configuration.get_config().get('s3', 'aws_secret_access_key')
     
     self.s3 = S3Connection(aws_access_key_id,
                            aws_secret_access_key,
                            is_secure=True)
Example #3
File: s3.py Project: sonatype/luigi
 def __init__(self, aws_access_key_id=None, aws_secret_access_key=None):
     if not aws_access_key_id:
         aws_access_key_id = configuration.get_config().get('s3', 'aws_access_key_id', os.getenv('S3_ACCESS_KEY_ID'))
     if not aws_secret_access_key:
         aws_secret_access_key = configuration.get_config().get('s3', 'aws_secret_access_key', os.getenv('S3_SECRET_ACCESS_KEY'))
     
     self.s3 = S3Connection(aws_access_key_id,
                            aws_secret_access_key,
                            is_secure=True)
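Both constructors above fall back to the [s3] section of the Luigi config when credentials are not passed in (Example #3 additionally falls back to the S3_ACCESS_KEY_ID and S3_SECRET_ACCESS_KEY environment variables). A hypothetical config section with placeholder values:

    [s3]
    aws_access_key_id: AKIAXXXXXXXXXXXXXXXX
    aws_secret_access_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX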
Example #4
 def apply_config_defaults(cls):
     cls.scheduler_host.set_default(
         configuration.get_config().get(
             'core', 'default-scheduler-host', 'localhost'))
     cls.scheduler_port.set_default(
         configuration.get_config().get(
             'core', 'default-scheduler-port', 8082))
     cls.logging_conf_file.set_default(
         configuration.get_config().get(
             'core', 'logging_conf_file', None))
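A sketch of the [core] options these defaults are read from (the host and path values are only examples):

    [core]
    default-scheduler-host: scheduler.example.com
    default-scheduler-port: 8082
    logging_conf_file: /etc/luigi/logging.cfg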
Example #5
	def do_connect(self, evt):
		evt.Skip()
		last_cons = configuration.get_config()['connections']['last_connected']
		last = ''
		if last_cons:
			last = last_cons[-1]
		# Translators: Title of the connect dialog.
		dlg = dialogs.DirectConnectDialog(parent=gui.mainFrame, id=wx.ID_ANY, title=_("Connect"))
		dlg.panel.host.SetValue(last)
		dlg.panel.host.SelectAll()
		def handle_dlg_complete(dlg_result):
			if dlg_result != wx.ID_OK:
				return
			if dlg.client_or_server.GetSelection() == 0: #client
				server_addr = dlg.panel.host.GetValue()
				server_addr, port = address_to_hostport(server_addr)
				channel = dlg.panel.key.GetValue()
				if dlg.connection_type.GetSelection() == 0:
					self.connect_as_master((server_addr, port), channel)
				else:
					self.connect_as_slave((server_addr, port), channel)
			else: #We want a server
				channel = dlg.panel.key.GetValue()
				self.start_control_server(int(dlg.panel.port.GetValue()), channel)
				if dlg.connection_type.GetSelection() == 0:
					self.connect_as_master(('127.0.0.1', int(dlg.panel.port.GetValue())), channel)
				else:
					self.connect_as_slave(('127.0.0.1', int(dlg.panel.port.GetValue())), channel)
		gui.runScriptModalDialog(dlg, callback=handle_dlg_complete)
Example #6
    def __init__(self, scheduler=CentralPlannerScheduler(), worker_id=None,
                 worker_processes=1, ping_interval=None, keep_alive=None,
                 wait_interval=None):
        self._worker_info = self._generate_worker_info()

        if not worker_id:
            worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info])

        config = configuration.get_config()

        if ping_interval is None:
            ping_interval = config.getfloat('core', 'worker-ping-interval', 1.0)

        if keep_alive is None:
            keep_alive = config.getboolean('core', 'worker-keep-alive', False)
        self.__keep_alive = keep_alive

        if keep_alive:
            if wait_interval is None:
                wait_interval = config.getint('core', 'worker-wait-interval', 1)
            self.__wait_interval = wait_interval

        self._id = worker_id
        self._scheduler = scheduler
        if (isinstance(scheduler, CentralPlannerScheduler)
                and worker_processes != 1):
            warnings.warn("Will only use one process when running with local in-process scheduler")
            worker_processes = 1

        self.worker_processes = worker_processes
        self.host = socket.gethostname()
        self._scheduled_tasks = {}

        # store the previous tasks executed by the same worker
        # for debugging reasons
        self._previous_tasks = []

        class KeepAliveThread(threading.Thread):
            """ Periodically tell the scheduler that the worker still lives """
            def __init__(self):
                super(KeepAliveThread, self).__init__()
                self._should_stop = threading.Event()

            def stop(self):
                self._should_stop.set()

            def run(self):
                while True:
                    self._should_stop.wait(ping_interval)
                    if self._should_stop.is_set():
                        logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % worker_id)
                        break
                    try:
                        scheduler.ping(worker=worker_id)
                    except:  # httplib.BadStatusLine:
                        logger.warning('Failed pinging scheduler')

        self._keep_alive_thread = KeepAliveThread()
        self._keep_alive_thread.daemon = True
        self._keep_alive_thread.start()
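The worker tuning knobs read above live in the [core] section; an illustrative sketch (intervals are in seconds, and keep-alive defaults to off):

    [core]
    worker-ping-interval: 1.0
    worker-keep-alive: true
    worker-wait-interval: 1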
Example #7
 def __init__(self):
     config = configuration.get_config()
     connection_string = config.get('task_history', 'db_connection')
     self.engine = sqlalchemy.create_engine(connection_string)
     self.session_factory = sqlalchemy.orm.sessionmaker(bind=self.engine, expire_on_commit=False)
     Base.metadata.create_all(self.engine)
     self.tasks = {}  # task_id -> TaskRecord
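The db_connection value is handed straight to sqlalchemy.create_engine, so any SQLAlchemy connection URL should work; a hypothetical [task_history] section:

    [task_history]
    db_connection: sqlite:///luigi-task-history.db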
Example #8
File: s3.py Project: genba/luigi
 def _get_s3_config(self, key):
     try:
         return configuration.get_config().get('s3', key)
     except NoSectionError:
         return None
     except NoOptionError:
         return None
Example #9
def test_fg_utils():
  domain_meta_file = '/usr0/home/ymovshov/Documents/Research/Code/3rd_Party/fgcomp2013/release/domain_meta.txt'
  class_meta_file = '/usr0/home/ymovshov/Documents/Research/Code/3rd_Party/fgcomp2013/release/class_meta.txt'
  train_ann_file = '/usr0/home/ymovshov/Documents/Research/Code/3rd_Party/fgcomp2013/release/dataset.txt'

  print 'before load'
  domain_meta = fgu.read_domain_meta(domain_meta_file)
  class_meta = fgu.read_class_meta(class_meta_file)
  dataset = fgu.read_image_annotations(train_ann_file)
  print 'after load'

  print domain_meta.dtypes
  print '-------------'
  print class_meta.dtypes
  print '-------------'
  print dataset.dtypes

  print 'Showing the first two lines of dataset'
  print '------------------------------------------'
  print dataset.iloc[0:2,:]

  print ' '
  print 'Trying single function to load all data'
  print '------------------------------------------'
  config = get_config(small=True)
  (dataset, class_meta, domain_meta) = fgu.get_all_metadata(config)
  print dataset.iloc[0:2,:]
Example #10
	def __init__(self, *args, **kwargs):
		super(GlobalPlugin, self).__init__(*args, **kwargs)
		self.local_machine = local_machine.LocalMachine()
		self.slave_session = None
		self.master_session = None
		self.create_menu()
		self.connecting = False
		self.url_handler_window = url_handler.URLHandlerWindow(callback=self.verify_connect)
		url_handler.register_url_handler()
		self.master_transport = None
		self.slave_transport = None
		self.server = None
		self.hook_thread = None
		self.sending_keys = False
		self.key_modified = False
		self.sd_server = None
		self.sd_relay = None
		self.sd_bridge = None
		cs = configuration.get_config()['controlserver']
		self.temp_location = os.path.join(shlobj.SHGetFolderPath(0, shlobj.CSIDL_COMMON_APPDATA), 'temp')
		self.ipc_file = os.path.join(self.temp_location, 'remote.ipc')
		if globalVars.appArgs.secure:
			self.handle_secure_desktop()
		if cs['autoconnect'] and not self.master_session and not self.slave_session:
			self.perform_autoconnect()
		self.sd_focused = False
Example #11
def _create_scheduler():
    config = configuration.get_config()
    retry_delay = config.getfloat('scheduler', 'retry-delay', 900.0)
    remove_delay = config.getfloat('scheduler', 'remove-delay', 600.0)
    worker_disconnect_delay = config.getfloat('scheduler',
                                              'worker-disconnect-delay', 60.0)
    return scheduler.CentralPlannerScheduler(retry_delay, remove_delay,
                                             worker_disconnect_delay)
Example #12
def import_keytab(node, keytab_file):
    "import and encrypt a keytab for a particular server"

    if not configuration.get_config().has_node(node):
        command.fail("no such node: %s" % node)
    keytab_target = os.path.join(configuration.get_project(),
                                 "keytab.%s.crypt" % node)
    keycrypt.gpg_encrypt_file(keytab_file, keytab_target)
Example #13
def check_kube_init():
    config = configuration.get_config()
    master_node_count = len(
        [node for node in config.nodes if node.kind == "master"])
    expect_prometheus_query_exact('sum(up{job="kubernetes-apiservers"})',
                                  master_node_count,
                                  "kubernetes apiservers are online")
    print("all", master_node_count, "kubernetes apiservers seem to be online!")
Example #14
File: hdfs.py Project: nailor/luigi
def get_configured_hadoop_version():
    """
    CDH4 (hadoop 2+) has a slightly different syntax for interacting with hdfs
    via the command line. The default version is CDH4, but one can override
    this setting with "cdh3" or "apache1" in the hadoop section of the config
    in order to use the old syntax
    """
    return configuration.get_config().get("hadoop", "version", "cdh4").lower()
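As the docstring notes, the default is cdh4; overriding it is just a matter of setting the version option in the [hadoop] section of the config, for instance:

    [hadoop]
    version: cdh3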
Example #15
def setup_prometheus(ops: Operations) -> None:
    config = configuration.get_config()
    for node in config.nodes:
        if node.kind != "supervisor":
            continue
        ops.ssh_upload_bytes("upload prometheus config to @HOST", node, configuration.get_prometheus_yaml().encode(),
                             "/etc/prometheus.yaml")
        ops.ssh("restart prometheus on @HOST", node, "systemctl", "restart", "prometheus")
Example #16
def check_certs_on_supervisor():
    config = configuration.get_config()
    for node in config.nodes:
        if node.kind == "supervisor":
            ssh.check_ssh(node, "test", "-e",
                          "/etc/homeworld/authorities/kubernetes.pem")
            ssh.check_ssh(node, "test", "-e",
                          "/etc/homeworld/keys/kubernetes-worker.pem")
Example #17
File: server.py Project: bu2/luigi
def _create_scheduler():
    config = configuration.get_config()
    retry_delay = config.getfloat('scheduler', 'retry-delay', 900.0)
    remove_delay = config.getfloat('scheduler', 'remove-delay', 600.0)
    worker_disconnect_delay = config.getfloat('scheduler', 'worker-disconnect-delay', 60.0)
    use_task_history = config.getboolean('scheduler', 'record_task_history', False)
    task_history = DbTaskHistory() if use_task_history else NopHistory()
    return scheduler.CentralPlannerScheduler(retry_delay, remove_delay, worker_disconnect_delay, task_history)
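A sketch of the [scheduler] options read here, including the flag that switches on DbTaskHistory (the delay values shown are the code's defaults, in seconds; record_task_history defaults to false):

    [scheduler]
    retry-delay: 900.0
    remove-delay: 600.0
    worker-disconnect-delay: 60.0
    record_task_history: true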
Example #18
def check_online():
    config = configuration.get_config()
    nodes_expected = len(config.nodes)
    expect_prometheus_query_exact('sum(up{job="node-resources"})',
                                  nodes_expected, "nodes are online")
    expect_prometheus_query_exact('sum(keysystem_ssh_access_check)',
                                  nodes_expected, "nodes are accessible")
    print("all", nodes_expected, "nodes are online and accessible")
Example #19
def get_configured_hadoop_version():
    """
    CDH4 (hadoop 2+) has a slightly different syntax for interacting with hdfs
    via the command line. The default version is CDH4, but one can override
    this setting with "cdh3" or "apache1" in the hadoop section of the config
    in order to use the old syntax
    """
    return configuration.get_config().get("hadoop", "version", "cdh4").lower()
Example #20
    def __init__(self, scheduler=CentralPlannerScheduler(), worker_id=None,
                 worker_processes=1, ping_interval=None, keep_alive=None,
                 wait_interval=None):
        if not worker_id:
            worker_id = 'worker-%09d' % random.randrange(0, 999999999)

        config = configuration.get_config()

        if ping_interval is None:
            ping_interval = config.getfloat('core', 'worker-ping-interval', 1.0)

        if keep_alive is None:
            keep_alive = config.getboolean('core', 'worker-keep-alive', False)
        self.__keep_alive = keep_alive

        if keep_alive:
            if wait_interval is None:
                wait_interval = config.getint('core', 'worker-wait-interval', 1)
            self.__wait_interval = wait_interval

        self.__id = worker_id
        self.__scheduler = scheduler
        if (isinstance(scheduler, CentralPlannerScheduler)
                and worker_processes != 1):
            warnings.warn("Will only use one process when running with local in-process scheduler")
            worker_processes = 1

        self.worker_processes = worker_processes
        self.host = socket.gethostname()
        self.__scheduled_tasks = {}

        # store the previous tasks executed by the same worker
        # for debugging reasons
        self._previous_tasks = []

        class KeepAliveThread(threading.Thread):
            """ Periodically tell the scheduler that the worker still lives """
            def __init__(self):
                super(KeepAliveThread, self).__init__()
                self._should_stop = threading.Event()

            def stop(self):
                self._should_stop.set()

            def run(self):
                while True:
                    self._should_stop.wait(ping_interval)
                    if self._should_stop.is_set():
                        logger.info("Worker was stopped. Shutting down Keep-Alive thread")
                        break
                    try:
                        scheduler.ping(worker=worker_id)
                    except:  # httplib.BadStatusLine:
                        logger.warning('Failed pinging scheduler')

        self._keep_alive_thread = KeepAliveThread()
        self._keep_alive_thread.daemon = True
        self._keep_alive_thread.start()
Example #21
    def run(tasks, worker_scheduler_factory=None, override_defaults={}):

        if worker_scheduler_factory is None:
            worker_scheduler_factory = WorkerSchedulerFactory()

        env_params = EnvironmentParamsContainer.env_params(override_defaults)
        # search for logging configuration path first on the command line, then
        # in the application config file
        logging_conf = env_params.logging_conf_file or \
            configuration.get_config().get('core', 'logging_conf_file', None)
        if logging_conf is not None and not os.path.exists(logging_conf):
            raise Exception(
                "Error: Unable to locate specified logging configuration file!"
            )

        if not configuration.get_config().getboolean(
                'core', 'no_configure_logging', False):
            setup_interface_logging(logging_conf)

        if env_params.lock:
            warnings.warn(
                "The --lock flag is deprecated and will be removed."
                "Locking is now the default behavior."
                "Use --no-lock to override to not use lock",
                DeprecationWarning)

        if (not env_params.no_lock
                and not (lock.acquire_for(env_params.lock_pid_dir))):
            sys.exit(1)

        if env_params.local_scheduler:
            sch = worker_scheduler_factory.create_local_scheduler()
        else:
            sch = worker_scheduler_factory.create_remote_scheduler(
                host=env_params.scheduler_host, port=env_params.scheduler_port)

        w = worker_scheduler_factory.create_worker(
            scheduler=sch, worker_processes=env_params.workers)

        for t in tasks:
            w.add(t)
        logger = logging.getLogger('luigi-interface')
        logger.info('Done scheduling tasks')
        w.run()
        w.stop()
Example #22
def get_hipchat_token():
    global hipchat_token
    if hipchat_token != None:
        return hipchat_token
    hipchat_token = get_config("hipchat.token")
    if hipchat_token is None:
        handle_missing_config("Please set your Hipchat private API token (you can grab it from here https://evme.hipchat.com/account/api):", 'hipchat.token')
    else:
        return hipchat_token
Example #23
def get_all_metadata(config=None, args=None):
  if config == None and args == None:
    raise Exception('Either config or args need to be not None')
  if config == None:
    config = get_config(args)
    
  class_meta  = read_class_meta(config.dataset.class_meta_file)
  attrib_meta_with_name = read_attribute_meta(config.dataset.attrib_meta_file)
  attrib_meta = attrib_meta_with_name.drop('class_name',axis=1)
  train_annos = read_image_annotations(config.dataset.train_annos_file)
  test_annos = read_image_annotations(config.dataset.test_annos_file,
                                      has_class_id=False)
  domain_meta = read_domain_meta(config.dataset.domain_meta_file)
  train_annos['class_name'] = np.array([class_meta.class_name[class_index] for 
                                         class_index in 
                                         train_annos.class_index])
#   test_annos['class_name'] = np.array([class_meta.class_name[class_index] for 
#                                          class_index in 
#                                          test_annos.class_index])

  # Prepend path to the dataset to each img_path
  train_annos.img_path = train_annos.img_path.apply(lambda x: config.dataset.main_path.joinpath(x).abspath())
  test_annos.img_path = test_annos.img_path.apply(lambda x: config.dataset.main_path.joinpath(x).abspath())

  # Filter the class meta and train/test annotations to just use the 
  # domains defined in config
  class_meta = class_meta[class_meta.domain_index.isin(config.dataset.domains)]
  train_annos = train_annos[train_annos.domain_index.isin(config.dataset.domains)]
  test_annos = test_annos[test_annos.domain_index.isin(config.dataset.domains)]
  
  
  # Create dev set
  dev_annos_train, dev_annos_test = create_dev_set(train_annos, 
                                                   config)

  # Should we use the dev set as the test set
  if config.dataset.dev_set.use:
    train_used, test_used = dev_annos_train, dev_annos_test 
  else:
    train_used, test_used = train_annos, test_annos
    
    
  if config.flip_images:
    memory = Memory(cachedir=config.cache_dir, verbose=config.logging.verbose)
    flip_func = memory.cache(create_flipped_images)
    train_used = flip_func(train_used, config)

  return ({'real_train_annos': train_annos,
           'real_test_annos': test_annos,
           'train_annos': train_used,
           'test_annos': test_used,
           'validation_annos': dev_annos_test, 
            'class_meta': class_meta,
            'domain_meta': domain_meta,
            'attrib_meta': attrib_meta,
            'attrib_meta_with_name': attrib_meta_with_name},
          config)
Example #24
def get_vectorised_dataset():
    """
    Retrieves the pickled vectorised dataset
    (indicated in the config file).
    """
    PICKLE_VECTORISED_DATASET = conf.get_config('pickle_files',
                                                'vectorised_dataset')
    FEATURES_FILENAME = f"{PICKLE_PATH}/{PICKLE_VECTORISED_DATASET}"
    return pickle.load(open(FEATURES_FILENAME, "rb"))
Example #25
    def __init__(self, host='localhost', port=8082, connect_timeout=None):
        self._host = host
        self._port = port

        config = configuration.get_config()

        if connect_timeout is None:
            connect_timeout = config.getfloat('core', 'rpc-connect-timeout', 10.0)
        self._connect_timeout = connect_timeout
Example #26
def main():
    # For every browser listed in configuration.py, run the set of tests.
    for browser_type in get_config("tested_browsers"):
        # create new browser of given type (Chrome, Firefox, etc.)
        my_browser = Browser(browser_type)
        # set current browser as shared browser for all testing levels (do not create new browser for every JSR_level)
        set_shared_browser(my_browser)
        # for every JSR level listed in configuration.py, run the set of tests
        for jsr_level in get_config("tested_jsr_levels"):
            output.print_testing_header(browser_type, jsr_level)
            # set jsr_level to given level
            my_browser.jsr_level = jsr_level
            # run set of tests
            pytest.main(
                ['-s', '--ignore=testing/tests_definition/test_gps.py'])
            output.print_testing_footer(browser_type, jsr_level)
        # Close browser.
        my_browser.quit()
Example #27
def ssh_foreach(ops: setup.Operations, node_kind: str, *params: str):
    config = configuration.get_config()
    valid_node_kinds = configuration.Node.VALID_NODE_KINDS
    if not (node_kind == "node" or node_kind in valid_node_kinds):
        command.fail("usage: spire foreach {node," +
                     ",".join(valid_node_kinds) + "} command")
    for node in config.nodes:
        if node_kind == "node" or node.kind == node_kind:
            ops.ssh("run command on @HOST", node, *params)
Example #28
def check_pull():
    "verify that container pulling from the homeworld registry, and associated container execution, are functioning"

    config = configuration.get_config()
    node_count = len(
        [node for node in config.nodes if node.kind != "supervisor"])
    expect_prometheus_query_exact("sum(oci_pull_check)", node_count,
                                  "nodes are pulling ocis properly")
    print("oci pulling seems to work!")
Example #29
def get_jira_password():
    global jira_password
    if jira_password != None:
        return jira_password
    jira_password = get_config("jira.password")
    if jira_password is None:
        configuration.handle_missing_config('Please set your jira password:', 'jira.password', '<password>')
    else:
        return jira_password
Example #30
def get_dataURL_canvas(driver, name):
    try:
        driver.get(get_config("testing_page"))
        sleep(1)
        img = driver.execute_script("var canvas = document.getElementById('"+name+"'); return canvas.toDataURL()")
    except:
        return "ERROR"
    else:
        return img
Example #31
File: rpc.py Project: genba/luigi
    def __init__(self, host='localhost', port=8082, connect_timeout=None):
        self._host = host
        self._port = port

        config = configuration.get_config()

        if connect_timeout is None:
            connect_timeout = config.getfloat('core', 'rpc-connect-timeout', 10.0)
        self._connect_timeout = connect_timeout
Example #32
def get_jira_username():
    global jira_username
    if jira_username != None:
        return jira_username
    jira_username = get_config("jira.username")
    if jira_username is None:
        configuration.handle_missing_config('Please set your jira username:', 'jira.username', '<username>')
    else:
        return jira_username
Example #33
def check_aci_pull():
    config = configuration.get_config()
    node_count = len(
        [node for node in config.nodes if node.kind != "supervisor"])
    expect_prometheus_query_exact("sum(aci_pull_check)", node_count,
                                  "nodes are pulling acis properly")
    expect_prometheus_query_exact("sum(aci_rkt_check)", node_count,
                                  "nodes are launching acis properly")
    print("aci pulling seems to work!")
Example #34
def precision_recall():
#   from sklearn.metrics import roc_auc_score
#   from sklearn.metrics import roc_curve
  from sklearn.metrics import precision_recall_curve
  from sklearn.metrics import auc
  from sklearn.metrics import classification_report
  from mpltools import style
  style.use('ggplot')

  makes = ['bmw', 'ford']
  types = ['sedan', 'SUV']
  args = makes + types
  config = get_config(args)
  (dataset, config) = fgu.get_all_metadata(config)


  for ii, attrib_name in enumerate(args):
  #   attrib_name = 'bmw'

    attrib_clf = AttributeClassifier.load('../../../attribute_classifiers/{}.dat'.format(attrib_name))
    bnet = BayesNet(config, dataset['train_annos'],
                    dataset['class_meta'], [attrib_clf], desc=str(args))

    res = bnet.create_attrib_res_on_images()

    attrib_selector = AttributeSelector(config, dataset['class_meta'])
  #   attrib_meta = attrib_selector.create_attrib_meta([attrib_clf.name])
    pos_classes = attrib_selector.class_ids_for_attribute(attrib_name)
    true_labels = np.array(res.class_index.isin(pos_classes))


    print "--------------{}-------------".format(attrib_name)
    print res[str.lower(attrib_name)].describe()

    print classification_report(true_labels, np.array(res[str.lower(attrib_name)]) > 0.65,
                                target_names=['not-{}'.format(attrib_name),
                                              attrib_name])



    precision, recall, thresholds = precision_recall_curve(true_labels, np.array(res[str.lower(attrib_name)]))
    score = auc(recall, precision)
    print("Area Under Curve: %0.2f" % score)
#     score = roc_auc_score(true_labels, np.array(res[str.lower(attrib_name)]))
#     fpr, tpr, thresholds = roc_curve(true_labels, np.array(res[str.lower(attrib_name)]))
    plt.subplot(2,2,ii+1)
#     plt.plot(fpr, tpr)
    plt.plot(recall, precision, label='Precision-Recall curve')
    plt.title('Precision-Recall: {}'.format(attrib_name))
#     plt.xlabel('False Positive Rate')
#     plt.ylabel('True Positive Rate')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(['area = {}'.format(score)])

  plt.draw()
  plt.show()
Example #35
def get_blob_canvas(driver, name):
    try:
        driver.get(get_config("testing_page"))
        sleep(1)
        img = driver.execute_script("var canvas = document.getElementById('"+name+"'); return new Promise(function(resolve, reject) { canvas.toBlob(function(blob) { resolve(blob.arrayBuffer().then(a => Array.from(new Int8Array(a))))})});")
    except:
        return "ERROR"
    else:
        return img
Example #36
def main(_):
    if FLAGS.config == None:
        raise ValueError("Please specify a configuration file.")
    else:
        config = configuration.get_config(FLAGS.config)

    fout = file(config['log'], 'w')
    sys.stdout = writer(sys.stdout, fout)

    print('configuration:')
    for par, value in config.iteritems():
        print('{0}\t{1}'.format(par, value))

    eval_config = config.copy()  # same parameters for evaluation, except for:
    eval_config['batch_size'] = 1  # batch_size
    eval_config['num_steps'] = 1  # and number of steps

    # hypotheses = list of all hypotheses in n-best list
    all_data, id_to_word, total_length, hypotheses = reader.ptb_raw_data(
        config)

    # if processing per sentence
    if 'per_sentence' in config:
        # set num_steps = total length of each (padded) sentence
        config['num_steps'] = total_length
        # vocab is expanded with <bos> and padding symbol @
        config['vocab_size'] = len(id_to_word)
        eval_config['vocab_size'] = len(id_to_word)
        debug('vocabulary size: {0}\n'.format(config['vocab_size']))

    with tf.Graph().as_default():

        with tf.name_scope("Test"):
            test_hypotheses = inputLM(config=eval_config,
                                      data=hypotheses,
                                      name="Hypotheses")
            with tf.variable_scope("Model", reuse=None):
                mtest = wordLM(is_training=False,
                               config=eval_config,
                               input_=test_hypotheses)

        # sv = training helper that checkpoints models and computes summaries
        sv = tf.train.Supervisor(logdir=config['save_path'])

        # managed_session launches the checkpoint and summary services
        with sv.managed_session() as session:

            # restore variables from disk
            sv.saver.restore(session, config['lm'])
            print("Model restored.")

            out = open(config['result'], 'w')

            print('Start rescoring...')
            run_epoch(session, mtest, id_to_word, out)

            out.close()
Example #37
def get_imageData_canvas(driver, name):
    try:
        driver.get(get_config("testing_page"))
        sleep(1)
        img = driver.execute_script("var canvas = document.getElementById('"+name+"'); return canvas.getContext('2d').getImageData(0, 0, canvas.width, canvas.height).data")
    except:
        return "ERROR"
    else:
        return img
Example #38
def getConfiguration(cfgfile=None, config_required={'Main': {'key1': 'value1', 'key2': 'value2'}}):
    '''
    read an ini configuration file and return a dictionary of key/value pairs
    update configuration file if missing any sections
    accepts: 
        cfgfile - path to configuration file
        config_required - nested dictionary in the following format:
        {'Section1':
            {'key1': 'value1', 'key2': 'value2'},
            
         'Section 2':
            {'key1': 'value1'}
        }
    '''
    if not cfgfile:
        raise ValueError('no configuration file specified')
    # required configuration options
    # Section: {'option': 'default value'}
    logger = logging.getLogger(__name__)
    logger.debug('getting configuration from file: {}'.format(cfgfile))
    cfgpath = os.path.dirname(cfgfile)
#     config_required = {
#         'Main': {'credentials': os.path.join(cfgpath, 'credentials/'), 
#                  },
#         }

    config = configuration.get_config(cfgfile)

    update_config = False

    logger.debug('checking sections')
    for section, values in list(config_required.items()):
        if not config.has_section(section):
            logger.warning('section: {} not found in {}'.format(section, cfgfile))
            logger.debug('adding section {}'.format(section))
            config.add_section(section)
            update_config = True
        for option, value in list(values.items()):
            if not config.has_option(section, option):
                logger.warning('option: {} not found in {}'.format(option, cfgfile))
                logger.debug('adding option {}: {}'.format(option, value))

                config.set(section, option, value)
                update_config = True


    # for section, options in config_required.items():

    if update_config:
        try:
            logger.debug('updating configuration file at: {}'.format(cfgfile))
            configuration.create_config(cfgfile, config)
        except Exception as e:
            logger.error(e)
            
    return(config)
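A hypothetical call illustrating the nested config_required format described in the docstring (the file path, section names, and option values below are invented for the example):

    required = {
        'Main': {'credentials': './credentials/'},
        'Logging': {'level': 'INFO'},
    }
    config = getConfiguration(cfgfile='./app.ini', config_required=required)
    print(config.get('Logging', 'level'))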
Example #39
def redeploy_keyclients(ops: command.Operations) -> None:
    config = configuration.get_config()
    for node in config.nodes:
        # do not delete the cluster.conf on the supervisor because the
        # keyclient and keyserver on the supervisor use the same cluster.conf
        if node.kind != "supervisor":
            ssh_cmd(ops, "delete existing cluster config from @HOST", node, "rm", "-f", CONFIG_DIR + "/cluster.conf")
        ssh_cmd(ops, "delete existing local config from @HOST", node, "rm", "-f", CONFIG_DIR + "/local.conf")
        # restart local keyclient (will regenerate configs on restart)
        ssh_cmd(ops, "restart keyclient daemon on @HOST", node, "systemctl", "restart", "keyclient.service")
Example #40
def launch_spec(spec_name):
    config = configuration.get_config()
    spec = configuration.get_single_kube_spec(spec_name).encode()
    for node in config.nodes:
        if node.kind == "supervisor":
            ssh.check_ssh(node, "mkdir", "-p", DEPLOYQUEUE)
            ssh.upload_bytes(
                node, spec,
                "%s/%d.%s" % (DEPLOYQUEUE, int(time.time()), spec_name))
            print("Uploaded spec to deployqueue.")
Example #41
def check_flannel_pods():
    "verify that the flannel pods are running"

    config = configuration.get_config()
    node_count = len(
        [node for node in config.nodes if node.kind != "supervisor"])
    expect_prometheus_query_exact(
        'sum(kube_daemonset_status_number_ready{daemonset="kube-flannel-ds"})',
        node_count, "flannel pods are ready")
    print("flannel's pods are ready!")
Example #42
def modify_dns_bootstrap(ops: command.Operations, is_install: bool) -> None:
    config = configuration.get_config()
    for node in config.nodes:
        strip_cmd = "grep -vF AUTO-HOMEWORLD-BOOTSTRAP /etc/hosts >/etc/hosts.new && mv /etc/hosts.new /etc/hosts"
        ssh_raw(ops, "strip bootstrapped dns on @HOST", node, strip_cmd)
        if is_install:
            for hostname, ip in config.dns_bootstrap.items():
                new_hosts_line = "%s\t%s # AUTO-HOMEWORLD-BOOTSTRAP" % (ip, hostname)
                strip_cmd = "echo %s >>/etc/hosts" % escape_shell(new_hosts_line)
                ssh_raw(ops, "bootstrap dns on @HOST: %s" % hostname, node, strip_cmd)
Example #43
 def __init__(self):
     config = configuration.get_config()
     connection_string = config.get('task_history', 'db_connection')
     try:
         self.engine = create_engine(connection_string)
         self.session_factory = sessionmaker(bind=self.engine, expire_on_commit=False)
         Base.metadata.create_all(self.engine)
     except NameError:
         raise Exception("Using DbTaskHistory without sqlalchemy module!")
     self.tasks = {}  # task_id -> TaskRecord
Example #44
def setup_bootstrap_registry(ops: Operations) -> None:
    config = configuration.get_config()
    for node in config.nodes:
        if node.kind != "supervisor":
            continue

        ops.ssh("unmask nginx on @HOST", node, "systemctl", "unmask", "nginx")
        ops.ssh("enable nginx on @HOST", node, "systemctl", "enable", "nginx")
        ops.ssh("restart nginx on @HOST", node, "systemctl", "restart",
                "nginx")
Example #45
def get_webgl_pixels(driver, name):
    try:
        driver.get(get_config("testing_page"))
        sleep(1)
        gl = "var canvas = document.getElementById('"+name+"'); var gl = canvas.getContext('webgl2', {preserveDrawingBuffer: true}) || canvas.getContext('experimental-webgl2', {preserveDrawingBuffer: true}) || canvas.getContext('webgl', {preserveDrawingBuffer: true}) || canvas.getContext('experimental-webgl', {preserveDrawingBuffer: true}) || canvas.getContext('moz-webgl', {preserveDrawingBuffer: true});"
        image = driver.execute_script(gl+"var imageData = new Uint8Array(gl.canvas.width*gl.canvas.height*4);gl.readPixels(0, 0, gl.canvas.width, gl.canvas.height, gl.RGBA, gl.UNSIGNED_BYTE, imageData); return imageData;")
    except:
        return "ERROR"
    else:
        return image
Example #46
def admin_control():
    print("【管理員模式】")
    print("0. 產生主表(請使用專用表格)")
    command = input("# 請輸入您所需要的功能,或輸入'exit'返回主選單:  ")
    if command == 'exit':
        print("# 返回主選單")
        t_sleep(1)
    elif command == "0":
        # "C:\Users\ricardo\Desktop\Data\0311_藍天百腦匯報名清單(登陸出席).csv"
        while True:
            account = input("# 請輸入帳號: ")
            password = input("# 請輸入密碼: ")
            try:
                config = conf.get_config(account, password)
                # identity verification
                print('# 登入中....')
                conn = database_management.pymysql_connect(**config)
                print("# 登入成功,歡迎回來", account, '\n\n')
                t_sleep(1)
                break
            except pymysql.err.OperationalError:
                print("# 您輸入的帳號或密碼錯誤,請再輸入一次。\n\n")
        # 12. [Post-event data archiving] generate the "computed statistics table" from the "registered attendance table" and load it into the database
        # "C:\Users\ricardo\Desktop\Data\0311_藍天百腦匯報名清單(登陸出席).csv"
        # Produce csv file after processing
        path, sem, semester_first, semester_second, fc, sc, date = view_CLI.get_information(
            "10")
        file_source = file_management.File(path, sem, semester_first,
                                           semester_second, fc, sc, date)
        file_source.get_file()
        data_source = data_processing.Data(file_source.year,
                                           file_source.semester,
                                           file_source.file_path,
                                           file_source.first_cat,
                                           file_source.second_cat)
        data, produced_df_path = data_source.data_processing()
        file_management.remove_temp()
        print('# 成功生成CSV')
        print('# 開始將生成csv輸入資料庫...')
        # set name of the table
        db_connection = database_management.DataConnection(
            data, config, fc, sc, date)
        # create new table for the data
        db_connection.create_table("主資料表")
        '''
        To tackle 'The MySQL server is running with the --secure-file-priv option so it cannot execute this statement' error
        reference: https://blog.csdn.net/fdipzone/article/details/78634992
        '''
        # insert data into mysql table
        db_connection.insert_table("主資料表")
        db_connection.create_table("黑名單統計表")
        db_connection.insert_table("黑名單統計表")
        print("# 資料輸入資料庫成功,返回主選單")
        t_sleep(1)
        file_management.remove_temp()
Example #47
def _create_scheduler():
    config = configuration.get_config()
    retry_delay = config.getfloat('scheduler', 'retry-delay', 900.0)
    remove_delay = config.getfloat('scheduler', 'remove-delay', 600.0)
    worker_disconnect_delay = config.getfloat('scheduler', 'worker-disconnect-delay', 60.0)
    if config.getboolean('scheduler', 'record_task_history', False):
        import db_task_history # Needs sqlalchemy, thus imported here
        task_history_impl = db_task_history.DbTaskHistory()
    else:
        task_history_impl = task_history.NopHistory()
    return scheduler.CentralPlannerScheduler(retry_delay, remove_delay, worker_disconnect_delay, task_history_impl)
Example #48
def get_hdfs_syntax():
    """
    CDH4 (hadoop 2+) has a slightly different syntax for interacting with
    hdfs via the command line. The default version is CDH4, but one can
    override this setting with "cdh3" or "apache1" in the hadoop section of the config in
    order to use the old syntax
    """
    config = configuration.get_config()
    if config.getboolean("hdfs", "use_snakebite", False):
        return "snakebite"
    return config.get("hadoop", "version", "cdh4").lower()
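Note the precedence: if use_snakebite is enabled in the [hdfs] section, it wins regardless of the configured Hadoop version. An illustrative config:

    [hdfs]
    use_snakebite: true

    [hadoop]
    version: cdh4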
Example #49
def get_hdfs_syntax():
    """
    CDH4 (hadoop 2+) has a slightly different syntax for interacting with
    hdfs via the command line. The default version is CDH4, but one can
    override this setting with "cdh3" or "apache1" in the hadoop section of the config in
    order to use the old syntax
    """
    config = configuration.get_config()
    if config.getboolean("hdfs", "use_snakebite", False):
        return "snakebite"
    return config.get("hadoop", "version", "cdh4").lower()
Example #50
 def write_luigi_history(arglist, history):
     """
     Writes history to a file in the job's output directory in JSON format.
     Currently just for tracking the job ID in a configuration where no history is stored in the output directory by Hadoop.
     """
     history_filename = configuration.get_config().get("core", "history-filename", "")
     if history_filename and "-output" in arglist:
         output_dir = arglist[arglist.index("-output") + 1]
         f = luigi.hdfs.HdfsTarget(os.path.join(output_dir, history_filename)).open("w")
         f.write(json.dumps(history))
         f.close()
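History writing is opt-in: it happens only when a history-filename is configured and the job was given an -output directory. A sketch of the option (the filename is just an example):

    [core]
    history-filename: job-history.json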
Example #51
	def on_options_item(self, evt):
		evt.Skip()
		conf = configuration.get_config()
		# Translators: The title of the add-on options dialog.
		dlg = dialogs.OptionsDialog(gui.mainFrame, wx.ID_ANY, title=_("Options"))
		dlg.set_from_config(conf)
		def handle_dlg_complete(dlg_result):
			if dlg_result != wx.ID_OK:
				return
			dlg.write_to_config(conf)
		gui.runScriptModalDialog(dlg, callback=handle_dlg_complete)
Example #52
 def write_luigi_history(arglist, history):
     '''
     Writes history to a file in the job's output directory in JSON format.
     Currently just for tracking the job ID in a configuration where no history is stored in the output directory by Hadoop.
     '''
     history_filename = configuration.get_config().get('core', 'history-filename', '')
     if history_filename and '-output' in arglist:
         output_dir = arglist[arglist.index('-output') + 1]
         f = luigi.hdfs.HdfsTarget(os.path.join(output_dir, history_filename)).open('w')
         f.write(json.dumps(history))
         f.close()
Example #53
File: hdfs.py Project: nailor/luigi
 def __init__(self):
     super(SnakebiteHdfsClient, self).__init__()
     try:
         from snakebite.client import Client
         self.config = configuration.get_config()
         self._bite = None
         self.pid = -1
     except Exception as err:    # IGNORE:broad-except
         raise RuntimeError("You must specify namenode_host and namenode_port "
                            "in the [hdfs] section of your luigi config in "
                            "order to use luigi's snakebite support", err)
Example #54
 def jobconfs(self):
     jcs = []
     jcs.append("mapred.job.name=%s" % self.task_id)
     pool = self.pool
     if pool is not None:
         # Supporting two schedulers: fair (default) and capacity using the same option
         scheduler_type = configuration.get_config().get("hadoop", "scheduler", "fair")
         if scheduler_type == "fair":
             jcs.append("mapred.fairscheduler.pool=%s" % pool)
         elif scheduler_type == "capacity":
             jcs.append("mapred.job.queue.name=%s" % pool)
     return jcs
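Which jobconf the pool name maps to depends on the configured Hadoop scheduler; switching to the capacity scheduler would look roughly like this:

    [hadoop]
    scheduler: capacity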
Example #55
 def jobconfs(self):
     jcs = []
     jcs.append('mapred.job.name=%s' % self.task_id)
     pool = self.pool
     if pool is not None:
         # Supporting two schedulers: fair (default) and capacity using the same option
         scheduler_type = configuration.get_config().get('hadoop', 'scheduler', 'fair')
         if scheduler_type == 'fair':
             jcs.append('mapred.fairscheduler.pool=%s' % pool)
         elif scheduler_type == 'capacity':
             jcs.append('mapred.job.queue.name=%s' % pool)
     return jcs
Example #56
    def _get_value_from_config(self):
        """Loads the default from the config. Returns _no_value if it doesn't exist"""

        if not self.__config:
            return _no_value

        conf = configuration.get_config()
        (section, name) = (self.__config['section'], self.__config['name'])

        try:
            value = conf.get(section, name)
        except (NoSectionError, NoOptionError), e:
            return _no_value
Example #57
	def perform_autoconnect(self):
		cs = configuration.get_config()['controlserver']
		channel = cs['key']
		if cs['self_hosted']:
			port = cs['port']
			address = ('localhost',port)
			self.start_control_server(port, channel)
		else:
			address = address_to_hostport(cs['host'])
		if cs['connection_type']==0:
			self.connect_as_slave(address, channel)
		else:
			self.connect_as_master(address, channel)
Example #58
    def _get_default_from_config(self, safe):
        """Loads the default from the config. If safe=True, then returns None if missing. Otherwise,
           raises an UnknownConfigException."""

        conf = configuration.get_config()
        (section, name) = (self.default_from_config['section'], self.default_from_config['name'])
        try:
            return conf.get(section, name)
        except (NoSectionError, NoOptionError), e:
            if safe:
                return None
            raise UnknownConfigException("Couldn't find value for section={0} name={1}. Search config files: '{2}'".format(
                section, name, ", ".join(conf._config_paths)), e)