Example #1
def reinject_decorated_tables():
    """
    reinject the decorated tables (and columns)
    """

    logger.info("reinject_decorated_tables")

    # need to clear any non-decorated tables that were added during the previous run
    orca._TABLES.clear()
    orca._COLUMNS.clear()
    orca._TABLE_CACHE.clear()
    orca._COLUMN_CACHE.clear()

    for name, func in iteritems(_DECORATED_TABLES):
        logger.debug("reinject decorated table %s" % name)
        orca.add_table(name, func)

    for column_key, args in iteritems(_DECORATED_COLUMNS):
        table_name, column_name = column_key
        logger.debug("reinject decorated column %s.%s" % (table_name, column_name))
        orca.add_column(table_name, column_name, args['func'], cache=args['cache'])

    for name, args in iteritems(_DECORATED_INJECTABLES):
        logger.debug("reinject decorated injectable %s" % name)
        orca.add_injectable(name, args['func'], cache=args['cache'])
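A minimal sketch of how registries like _DECORATED_TABLES above might be populated; the decorator names here are illustrative, not the project's actual API.

_DECORATED_TABLES = {}
_DECORATED_COLUMNS = {}
_DECORATED_INJECTABLES = {}

def table(func):
    # Tables are keyed by the function's name.
    _DECORATED_TABLES[func.__name__] = func
    return func

def column(table_name, cache=False):
    def decorator(func):
        # Columns are keyed by (table_name, column_name), matching the
        # unpacking in the reinject loop above.
        _DECORATED_COLUMNS[(table_name, func.__name__)] = {'func': func, 'cache': cache}
        return func
    return decorator

def injectable(cache=False):
    def decorator(func):
        _DECORATED_INJECTABLES[func.__name__] = {'func': func, 'cache': cache}
        return func
    return decorator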
Example #2
  def __init__(self, connection=None, specification=None, base_url=None):
    self._model_apis = {}

    connection = connection if connection else config_connection()

    # Set the default URL for the twitter API we're using.
    self._base_url = Api.BASE_URL if base_url is None else base_url

    if specification is None:
      with closing(open_data("api.yaml")) as f:
        specification = yaml.safe_load(f)  # safe_load: plain yaml.load is unsafe/deprecated

    self._connection = connection
    self.limit_remaining = None
    self.limit_reset = None

    # Now look through all immediate attributes
    for attribute, value in iteritems(specification):
      if hasattr(models, attribute):
        # The element matches a model object, so it will be used to bless objects of that type.
        target_class = getattr(models, attribute)
        self._model_apis[target_class] = dict()

        # Instantiate the specificatons for all the relevant methods
        for inner_attribute, inner_value in iteritems(value):
          inner_value["method"] = inner_attribute
          self._model_apis[target_class][inner_attribute] = _ApiMethodSpec(**inner_value)
      else:
        # Otherwise, add it to the tree of API calls.
        value["method"] = attribute
        setattr(self, attribute, ApiMethod(self._connection, self, _ApiMethodSpec(**value)))
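For orientation, a hedged sketch of the specification shape this constructor appears to consume; the keys passed to _ApiMethodSpec are guesses, since its signature is not shown here.

# Hypothetical specification layout (keys are illustrative):
specification = {
    "Tweet": {                                    # matches an attribute of `models`
        "retweet": {"path": "statuses/retweet"},  # becomes an _ApiMethodSpec
    },
    "search": {"path": "search/tweets"},          # no model match: becomes self.search
}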
Example #3
def parse_params(indict, spec=NIF_PARAMS):
    logger.debug("Parsing: {}\n{}".format(indict, spec))
    outdict = indict.copy()
    wrong_params = {}
    for param, options in iteritems(spec):
        if param[0] != "@":  # Exclude json-ld properties
            for alias in options.get("aliases", []):
                if alias in indict:
                    outdict[param] = indict[alias]
            if param not in outdict:
                if options.get("required", False) and "default" not in options:
                    wrong_params[param] = spec[param]
                else:
                    if "default" in options:
                        outdict[param] = options["default"]
            else:
                if "options" in spec[param] and \
                   outdict[param] not in spec[param]["options"]:
                    wrong_params[param] = spec[param]
    if wrong_params:
        logger.debug("Error parsing: %s", wrong_params)
        error = Error(
            status=400,
            message="Missing or invalid parameters",
            parameters=outdict,
            errors={param: err
                    for param, err in iteritems(wrong_params)})
        raise error
    return outdict
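A minimal usage sketch with a hypothetical spec (the real NIF_PARAMS is richer):

spec = {
    "input": {"aliases": ["i", "data"], "required": True},
    "informat": {"aliases": ["f"], "default": "text",
                 "options": ["text", "json-ld"]},
}
result = parse_params({"i": "hello"}, spec=spec)
# result == {"i": "hello", "input": "hello", "informat": "text"}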
Example #4
    def __str__(self):
        lines = []
        if self.total:
            lines.append("Total: %(read)s, %(write)s" % self.total)
        if self.default:
            lines.append("Default: %(read)s, %(write)s" % self.default)
        for tablename, table_limit in iteritems(self.tables):
            lines.append(
                "%s: %s, %s" % (tablename, table_limit["read"], table_limit["write"])
            )
            indexes = self.indexes.get(tablename, {})
            for indexname, limit in iteritems(indexes):
                lines.append(
                    "%s:%s: %s, %s"
                    % (tablename, indexname, limit["read"], limit["write"])
                )

        # Add all the throttled indexes that don't have their table throttled.
        for tablename, data in iteritems(self.indexes):
            if tablename in self.tables:
                continue
            for indexname, limit in iteritems(data):
                lines.append(
                    "%s:%s: %s, %s"
                    % (tablename, indexname, limit["read"], limit["write"])
                )
        if lines:
            return "\n".join(lines)
        else:
            return "No throttle"
Example #5
 def assertValidCheckFile(self, path):
   """Tests whether a check definition has a valid configuration."""
   # Figure out the relative path of the check files.
   prefix = os.path.commonprefix(config.CONFIG["Checks.config_dir"])
   relpath = os.path.relpath(path, prefix)
   # If the config can't load fail immediately.
   try:
     configs = checks.LoadConfigsFromFile(path)
   except yaml.error.YAMLError as e:
     self.fail("File %s could not be parsed: %s\n" % (relpath, e))
   # Otherwise, check all the configs and pass/fail at the end.
   errors = collections.OrderedDict()
   for check_id, check_spec in iteritems(configs):
     check_errors = self.GetCheckErrors(check_spec)
     if check_errors:
       msg = errors.setdefault(relpath, ["check_id: %s" % check_id])
       msg.append(check_errors)
   if errors:
     message = ""
     for k, v in iteritems(errors):
       message += "File %s errors:\n" % k
       message += "  %s\n" % v[0]
       for err in v[1]:
         message += "    %s\n" % err
     self.fail(message)
Example #6
    def export(self, **options):
        """
        Exports the data into a XML file-like stream.

        Arguments:
            options (dict): The exporting options

        Returns:
            io.BytesIO: A XML file-like stream

        Raises:
            ExportError: When data fails to export
        """
        data = self._data.normalize(includeKey=True)
        database = Element("database", name="dtb_{}".format(self._data._base.year))

        for table_name, rows in iteritems(data):
            if not options.get("minify"):
                database.append(Comment(" Table {} ".format(table_name)))

            table = SubElement(database, "table", name=table_name)

            for row_data in itervalues(rows):
                row = SubElement(table, "row")

                for column_name, column_value in iteritems(row_data):
                    SubElement(row, "field", name=column_name).text = unicode(column_value)

        xml_data = xml_str(database, pretty_print=not options.get("minify"), xml_declaration=True, encoding="utf-8")

        return io.BytesIO(xml_data)
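A usage sketch; `exporter` is a hypothetical, already-constructed instance of this class:

stream = exporter.export(minify=True)  # returns io.BytesIO
with open("districts.xml", "wb") as f:
    f.write(stream.read())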
Example #7
def sql_where_from_params(**kwargs):
    """
    Utility function for converting a param dictionary into a where clause
    Lists and tuples become in clauses
    """
    clauses = ['true']
    type_handler = collections.OrderedDict()
    type_handler[type(None)] = "{0} is null"
    type_handler[list]       = "{0} in (%({0})s)"
    type_handler[tuple]      = "{0} in %({0})s"

    for key, value in sorted(iteritems(kwargs)):
        if isinstance(value, (tuple, list)):
            if not value:
                clauses = ['true = false']
                break

        for proposed_type, pattern in iteritems(type_handler):
            if isinstance(value, proposed_type):
                clauses.append(pattern.format(key))
                break
        else:
            clauses.append("{0} = %({0})s".format(key))

    return ' and '.join(clauses)
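A minimal usage sketch; the %(name)s placeholders are intended for a DB-API driver such as psycopg2:

where = sql_where_from_params(foo=1, bar=None, baz=[1, 2])
# where == 'true and bar is null and baz in (%(baz)s) and foo = %(foo)s'
empty = sql_where_from_params(baz=[])
# empty == 'true = false' (an empty list can never match)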
Example #8
  def testNoTraceOfDeletedHuntIsLeftInTheDataStore(self):
    # This only works with the test data store (FakeDataStore).
    if not isinstance(data_store.DB, fake_data_store.FakeDataStore):
      self.skipTest("Only supported on FakeDataStore.")

    with test_lib.ConfigOverrider(
        {"DataRetention.hunts_ttl": rdfvalue.Duration("1s")}):
      with test_lib.FakeTime(40 + 60 * self.NUM_HUNTS):
        self._RunCleanup()

      for hunt_urn in self.hunts_urns:
        hunt_id = hunt_urn.Basename()

        for subject, subject_data in iteritems(data_store.DB.subjects):
          # Foreman rules are versioned, so hunt ids will be mentioned
          # there. Ignoring audit events as well.
          if subject == "aff4:/foreman" or subject.startswith("aff4:/audit"):
            continue

          self.assertNotIn(hunt_id, subject)

          for column_name, values in iteritems(subject_data):
            self.assertNotIn(hunt_id, column_name)

            for value, _ in values:
              self.assertNotIn(hunt_id, utils.SmartUnicode(value))
Example #9
  def _ParseInsserv(self, data):
    """/etc/insserv.conf* entries define system facilities.

    Full format details are in man 8 insserv, but the basic structure is:
      $variable          facility1 facility2
      $second_variable   facility3 $variable

    Any init script that specifies Required-Start: $second_variable needs to be
    expanded to facility1 facility2 facility3.

    Args:
      data: A string of insserv definitions.
    """
    p = config_file.FieldParser()
    entries = p.ParseEntries(data)
    raw = {e[0]: e[1:] for e in entries}
    # Now expand out the facilities to services.
    facilities = {}
    for k, v in iteritems(raw):
      # Remove interactive tags.
      k = k.replace("<", "").replace(">", "")
      facilities[k] = v
    for k, vals in iteritems(facilities):
      self.insserv[k] = []
      for v in vals:
        self.insserv[k].extend(self._InsservExpander(facilities, v))
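A hedged illustration of the expansion the docstring describes, assuming _InsservExpander recursively resolves $-prefixed facility names:

# Given insserv data such as:
#   $local_fs   boot
#   $remote_fs  $local_fs nfs
# the method would leave roughly:
#   self.insserv == {"$local_fs": ["boot"],
#                    "$remote_fs": ["boot", "nfs"]}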
Example #10
 @defer.inlineCallbacks
 def remote_getBuildRequests(self):
     brids = dict()
     for builderid, brid in iteritems(self.brids):
         builderDict = yield self.master.data.get(('builders', builderid))
         brids[builderDict['name']] = brid
     defer.returnValue([(n, RemoteBuildRequest(self.master, n, brid))
                        for n, brid in iteritems(brids)])
Example #11
        def check(result):
            # finish up the debounced updateSummary before checking
            self.debounceClock.advance(1)
            self.assertEqual(self.expected_remote_commands, [],
                             "assert all expected commands were run")

            # in case of unexpected result, display logs in stdout for debugging failing tests
            if result != self.exp_result:
                for loog in itervalues(self.step.logs):
                    print(loog.stdout)
                    print(loog.stderr)

            self.assertEqual(result, self.exp_result, "expected result")
            if self.exp_state_string:
                stepStateString = self.master.data.updates.stepStateString
                stepids = list(stepStateString)
                assert stepids, "no step state strings were set"
                self.assertEqual(stepStateString[stepids[0]],
                                 self.exp_state_string,
                                 "expected step state strings")
            for pn, (pv, ps) in iteritems(self.exp_properties):
                self.assertTrue(self.properties.hasProperty(pn),
                                "missing property '%s'" % pn)
                self.assertEqual(self.properties.getProperty(pn),
                                 pv, "property '%s'" % pn)
                if ps is not None:
                    self.assertEqual(
                        self.properties.getPropertySource(pn), ps, "property '%s' source" % pn)
            for pn in self.exp_missing_properties:
                self.assertFalse(self.properties.hasProperty(pn),
                                 "unexpected property '%s'" % pn)
            for l, contents in iteritems(self.exp_logfiles):
                self.assertEqual(
                    self.step.logs[l].stdout, contents, "log '%s' contents" % l)
Example #12
	def connection_lost(self, reason):
		self.service._OnDisconnect() # use RemoteObject(self) as argument?

		if self.authed:
			if reason.check(error.ConnectionDone):
				logger.debug("Connection to %s closed", self.name)
			else:
				logger.info("Connection to %s lost: %s", self.name, reason.getErrorMessage())

			self.authed = False
			self.friends.reset_connection(self.name, RemoteObject(self))

			self.service._delete_all_objects(self.connid) # makes use by other peers impossible

			logger.debug("%s outstanding requests, %s streams", len(self._deferreds), len(self._multideferreds))

			if reason.check(error.ConnectionDone):
				for sequid, (deferred, __) in itertools.chain(iteritems(self._deferreds), iteritems(self._multideferreds)):
					deferred.errback(ConnectionLost("Connection to {} closed".format(self.name)))
			else:
				for sequid, (deferred, __) in itertools.chain(iteritems(self._deferreds), iteritems(self._multideferreds)):
					deferred.errback(ConnectionLost("Connection to {} lost: {}".format(self.name, reason.getErrorMessage())))

		else:
			if reason.check(error.ConnectionDone):
				logger.debug("Connection to %s closed", self.addr)
			else:
				logger.info("Connection to %s lost: %s", self.addr, reason.getErrorMessage())
			assert not self._deferreds and not self._multideferreds

		self.closed = True
Example #13
 def _zerorpc_inspect(self):
     methods = dict((m, f) for m, f in iteritems(self._methods) if not m.startswith("_"))
     detailed_methods = dict(
         (m, dict(args=self._format_args_spec(f._zerorpc_args()), doc=f._zerorpc_doc()))
         for (m, f) in iteritems(methods)
     )
     return {"name": self._name, "methods": detailed_methods}
Example #14
    def __init__(self, json):
        self.raw = json
        super(JSON_Map, self).__init__(json)

        cls = self.__class__

        # Map the json structure to event dispatcher properties
        # but only those attributes which do not already exist in the object
        properties = JSON_Map.map_attributes(self, json)

        self._python_properties = set()
        for c in cls.__mro__:
            for attr_name, attr in iteritems(c.__dict__):
                if isinstance(attr, property):
                    self._python_properties.add(attr_name)

        self._json_maps = {}
        for attr_name, attr in iteritems(self.__dict__):
            if isinstance(attr, JSON_Map) and attr_name in json:
                self._json_maps[attr_name] = attr

        with self.temp_unbind_all(*iterkeys(self.event_dispatcher_properties)):
            for key in iterkeys(properties):
                if key in json:
                    setattr(self, key, json[key])
        self.bind(**{p: partial(self._update_raw, p) for p in properties})
Example #15
  def MultiResolvePrefix(self,
                         subjects,
                         attribute_prefix,
                         timestamp=None,
                         limit=None):
    unicode_to_orig = {utils.SmartUnicode(s): s for s in subjects}
    result = {}
    for unicode_subject, orig_subject in iteritems(unicode_to_orig):

      values = self.ResolvePrefix(
          unicode_subject, attribute_prefix, timestamp=timestamp, limit=limit)

      if not values:
        continue

      if limit:
        if limit < len(values):
          values = values[:limit]
        result[orig_subject] = values
        limit -= len(values)
        if limit <= 0:
          return iteritems(result)
      else:
        result[orig_subject] = values

    return iteritems(result)
Example #16
File: flow.py Project: google/grr
  def Handle(self, args, token=None):
    """Renders list of descriptors for all the flows."""

    if data_store.RelationalDBEnabled():
      flow_iterator = iteritems(registry.FlowRegistry.FLOW_REGISTRY)
    else:
      flow_iterator = iteritems(registry.AFF4FlowRegistry.FLOW_REGISTRY)

    result = []
    for name, cls in sorted(flow_iterator):

      # Flows without a category do not show up in the GUI.
      if not getattr(cls, "category", None):
        continue

      # Only show flows that the user is allowed to start.
      try:
        if self.access_check_fn:
          self.access_check_fn(token.username, name)
      except access_control.UnauthorizedAccess:
        continue

      result.append(ApiFlowDescriptor().InitFromFlowClass(cls, token=token))

    return ApiListFlowDescriptorsResult(items=result)
Example #17
File: osx.py Project: google/grr
def CreateServiceProto(job):
  """Create the Service protobuf.

  Args:
    job: Launchdjobdict from servicemanagement framework.

  Returns:
    sysinfo_pb2.OSXServiceInformation proto
  """
  service = rdf_client.OSXServiceInformation(
      label=job.get("Label"),
      program=job.get("Program"),
      sessiontype=job.get("LimitLoadToSessionType"),
      lastexitstatus=int(job["LastExitStatus"]),
      timeout=int(job["TimeOut"]),
      ondemand=bool(job["OnDemand"]))

  for arg in job.get("ProgramArguments", "", stringify=False):
    # Returns CFArray of CFStrings
    service.args.Append(str(arg))

  mach_dict = job.get("MachServices", {}, stringify=False)
  for key, value in iteritems(mach_dict):
    service.machservice.Append("%s:%s" % (key, value))

  job_mach_dict = job.get("PerJobMachServices", {}, stringify=False)
  for key, value in iteritems(job_mach_dict):
    service.perjobmachservice.Append("%s:%s" % (key, value))

  if "PID" in job:
    service.pid = job["PID"].value

  return service
Example #18
    def fill_template_service(item):
        """
        Prepare fields of service with fields of service templates

        :param item: field name / values of the service
        :type item: dict
        :return: None
        """
        service = current_app.data.driver.db['service']
        ignore_fields = ['_id', '_etag', '_updated', '_created', '_template_fields', '_templates',
                         '_is_template', '_realm', 'host', '_templates_from_host_template']
        fields_not_update = []
        for (field_name, field_value) in iteritems(item):
            fields_not_update.append(field_name)
        item['_template_fields'] = {}
        if ('_is_template' not in item or not item['_is_template']) \
                and '_templates' in item and item['_templates'] != []:
            for service_template in item['_templates']:
                services = service.find_one({'_id': ObjectId(service_template)})
                if services is not None:
                    for (field_name, field_value) in iteritems(services):
                        if field_name not in fields_not_update \
                                and field_name not in ignore_fields:
                            item[field_name] = field_value
                            item['_template_fields'][field_name] = service_template
            schema = service_schema()
            ignore_schema_fields = ['_realm', '_template_fields', '_templates', '_is_template',
                                    '_templates_from_host_template']
            for key in schema['schema']:
                if key not in ignore_schema_fields:
                    if key not in item:
                        item['_template_fields'][key] = 0
Example #19
 def check_metrics(self, results):
     for intent_name, intent_metrics in iteritems(results["metrics"]):
         if intent_name is None or intent_name == "null":
             continue
         classification_precision = intent_metrics["intent"]["precision"]
         classification_recall = intent_metrics["intent"]["recall"]
         self.assertGreaterEqual(
             classification_precision, INTENT_CLASSIFICATION_THRESHOLD,
             "Intent classification precision is too low (%.3f) for intent "
             "'%s'" % (classification_precision, intent_name))
         self.assertGreaterEqual(
             classification_recall, INTENT_CLASSIFICATION_THRESHOLD,
             "Intent classification recall is too low (%.3f) for intent "
             "'%s'" % (classification_recall, intent_name))
         for slot_name, slot_metrics in iteritems(intent_metrics["slots"]):
             precision = slot_metrics["precision"]
             recall = slot_metrics["recall"]
             self.assertGreaterEqual(
                 precision, SLOT_FILLING_THRESHOLD,
                 "Slot precision is too low (%.3f) for slot '%s' of intent "
                 "'%s'" % (precision, slot_name, intent_name))
             self.assertGreaterEqual(
                 recall, SLOT_FILLING_THRESHOLD,
                 "Slot recall is too low (%.3f) for slot '%s' of intent "
                 "'%s'" % (recall, slot_name, intent_name))
Example #20
    def json(self):
        intent_datasets_json = {d.intent_name: d.json
                                for d in self.intent_datasets}
        intents = {
            intent_name: {
                "utterances": dataset_json["utterances"]
            }
            for intent_name, dataset_json in iteritems(intent_datasets_json)
        }
        ents = deepcopy(self.entities)
        ents_values = dict()
        for entity_name, entity in iteritems(self.entities):
            ents_values[entity_name] = set(a.value for a in entity.utterances)
            if entity.use_synonyms:
                ents_values[entity_name].update(
                    set(t for s in entity.utterances for t in s.synonyms))

        for dataset in self.intent_datasets:
            for ent_name, ent in iteritems(dataset.entities):
                if ent_name not in ents:
                    ents[ent_name] = ent
                elif not is_builtin_entity(ent_name):
                    for u in ent.utterances:
                        # compare against the entity's known values, not the dict keys
                        if u.value not in ents_values.get(ent_name, set()):
                            ents[ent_name].utterances.append(u)
        ents = {
            entity_name: entity.json
            for entity_name, entity in iteritems(ents)
        }
        return dict(language=self.language, intents=intents, entities=ents)
Example #21
    def fill_template_host(item):
        """
        Prepare fields of host with fields of host templates

        :param item: field name / values of the host
        :type item: dict
        :return: None
        """
        host = current_app.data.driver.db['host']
        ignore_fields = ['_id', '_etag', '_updated', '_created', '_template_fields', '_templates',
                         '_is_template', 'realm', '_templates_with_services']
        fields_not_update = []
        for (field_name, field_value) in iteritems(item):
            fields_not_update.append(field_name)
        item['_template_fields'] = {}
        if ('_is_template' not in item or not item['_is_template']) \
                and '_templates' in item and item['_templates'] != []:
            for host_template in item['_templates']:
                hosts = host.find_one({'_id': ObjectId(host_template)})
                if hosts is not None:
                    for (field_name, field_value) in iteritems(hosts):
                        if field_name not in fields_not_update \
                                and field_name not in ignore_fields:
                            item[field_name] = field_value
                            item['_template_fields'][field_name] = host_template
            schema = host_schema()
            ignore_schema_fields = ['realm', '_template_fields', '_templates', '_is_template',
                                    '_templates_with_services']
            for key in schema['schema']:
                if key not in ignore_schema_fields:
                    if key not in item:
                        item['_template_fields'][key] = 0
Example #22
def parse_axes(A_shape, B_shape, conv_axes, dot_axes, mode):
    A_ndim, B_ndim = len(A_shape), len(B_shape)
    if conv_axes is None:
        conv_axes = [list(range(A_ndim)), list(range(A_ndim))]
    axes = {'A' : {'conv' : list(conv_axes[0]),
                   'dot'  : list(dot_axes[0]),
                   'ignore' : [i for i in range(A_ndim)
                             if i not in conv_axes[0] and i not in dot_axes[0]]},
            'B' : {'conv' : list(conv_axes[1]),
                   'dot'  : list(dot_axes[1]),
                   'ignore' : [i for i in range(B_ndim)
                               if i not in conv_axes[1] and i not in dot_axes[1]]}}
    assert len(axes['A']['dot'])  == len(axes['B']['dot'])
    assert len(axes['A']['conv']) == len(axes['B']['conv'])
    i1 =      len(axes['A']['ignore'])
    i2 = i1 + len(axes['B']['ignore'])
    i3 = i2 + len(axes['A']['conv'])
    axes['out'] = {'ignore_A' : list(range(i1)),
                   'ignore_B' : list(range(i1, i2)),
                   'conv'     : list(range(i2, i3))}
    conv_shape = [compute_conv_size(A_shape[i], B_shape[j], mode)
                  for i, j in zip(axes['A']['conv'], axes['B']['conv'])]
    shapes = {'A'   : {s : [A_shape[i] for i in ax] for s, ax in iteritems(axes['A'])},
              'B'   : {s : [B_shape[i] for i in ax] for s, ax in iteritems(axes['B'])}}
    shapes['out'] = {'ignore_A' : shapes['A']['ignore'],
                     'ignore_B' : shapes['B']['ignore'],
                     'conv'     : conv_shape}
    return axes, shapes
Example #23
    def _sync_metadata(self, kp):
        """
        Syncs all of the metadata keyspaces and their underlying tables and columns.
        Sets self.keyspaces to a dict mapping keyspace name to KeyspaceMeta for the connection.
        :return:
        """

        self.keyspaces = {}
        #TODO: Turn off warnings when this occurs
        self.session.row_factory = dict_factory

        #gets all of the column data for all tables/keyspaces
        result = self.session.execute("""SELECT keyspace_name, columnfamily_name, column_name, component_index, index_name,
                             index_options, index_type, type as cql_type, validator FROM system.schema_columns""")


        cols = [ColumnMeta(**row) for row in result]
        for i in cols:
            #create keyspace if not already exists
            if self.keyspaces.get(i.keyspace) is None:
                self.keyspaces.update({i.keyspace:KeyspaceMeta(i.keyspace)})

            #add table if not already exists
            kp = self.keyspaces.get(i.keyspace)
            if kp.tables.get(i.table) is None:
                kp.tables.update({i.table:TableMeta(i.keyspace, i.table)})

            #finally add/overwrite column into table
            tb = kp.tables.get(i.table)
            tb.columns[i.name] = i
        for kp_nm, kp in iteritems(self.keyspaces):
            for tbl_nm, tbl in iteritems(kp.tables):
                tbl.categorize_columns()

        self.session.row_factory = self.panda_factory
Example #24
    def to_dict(self):
        """Returns a json-serializable dict"""
        if hasattr(self.tfidf_vectorizer, "vocabulary_"):
            # pylint: disable=W0212
            vocab = {k: int(v) for k, v in
                     iteritems(self.tfidf_vectorizer.vocabulary_)}
            idf_diag = self.tfidf_vectorizer._tfidf._idf_diag.data.tolist()
            # pylint: enable=W0212
            entity_utterances_to_entity_names = {
                k: list(v)
                for k, v in iteritems(self.entity_utterances_to_feature_names)
            }
        else:
            vocab = None
            idf_diag = None
            entity_utterances_to_entity_names = dict()

        tfidf_vectorizer = {
            'vocab': vocab,
            'idf_diag': idf_diag
        }

        return {
            'language_code': self.language,
            'tfidf_vectorizer': tfidf_vectorizer,
            'best_features': self.best_features,
            'entity_utterances_to_feature_names':
                entity_utterances_to_entity_names,
            'config': self.config.to_dict(),
            'unknown_words_replacement_string':
                self.unknown_words_replacement_string
        }
Example #25
 def __init__(self, d):
     for k, v in iteritems(d):
         setattr(self, k, v)
     self.properties = properties.Properties()
     for k, v in iteritems(d['properties']):
         self.properties.setProperty(k, v[0], v[1])
     self.who = d['author']
Example #26
    def doRequest(self):
        # create a new session if it doesn't exist
        self.session = getSession()

        requestkwargs = {
            'method': self.method,
            'url': self.url
        }

        for param in self.requestsParams:
            value = getattr(self, param, None)
            if value is not None:
                requestkwargs[param] = value

        log = self.addLog('log')

        # known methods already tested in __init__

        log.addHeader('Performing %s request to %s\n' %
                      (self.method, self.url))
        if self.params:
            log.addHeader('Parameters:\n')
            params = requestkwargs.get("params", {})
            if params:
                params = sorted(iteritems(params), key=lambda x: x[0])
                requestkwargs['params'] = params
            for k, v in params:
                log.addHeader('\t%s: %s\n' % (k, v))
        data = requestkwargs.get("data", None)
        if data:
            log.addHeader('Data:\n')
            if isinstance(data, dict):
                for k, v in iteritems(data):
                    log.addHeader('\t%s: %s\n' % (k, v))
            else:
                log.addHeader('\t%s\n' % data)

        try:
            r = yield self.session.request(**requestkwargs)
        except requests.exceptions.ConnectionError as e:
            log.addStderr(
                'An exception occurred while performing the request: %s' % e)
            self.finished(FAILURE)
            return

        if r.history:
            log.addStdout('\nRedirected %d times:\n\n' % len(r.history))
            for rr in r.history:
                self.log_response(rr)
                log.addStdout('=' * 60 + '\n')

        self.log_response(r)

        log.finish()

        self.descriptionDone = ["Status code: %d" % r.status_code]
        if r.status_code < 400:
            self.finished(SUCCESS)
        else:
            self.finished(FAILURE)
Example #27
 def print_model_params(self):
     self.pub("###")
     self.pub("####### RecNet - Recurrent Neural Network Framework ########")
     self.pub("###")
     date_time = str(datetime.datetime.today())
     self.pub("# Start Datetime: " + date_time)
     self.pub("###")
     self.pub("# Basic Information")
     for kk, pp in iteritems(self.prm.basic):
         self.pub(str(kk) + ": " + str(pp))
     self.pub("###")
     self.pub("# Data Information")
     for kk, pp in iteritems(self.prm.data):
         self.pub(str(kk) + ": " + str(pp))
     self.pub("###")
     self.pub("# Network Structure")
     for kk, pp in iteritems(self.prm.struct):
         self.pub(str(kk) + ": " + str(pp))
     self.pub("###")
     self.pub("# Optimization Parameters")
     for kk, pp in iteritems(self.prm.optimize):
         self.pub(str(kk) + ": " + str(pp))
     self.pub("###")
Example #28
    def __init__(self, private=None, public=None, private_seeds=None):
        # It is possible to distinguish between private and public seeds
        # based on the string content.  Consider modifying this function
        # to take merely one dict of seeds.  Trees should still be stored
        # separately.
        # Use None defaults to avoid mutable default arguments.
        private = private or {}
        public = public or {}
        private_seeds = private_seeds or {}
        self.trees = {}
        self.private_trees = {}
        self.public_trees = {}

        def treegen(value, entropy=False):
            if entropy:
                # this method also takes a netcode parameter, but we don't care
                # what network pycoin thinks this node is, because we only use it
                # for key derivation.
                return BIP32Node.from_master_secret(unhexlify(value))
            else:
                # this method will infer a network from the header bytes. We
                # don't care right now for the same reason as above, but we will
                # if Gem's API stops returning 'xpub' as the pubkey header bytes
                # because if pycoin doesn't recognize a header it will error.
                return BIP32Node.from_hwif(value)

        for name, seed in iteritems(private):
            tree = treegen(seed)
            self.private_trees[name] = self.trees[name] = tree

        for name, seed in iteritems(private_seeds):
            tree = treegen(seed, True)
            self.private_trees[name] = self.trees[name] = tree

        for name, seed in iteritems(public):
            tree = BIP32Node.from_hwif(seed)
            self.public_trees[name] = self.trees[name] = tree
Example #29
 def total_issues(self):
     """Number of issue entries for this arch."""
     total = 0
     for issue_status, issue_types in iteritems(self.issues):
         for issue_type, ies in iteritems(issue_types):
             total += len(ies)
     return total
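Equivalently, a sketch of the same count over the status -> type -> entries nesting as a single generator expression:

def total_issues(self):
    """Number of issue entries for this arch."""
    return sum(len(ies)
               for issue_types in self.issues.values()
               for ies in issue_types.values())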
Example #30
  def __init__(self,
               fhandle=None,
               action=None,
               initial_profiles=None,
               **session_args):
    super(GrrRekallSession, self).__init__(
        cache_dir=config.CONFIG["Client.rekall_profile_cache_path"])

    self.action = action

    # Just hard code the initial repository manager. Note this can be
    # overwritten later if needed.
    self._repository_managers = [(None,
                                  RekallCachingIOManager(
                                      initial_profiles=initial_profiles,
                                      session=self))]

    # Apply default configuration options to the session state, unless
    # explicitly overridden by the session_args.
    with self.state:
      for k, v in iteritems(session_args):
        self.state.Set(k, v)

      for name, options in iteritems(rekall_config.OPTIONS.args):
        # We don't want to override configuration options passed via
        # **session_args.
        if name not in session_args:
          self.state.Set(name, options.get("default"))

    # Ensure the action's Progress() method is called when Rekall reports
    # progress.
    self.proc = psutil.Process()
    self.memory_quota = config.CONFIG["Client.rss_max"] * 1024 * 1024
    self.progress.Register(id(self), lambda *_, **__: self._CheckQuota())
Example #31
 def _get_outgoing_weights(self, tag):
     return [((first, second), w) for (first, second), w
             in iteritems(self.crf_model.transition_features_)
             if first == tag]
Example #32
    def score_sentence(sentence):  # enclosing def reconstructed; not shown in the original snippet
        score = 0
        for i in range(len(sentence)):
            if i == 0:
                # beginning word
                score += np.log(bigram_probs[start_idx, sentence[i]])
            else:
                # middle word
                score += np.log(bigram_probs[sentence[i - 1], sentence[i]])
        # final word
        score += np.log(bigram_probs[sentence[-1], end_idx])

        # normalize the score
        return score / (len(sentence) + 1)

    # a function to map word indexes back to real words
    idx2word = dict((v, k) for k, v in iteritems(word2idx))

    def get_words(sentence):
        return ' '.join(idx2word[i] for i in sentence)

    # when we sample a fake sentence, we want to ensure not to sample
    # start token or end token
    sample_probs = np.ones(V)
    sample_probs[start_idx] = 0
    sample_probs[end_idx] = 0
    sample_probs /= sample_probs.sum()

    # test our model on real and fake sentences
    while True:
        # real sentence
        real_idx = np.random.choice(len(sentences))
Example #33
def db_model_factory(Base, model, all_models):
    def get_or_create_association_table(model1_name, model2_name):
        _association_table_name = calculate_association_table_name(
            model1_name, model2_name
        )
        logger.debug(
            "Creating/getting ManyToMany relationship table: %s",
            _association_table_name,
        )
        if _association_table_name in globals():
            return globals()[_association_table_name]

        # create an association table
        _association_table = Table(
            _association_table_name,
            Base.metadata,
            Column(
                "%s_pk" % model1_name.lower(), String, ForeignKey("%s.pk" % model1_name)
            ),
            Column(
                "%s_pk" % model2_name.lower(), String, ForeignKey("%s.pk" % model2_name)
            ),
        )
        # track it in our globals
        set_global(_association_table_name, _association_table)
        return _association_table

    logger.debug("-----")
    logger.debug("Generating model: %s", model.name)
    model_fields = {"__tablename__": model.name, "pk": Column(String, primary_key=True)}

    # populate all of the relevant additional relationships for this model
    for field_name, rel in iteritems(model.additional_rels):
        kwargs = {}
        if rel.get("back_populates", None) is not None:
            kwargs["back_populates"] = rel["back_populates"]
        if rel.get("secondary", None) is not None:
            kwargs["secondary"] = get_or_create_association_table(*rel["secondary"])
        logger.debug(
            "Creating additional relationship %s.%s -> %s (%s)",
            model.name,
            field_name,
            rel["to_model"],
            kwargs,
        )
        model_fields[field_name] = relationship(rel["to_model"], **kwargs)

    # now populate all of the standard fields
    for field_name in model.field_names:
        field = model.fields[field_name]
        if field.field_type in SQLALCHEMY_FIELD_MAPPER:
            # if it's a simple field
            model_fields[field.name] = Column(
                field.name, SQLALCHEMY_FIELD_MAPPER[field.field_type]
            )

        elif field.field_type in all_models:
            # if it's a foreign key reference
            if isinstance(field, StatikForeignKeyField):
                model_fields["%s_id" % field.name] = Column(
                    "%s_id" % field.name, ForeignKey("%s.pk" % field.field_type)
                )
                # if it's a self-referencing foreign key
                if field.field_type == model.name:
                    back_populates = field.back_populates or "children"
                    model_fields[back_populates] = relationship(
                        field.field_type,
                        backref=backref(field_name, remote_side=[model_fields["pk"]]),
                    )
                else:
                    kwargs = {}
                    if field.back_populates is not None:
                        kwargs["back_populates"] = field.back_populates
                        logger.debug(
                            "Field %s.%s has back-populates field name: %s",
                            model.name,
                            field_name,
                            field.back_populates,
                        )
                    else:
                        logger.debug(
                            "No back-populates field name for %s.%s",
                            model.name,
                            field_name,
                        )

                    foreign_key = model_fields.get("%s_id" % field.name, None)
                    model_fields[field.name] = relationship(
                        field.field_type, foreign_keys=[foreign_key], **kwargs
                    )

            elif isinstance(field, StatikManyToManyField):
                association_table = get_or_create_association_table(
                    model.name, field.field_type
                )

                kwargs = {"secondary": association_table}
                if field.back_populates is not None:
                    kwargs["back_populates"] = field.back_populates

                logger.debug(
                    "Creating model ManyToMany field %s.%s -> %s (%s)",
                    model.name,
                    field.name,
                    field.field_type,
                    kwargs,
                )
                model_fields[field.name] = relationship(field.field_type, **kwargs)

        else:
            raise InvalidFieldTypeError(model.name, field.name)

    Model = type(str(model.name), (Base,), model_fields)

    logger.debug("Model %s fields = %s", model.name, model_fields)

    # add the model class reference to the global scope
    set_global(model.name, Model)
    return Model
Example #34
def test_taker_init(createcmtdata, schedule, highfee, toomuchcoins, minmakers,
                    notauthed, ignored, nocommit):
    #these tests do not trigger utxo_retries
    oldtakerutxoretries = jm_single().config.get("POLICY", "taker_utxo_retries")
    oldtakerutxoamtpercent = jm_single().config.get("POLICY", "taker_utxo_amtpercent")
    jm_single().config.set("POLICY", "taker_utxo_retries", "20")
    def clean_up():
        jm_single().config.set("POLICY", "minimum_makers", oldminmakers)
        jm_single().config.set("POLICY", "taker_utxo_retries", oldtakerutxoretries)
        jm_single().config.set("POLICY", "taker_utxo_amtpercent", oldtakerutxoamtpercent)
    oldminmakers = jm_single().config.get("POLICY", "minimum_makers")
    jm_single().config.set("POLICY", "minimum_makers", str(minmakers))
    taker = get_taker(schedule)
    orderbook = copy.deepcopy(t_orderbook) 
    if highfee:
        for o in orderbook:
            #trigger high-fee warning; but reset in next step
            o['cjfee'] = '1.0'
    if ignored:
        taker.ignored_makers = ignored
    if nocommit:
        jm_single().config.set("POLICY", "taker_utxo_amtpercent", nocommit)
    if schedule[0][1] == 0.2:
        #triggers calc-ing amount based on a fraction
        jm_single().mincjamount = 50000000 #bigger than 40m = 0.2 * 200m
        res = taker.initialize(orderbook)
        assert res[0]
        assert res[1] == jm_single().mincjamount
        return clean_up()
    res = taker.initialize(orderbook)
    if toomuchcoins or ignored:
        assert not res[0]
        return clean_up()
    if nocommit:
        print(str(res))
        assert res[0] == "commitment-failure"
        return clean_up()
    taker.orderbook = copy.deepcopy(t_chosen_orders) #total_cjfee unaffected, all same
    maker_response = copy.deepcopy(t_maker_response)
    if notauthed:
        #Doctor one of the maker response data fields
        maker_response["J659UPUSLLjHJpaB"][1] = "xx" #the auth pub
    if schedule[0][1] == 199850000:
        #triggers negative change
        #makers offer 3000 txfee; we estimate ~ 147*10 + 2*34 + 10=1548 bytes
        #times 30k = 46440, so we pay 43440, plus maker fees = 3*0.0002*200000000
        #roughly, gives required selected = amt + 163k, hence the above =
        #2btc - 150k sats = 199850000 (tweaked because of aggressive coin selection)
        #simulate the effect of a maker giving us a lot more utxos
        taker.utxos["dummy_for_negative_change"] = ["a", "b", "c", "d", "e"]
        with pytest.raises(ValueError) as e_info:
            res = taker.receive_utxos(maker_response)
        return clean_up()
    if schedule[0][1] == 199850001:
        #our own change is greater than zero but less than dust
        #use the same edge case as for negative change, don't add dummy inputs
        #(because we need tx creation to complete), but trigger case by
        #bumping dust threshold
        jm_single().BITCOIN_DUST_THRESHOLD = 14000
        res = taker.receive_utxos(maker_response)
        #should have succeeded to build tx
        assert res[0]
        #change should be none
        assert not taker.my_change_addr
        return clean_up()        
    if schedule[0][1] == 199599800:
        #need to force negative fees to make this feasible
        for k, v in iteritems(taker.orderbook):
            v['cjfee'] = '-0.002'
        #            change_amount = (total_input - self.cjamount -
        #                     self.orderbook[nick]['txfee'] + real_cjfee)
        #suppose change amount is 1000 (sub dust), then solve for x;
        #given that real_cjfee = -0.002*x
        #change = 200000000 - x - 1000 - 0.002*x
        #x*1.002 = 1999999000; x = 199599800
        res = taker.receive_utxos(maker_response)
        assert not res[0]
        assert res[1] == "Not enough counterparties responded to fill, giving up"
        return clean_up()
    if schedule[0][3] == "mxeLuX8PP7qLkcM8uarHmdZyvP1b5e1Ynf":
        #to trigger rounding error for sweep (change non-zero),
        #modify the total_input via the values in self.input_utxos;
        #the amount to trigger a 2 satoshi change is found by trial-error.
        #TODO note this test is not adequate, because the code is not;
        #the code does not *DO* anything if a condition is unexpected.
        taker.input_utxos = copy.deepcopy(t_utxos_by_mixdepth)[0]
        for k,v in iteritems(taker.input_utxos):
            v["value"] = int(0.999805228 * v["value"])
        res = taker.receive_utxos(maker_response)
        assert res[0]
        return clean_up()

    res = taker.receive_utxos(maker_response)
    if minmakers != 2:
        assert not res[0]
        assert res[1] == "Not enough counterparties responded to fill, giving up"
        return clean_up()
        
    assert res[0]
    #re-calling will trigger "finished" code, since schedule is "complete".
    res = taker.initialize(orderbook)
    assert not res[0]

    #some exception cases: no coinjoin address, no change address:
    #donations not yet implemented:
    taker.my_cj_addr = None
    with pytest.raises(NotImplementedError) as e_info:
        taker.prepare_my_bitcoin_data()
    with pytest.raises(NotImplementedError) as e_info:
        a = taker.coinjoin_address()
    taker.wallet.inject_addr_get_failure = True
    taker.my_cj_addr = "dummy"
    assert not taker.prepare_my_bitcoin_data()
    #clean up
    return clean_up()
Example #35
 def mut_add(self, xs, ys):
     return {k : v.mut_add(xs[k], ys[k])
             for k, v in iteritems(self.shape)}
Example #36
 def zeros(self):
     return {k : v.zeros() for k, v in iteritems(self.shape)}
Example #37
 def __init__(self, value):
     self.shape = {k : vspace(v) for k, v in iteritems(value)}
     self.size  = sum(s.size for s in self.shape.values())
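Assembled for clarity, the three snippets above compose into one dict vector-space class; the class name and the toy vspace dispatcher are illustrative (autograd's real dispatcher handles many more types):

class FloatVSpace(object):
    size = 1
    def zeros(self):
        return 0.0
    def mut_add(self, x, y):
        return x + y

def vspace(value):
    # Toy dispatcher for this sketch only.
    return DictVSpace(value) if isinstance(value, dict) else FloatVSpace()

class DictVSpace(object):
    def __init__(self, value):
        self.shape = {k: vspace(v) for k, v in value.items()}
        self.size = sum(s.size for s in self.shape.values())
    def zeros(self):
        return {k: v.zeros() for k, v in self.shape.items()}
    def mut_add(self, xs, ys):
        return {k: v.mut_add(xs[k], ys[k]) for k, v in self.shape.items()}

vs = DictVSpace({"a": 1.0, "b": {"c": 2.0}})
print(vs.size)     # 2
print(vs.zeros())  # {'a': 0.0, 'b': {'c': 0.0}}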
Example #38
    def get_list_queryset(self, queryset):
        lookup_params = dict([
            (smart_str(k)[len(FILTER_PREFIX):], v)
            for k, v in self.admin_view.params.items()
            if smart_str(k).startswith(FILTER_PREFIX) and v != ''
        ])
        for p_key, p_val in iteritems(lookup_params):
            if p_val == "False":
                lookup_params[p_key] = False
        use_distinct = False

        # for clean filters
        self.admin_view.has_query_param = bool(lookup_params)
        self.admin_view.clean_query_url = self.admin_view.get_query_string(
            remove=[
                k for k in self.request.GET.keys()
                if k.startswith(FILTER_PREFIX)
            ])

        # Normalize the types of keys
        if not self.free_query_filter:
            for key, value in lookup_params.items():
                if not self.lookup_allowed(key, value):
                    raise SuspiciousOperation("Filtering by %s not allowed" %
                                              key)

        self.filter_specs = []
        if self.list_filter:
            for list_filter in self.list_filter:
                if callable(list_filter):
                    # This is simply a custom list filter class.
                    spec = list_filter(self.request, lookup_params, self.model,
                                       self)
                else:
                    field_path = None
                    field_parts = []
                    if isinstance(list_filter, (tuple, list)):
                        # This is a custom FieldListFilter class for a given field.
                        field, field_list_filter_class = list_filter
                    else:
                        # This is simply a field name, so use the default
                        # FieldListFilter class that has been registered for
                        # the type of the given field.
                        field, field_list_filter_class = list_filter, filter_manager.create
                    if not isinstance(field, models.Field):
                        field_path = field
                        field_parts = get_fields_from_path(
                            self.model, field_path)
                        field = field_parts[-1]
                    spec = field_list_filter_class(field,
                                                   self.request,
                                                   lookup_params,
                                                   self.model,
                                                   self.admin_view,
                                                   field_path=field_path)

                    if len(field_parts) > 1:
                        # Add related model name to title
                        spec.title = "%s %s" % (field_parts[-2].name,
                                                spec.title)

                    # Check if we need to use distinct()
                    use_distinct = (use_distinct or lookup_needs_distinct(
                        self.opts, field_path))
                if spec and spec.has_output():
                    try:
                        new_qs = spec.do_filte(queryset)
                    except ValidationError as e:
                        new_qs = None
                        self.admin_view.message_user(
                            _("<b>Filtering error:</b> %s") % e.messages[0],
                            'error')
                    if new_qs is not None:
                        queryset = new_qs

                    self.filter_specs.append(spec)

        self.has_filters = bool(self.filter_specs)
        self.admin_view.filter_specs = self.filter_specs
        obj = filter(lambda f: f.is_used, self.filter_specs)
        if six.PY3:
            obj = list(obj)
        self.admin_view.used_filter_num = len(obj)

        try:
            for key, value in lookup_params.items():
                use_distinct = (use_distinct
                                or lookup_needs_distinct(self.opts, key))
        except FieldDoesNotExist as e:
            raise IncorrectLookupParameters(e)

        try:
            # Fix a bug reported by david: in the demo, quick filtering by
            # IDC name could not be used.
            if isinstance(queryset, models.query.QuerySet) and lookup_params:
                new_lookup_params = dict()
                for k, v in lookup_params.items():
                    list_v = v.split(',')
                    if len(list_v) > 0:
                        new_lookup_params.update({k: list_v})
                    else:
                        new_lookup_params.update({k: v})
                queryset = queryset.filter(**new_lookup_params)
        except (SuspiciousOperation, ImproperlyConfigured):
            raise
        except Exception as e:
            raise IncorrectLookupParameters(e)
        else:
            if not isinstance(queryset, models.query.QuerySet):
                pass

        query = self.request.GET.get(SEARCH_VAR, '')

        # Apply keyword searches.
        def construct_search(field_name):
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name

        if self.search_fields and query:
            orm_lookups = [
                construct_search(str(search_field))
                for search_field in self.search_fields
            ]
            for bit in query.split():
                or_queries = [
                    models.Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups
                ]
                queryset = queryset.filter(reduce(operator.or_, or_queries))
            if not use_distinct:
                for search_spec in orm_lookups:
                    if lookup_needs_distinct(self.opts, search_spec):
                        use_distinct = True
                        break
            self.admin_view.search_query = query

        if use_distinct:
            return queryset.distinct()
        else:
            return queryset
Example #39
 def update_attributes(self, data):
     for k, v in iteritems(data):
         setattr(self, k, v)
Example #40
 def load_data(self):
     mainLog = _make_logger(method_name='QueueConfigMapper.load_data')
     with self.lock:
         # check if to update
         timeNow_timestamp = time.time()
         if self.lastUpdate is not None:
             last_reload_timestamp = self._get_last_reload_time()
             if (last_reload_timestamp is not None
                     and self.lastUpdate is not None
                     and datetime.datetime.utcfromtimestamp(
                         last_reload_timestamp) < self.lastUpdate
                     and timeNow_timestamp - last_reload_timestamp <
                     self.updateInterval):
                 return
     # start
     with self.lock:
          # update timestamp of last reload, lock with check interval
          got_timestamp_update_lock = self.dbProxy.get_process_lock(
              'qconf_reload', 'qconf_universal', self.updateInterval)
          if got_timestamp_update_lock:
             retVal = self._update_last_reload_time()
             if retVal:
                 mainLog.debug('updated last reload timestamp')
             else:
                 mainLog.warning(
                     'failed to update last reload timestamp. Skipped')
         else:
             mainLog.debug(
                 'did not get qconf_reload timestamp lock. Skipped to update last reload timestamp'
             )
         # init
         newQueueConfig = dict()
         localTemplatesDict = dict()
         remoteTemplatesDict = dict()
         finalTemplatesDict = dict()
         localQueuesDict = dict()
         remoteQueuesDict = dict()
         dynamicQueuesDict = dict()
         allQueuesNameList = set()
         getQueuesDynamic = False
         invalidQueueList = set()
         pandaQueueDict = PandaQueuesDict()
         # get resolver
         resolver = self._get_resolver()
         if resolver is None:
             mainLog.debug('No resolver is configured')
         # load config json from cacher (RT & RQ)
         queueConfigJson_cacher = self._load_config_from_cache()
         if queueConfigJson_cacher is not None:
             for queueName, queueDict in iteritems(queueConfigJson_cacher):
                 if queueDict.get('isTemplateQueue') is True \
                     or queueName.endswith('_TEMPLATE'):
                     # is RT
                     queueDict['isTemplateQueue'] = True
                     queueDict.pop('templateQueueName', None)
                     remoteTemplatesDict[queueName] = queueDict
                 else:
                     # is RQ
                     queueDict['isTemplateQueue'] = False
                     remoteQueuesDict[queueName] = queueDict
         # load config from local json file (LT & LQ)
         queueConfigJson_local = self._load_config_from_file()
         if queueConfigJson_local is not None:
             for queueName, queueDict in iteritems(queueConfigJson_local):
                 if queueDict.get('isTemplateQueue') is True \
                     or queueName.endswith('_TEMPLATE'):
                     # is LT
                     queueDict['isTemplateQueue'] = True
                     queueDict.pop('templateQueueName', None)
                     localTemplatesDict[queueName] = queueDict
                 else:
                     # is LQ
                     queueDict['isTemplateQueue'] = False
                     localQueuesDict[queueName] = queueDict
         else:
             mainLog.warning(
                 'Failed to load config from local json file. Skipped')
         # fill in final template (FT)
         finalTemplatesDict.update(remoteTemplatesDict)
         finalTemplatesDict.update(localTemplatesDict)
         finalTemplatesDict.pop(None, None)
         # remove queues with invalid templateQueueName
         for acr, queuesDict in [('RQ', remoteQueuesDict),
                                 ('LQ', localQueuesDict)]:
             for queueName, queueDict in iteritems(queuesDict.copy()):
                 templateQueueName = queueDict.get('templateQueueName')
                 if templateQueueName is not None \
                     and templateQueueName not in finalTemplatesDict:
                     del queuesDict[queueName]
                     mainLog.warning(
                         'Invalid templateQueueName "{0}" for {1} ({2}). Skipped'
                         .format(templateQueueName, queueName, acr))
         # get queue names from resolver and fill in dynamic queue (DQ)
         if resolver is not None \
             and 'DYNAMIC' in harvester_config.qconf.queueList:
             getQueuesDynamic = True
             dynamicQueuesNameList = resolver.get_all_queue_names()
             for queueName in dynamicQueuesNameList.copy():
                 queueDict = dict()
                 # template and default template via workflow
                 templateQueueName = None
                 resolver_harvester_template = None
                 if resolver is not None:
                     resolver_harvester_template = resolver.get_harvester_template(
                         queueName)
                     resolver_type, resolver_workflow = resolver.get_type_workflow(
                         queueName)
                 if resolver_harvester_template:
                     templateQueueName = resolver_harvester_template
                 elif not (resolver_type is None
                           or resolver_workflow is None):
                     templateQueueName = '{pq_type}.{workflow}'.format(
                         pq_type=resolver_type, workflow=resolver_workflow)
                 else:
                     templateQueueName = harvester_config.qconf.defaultTemplateQueueName
                 if templateQueueName not in finalTemplatesDict:
                     # remove queues with invalid templateQueueName
                     dynamicQueuesNameList.discard(queueName)
                     mainLog.warning(
                         'Invalid templateQueueName "{0}" for {1} (DQ). Skipped'
                         .format(templateQueueName, queueName))
                     continue
                 # parameters
                 resolver_harvester_params = resolver.get_harvester_params(
                     queueName)
                 for key, val in iteritems(resolver_harvester_params):
                     if key in self.dynamic_queue_generic_attrs:
                         queueDict[key] = val
                 # fill in dynamic queue configs
                 queueDict['templateQueueName'] = templateQueueName
                 queueDict['isTemplateQueue'] = False
                 dynamicQueuesDict[queueName] = queueDict
         # fill in all queue name list (names of RQ + DQ + LQ)
         allQueuesNameList |= set(remoteQueuesDict)
         allQueuesNameList |= set(dynamicQueuesDict)
         allQueuesNameList |= set(localQueuesDict)
         allQueuesNameList.discard(None)
         # set attributes
         for queueName in allQueuesNameList:
              # sources of queues and templates
             queueSourceList = []
             templateSourceList = []
             # prepare templateQueueName
             templateQueueName = None
             for queuesDict in [
                     remoteQueuesDict, dynamicQueuesDict, localQueuesDict
             ]:
                 if queueName not in queuesDict:
                     continue
                 tmp_queueDict = queuesDict[queueName]
                 tmp_templateQueueName = tmp_queueDict.get(
                     'templateQueueName')
                 if tmp_templateQueueName is not None:
                     templateQueueName = tmp_templateQueueName
             # prepare queueDict
             queueDict = dict()
             if templateQueueName in finalTemplatesDict:
                 queueDict.update(
                     copy.deepcopy(finalTemplatesDict[templateQueueName]))
             for acr, templatesDict in [('RT', remoteTemplatesDict),
                                        ('LT', localTemplatesDict)]:
                 if templateQueueName in templatesDict:
                     templateSourceList.append(acr)
             # update queueDict
             for acr, queuesDict in [('RQ', remoteQueuesDict),
                                     ('DQ', dynamicQueuesDict),
                                     ('LQ', localQueuesDict)]:
                 if queueName not in queuesDict:
                     continue
                 queueSourceList.append(acr)
                 tmp_queueDict = queuesDict[queueName]
                 for key, val in iteritems(tmp_queueDict):
                     val = copy.deepcopy(val)
                     if key in self.updatable_plugin_attrs \
                         and isinstance(queueDict.get(key), dict) \
                         and isinstance(val, dict):
                         # update plugin parameters instead of overwriting whole plugin section
                         queueDict[key].update(val)
                     else:
                         queueDict[key] = val
             # record sources of the queue config and its templates in log
             if templateQueueName:
                 mainLog.debug(
                     ('queue {queueName} comes from {queueSource} '
                      '(with template {templateName} '
                      'from {templateSource})').format(
                          queueName=queueName,
                          templateName=templateQueueName,
                          queueSource=','.join(queueSourceList),
                          templateSource=','.join(templateSourceList)))
             else:
                 mainLog.debug(
                     'queue {queueName} comes from {queueSource}'.format(
                         queueName=queueName,
                         queueSource=','.join(queueSourceList)))
             # prepare queueConfig
             if queueName in newQueueConfig:
                 queueConfig = newQueueConfig[queueName]
             else:
                 queueConfig = QueueConfig(queueName)
             # queueName = siteName/resourceType
             queueConfig.siteName = queueConfig.queueName.split('/')[0]
             if queueConfig.siteName != queueConfig.queueName:
                 queueConfig.resourceType = queueConfig.queueName.split(
                     '/')[-1]
             # get common attributes
             commonAttrDict = dict()
             if isinstance(queueDict.get('common'), dict):
                 commonAttrDict = queueDict.get('common')
             # according to queueDict
             for key, val in iteritems(queueDict):
                 if isinstance(val,
                               dict) and 'module' in val and 'name' in val:
                     # plugin attributes
                     val = copy.deepcopy(val)
                     # fill in common attributes for all plugins
                     for c_key, c_val in iteritems(commonAttrDict):
                         if c_key not in val and c_key not in ('module',
                                                               'name'):
                             val[c_key] = c_val
                     # check module and class name
                     try:
                         _t3mP_1Mp0R7_mO6U1e__ = importlib.import_module(
                             val['module'])
                         _t3mP_1Mp0R7_N4m3__ = getattr(
                             _t3mP_1Mp0R7_mO6U1e__, val['name'])
                     except Exception as _e:
                         invalidQueueList.add(queueConfig.queueName)
                         mainLog.error(
                             'Module or class not found. Omitted {0} in queue config ({1})'
                             .format(queueConfig.queueName, _e))
                         continue
                     else:
                         del _t3mP_1Mp0R7_mO6U1e__
                         del _t3mP_1Mp0R7_N4m3__
                     # fill in siteName and queueName
                     if 'siteName' not in val:
                         val['siteName'] = queueConfig.siteName
                     if 'queueName' not in val:
                         val['queueName'] = queueConfig.queueName
                     # middleware
                     if 'middleware' in val and val[
                             'middleware'] in queueDict:
                         # keep original config
                         val['original_config'] = copy.deepcopy(val)
                         # overwrite with middleware config
                         for m_key, m_val in iteritems(
                                 queueDict[val['middleware']]):
                             val[m_key] = m_val
                 setattr(queueConfig, key, val)
             # delete isTemplateQueue attribute
             try:
                 if getattr(queueConfig, 'isTemplateQueue'):
                     mainLog.error(
                         'Internal error: isTemplateQueue is True. Omitted {0} in queue config'
                         .format(queueConfig.queueName))
                     invalidQueueList.add(queueConfig.queueName)
                 else:
                     delattr(queueConfig, 'isTemplateQueue')
             except AttributeError as _e:
                 mainLog.error(
                     'Internal error with attr "isTemplateQueue". Omitted {0} in queue config ({1})'
                     .format(queueConfig.queueName, _e))
                 invalidQueueList.add(queueConfig.queueName)
             # get Panda Queue Name
             if resolver is not None:
                 queueConfig.pandaQueueName = resolver.get_panda_queue_name(
                     queueConfig.siteName)
             # additional criteria for getJob
             if queueConfig.getJobCriteria is not None:
                 tmpCriteria = dict()
                 for tmpItem in queueConfig.getJobCriteria.split(','):
                     tmpKey, tmpVal = tmpItem.split('=')
                     tmpCriteria[tmpKey] = tmpVal
                 if len(tmpCriteria) == 0:
                     queueConfig.getJobCriteria = None
                 else:
                     queueConfig.getJobCriteria = tmpCriteria
             # nullify job attributes if NoJob mapType
             if queueConfig.mapType == WorkSpec.MT_NoJob:
                 for attName in [
                         'nQueueLimitJob', 'nQueueLimitJobRatio',
                         'nQueueLimitJobMax', 'nQueueLimitJobMin'
                 ]:
                     setattr(queueConfig, attName, None)
             # heartbeat suppression
             if queueConfig.truePilot and queueConfig.noHeartbeat == '':
                 queueConfig.noHeartbeat = 'running,transferring,finished,failed'
             # set unique name
             queueConfig.set_unique_name()
             # put into new queue configs
             newQueueConfig[queueName] = queueConfig
             # Check existence of mandatory attributes
             if queueName in newQueueConfig:
                 queueConfig = newQueueConfig[queueName]
                 missing_attr_list = []
                 for _attr in self.mandatory_attrs:
                     if not hasattr(queueConfig, _attr):
                         invalidQueueList.add(queueConfig.queueName)
                         missing_attr_list.append(_attr)
                 if missing_attr_list:
                     mainLog.error(
                          'Missing mandatory attributes {0}. Omitted {1} in queue config'
                         .format(','.join(missing_attr_list),
                                 queueConfig.queueName))
         # delete invalid queues
         for invalidQueueName in invalidQueueList:
             if invalidQueueName in newQueueConfig:
                 del newQueueConfig[invalidQueueName]
         # auto blacklisting
         autoBlacklist = False
         if resolver is not None and hasattr(harvester_config.qconf, 'autoBlacklist') and \
                 harvester_config.qconf.autoBlacklist:
             autoBlacklist = True
         # get queue dumps
         queueConfigDumps = self.dbProxy.get_queue_config_dumps()
         # get active queues
         activeQueues = dict()
         for queueName, queueConfig in iteritems(newQueueConfig):
             # get status
             if queueConfig.queueStatus is None and autoBlacklist:
                 queueConfig.queueStatus = resolver.get_queue_status(
                     queueName)
             # get dynamic information
             if 'DYNAMIC' in harvester_config.qconf.queueList:
                 # UPS queue
                 if resolver is not None and resolver.is_ups_queue(
                         queueName):
                     queueConfig.runMode = 'slave'
                     queueConfig.mapType = 'NoJob'
             # set online if undefined
             if queueConfig.queueStatus is None:
                 queueConfig.queueStatus = 'online'
             queueConfig.queueStatus = queueConfig.queueStatus.lower()
             # look for configID
             dumpSpec = QueueConfigDumpSpec()
             dumpSpec.queueName = queueName
             dumpSpec.set_data(vars(queueConfig))
             if dumpSpec.dumpUniqueName in queueConfigDumps:
                 dumpSpec = queueConfigDumps[dumpSpec.dumpUniqueName]
             else:
                 # add dump
                 dumpSpec.creationTime = datetime.datetime.utcnow()
                 dumpSpec.configID = self.dbProxy.get_next_seq_number(
                     'SEQ_configID')
                 tmpStat = self.dbProxy.add_queue_config_dump(dumpSpec)
                 if not tmpStat:
                     dumpSpec.configID = self.dbProxy.get_config_id_dump(
                         dumpSpec)
                     if dumpSpec.configID is None:
                         mainLog.error(
                             'failed to get configID for {0}'.format(
                                 dumpSpec.dumpUniqueName))
                         continue
                 queueConfigDumps[dumpSpec.dumpUniqueName] = dumpSpec
             queueConfig.configID = dumpSpec.configID
             # ignore offline
             if queueConfig.queueStatus == 'offline':
                 continue
             # filter for pilot version
             if hasattr(harvester_config.qconf, 'pilotVersion') and \
                 pandaQueueDict[queueConfig.siteName].get('pilot_version') != str(harvester_config.qconf.pilotVersion):
                 continue
             if 'ALL' not in harvester_config.qconf.queueList and \
                     'DYNAMIC' not in harvester_config.qconf.queueList and \
                     queueName not in harvester_config.qconf.queueList:
                 continue
             activeQueues[queueName] = queueConfig
         self.queueConfig = newQueueConfig
         self.activeQueues = activeQueues
         newQueueConfigWithID = dict()
         for dumpSpec in queueConfigDumps.values():
             queueConfig = QueueConfig(dumpSpec.queueName)
             queueConfig.update_attributes(dumpSpec.data)
             queueConfig.configID = dumpSpec.configID
             newQueueConfigWithID[dumpSpec.configID] = queueConfig
         self.queueConfigWithID = newQueueConfigWithID
         self.lastUpdate = datetime.datetime.utcnow()
     # update database
     if self.toUpdateDB:
         self.dbProxy.fill_panda_queue_table(self.activeQueues.keys(), self)
         mainLog.debug('updated to DB')
     # done
     mainLog.debug('done')
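A note on the merge order above: templates are applied remote-first, so local templates win, and per-queue settings are layered remote, then dynamic, then local, with plugin sections (the updatable_plugin_attrs) updated key-by-key instead of being replaced wholesale. A minimal, self-contained sketch of that layering, with made-up data and a stand-in for updatable_plugin_attrs:

import copy

UPDATABLE_PLUGIN_ATTRS = {'submitter'}  # stand-in for self.updatable_plugin_attrs

def merge_queue_layers(layers):
    """Merge queue dicts in increasing priority order (e.g. RQ, DQ, LQ)."""
    merged = {}
    for layer in layers:
        for key, val in layer.items():
            val = copy.deepcopy(val)
            if key in UPDATABLE_PLUGIN_ATTRS \
                    and isinstance(merged.get(key), dict) and isinstance(val, dict):
                merged[key].update(val)  # update plugin parameters only
            else:
                merged[key] = val  # plain overwrite
    return merged

remote = {'maxWorkers': 10, 'submitter': {'module': 'm', 'name': 'S', 'nCore': 1}}
local = {'submitter': {'nCore': 8}}
assert merge_queue_layers([remote, local]) == \
    {'maxWorkers': 10, 'submitter': {'module': 'm', 'name': 'S', 'nCore': 8}}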
Example No. 41
 def assertInstanceEqual(self, expected, inst):
     for field_name, field_value in iteritems(expected):
         self.assertEqual(field_value, getattr(inst, field_name))
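A hypothetical use of this helper, assuming a unittest.TestCase subclass and a simple model object (User is made up here):

import unittest
from collections import namedtuple

User = namedtuple('User', ['name', 'age'])  # hypothetical model

class UserTest(unittest.TestCase):
    def assertInstanceEqual(self, expected, inst):
        for field_name, field_value in expected.items():
            self.assertEqual(field_value, getattr(inst, field_name))

    def test_fields(self):
        self.assertInstanceEqual({'name': 'ann', 'age': 7}, User('ann', 7))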
Example No. 42
    def set_def_id(self, def_ids):
        # Sort AMQP definitions by their names.
        def_ids = sorted(iteritems(def_ids), key=itemgetter(1))

        for id, name in def_ids:
            self.fields['def_id'].choices.append([id, name])
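The sorted(iteritems(def_ids), key=itemgetter(1)) call turns the {id: name} mapping into (id, name) pairs ordered by definition name, so the choices list ends up alphabetical. For example:

from operator import itemgetter

def_ids = {3: 'orders', 1: 'billing', 2: 'events'}
pairs = sorted(def_ids.items(), key=itemgetter(1))
# -> [(1, 'billing'), (2, 'events'), (3, 'orders')], ordered by name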
Example No. 43
 def __init__(self, **kwargs):
     self.currencies = []
     for currency, fee in iteritems(kwargs):
         self.currencies.append(currency)
         setattr(self, currency, Decimal(fee))
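Assuming the snippet belongs to a fee-schedule class (called Fees here purely for illustration), each keyword becomes both an entry in self.currencies and a Decimal attribute:

from decimal import Decimal

class Fees(object):  # hypothetical name for the class this __init__ belongs to
    def __init__(self, **kwargs):
        self.currencies = []
        for currency, fee in kwargs.items():
            self.currencies.append(currency)
            setattr(self, currency, Decimal(fee))

fees = Fees(BTC='0.0005', USD='1.50')
assert fees.BTC == Decimal('0.0005')
assert set(fees.currencies) == {'BTC', 'USD'}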
Example No. 44
def main():
    parser = create_argument_parser()
    args = parser.parse_args()

    if not args.config:
        parser.print_usage()
        return

    # Configuration

    with open(args.config) as config:
        options = config_to_options(config.read())

    # Override settings with command line options

    if args.max_results:
        options['settings']['max_results'] = args.max_results

    if args.quantiles:
        try:
            quantiles = [float(s.strip()) for s in args.quantiles.split(',')]
            options['settings']['quantiles'] = quantiles
        except (
                AttributeError,
                ValueError,
        ):
            print("Invalid value for --quantiles: " + args.quantiles)
            parser.print_usage()
            return
    quantiles = options['settings']['quantiles']

    if args.charts_from:
        options['settings']['charts_from'] = args.charts_from
    if args.charts_to:
        options['settings']['charts_to'] = args.charts_to

    output_format = args.format.lower() if args.format else 'csv'

    throughput_window_end = parse_relative_date(
        args.throughput_window_end
    ) if args.throughput_window_end else datetime.date.today()
    throughput_window_days = args.throughput_window

    # Query JIRA

    try:
        jira = get_jira_client(options['connection'])

        q = CycleTimeQueries(jira, **options['settings'])

        print("Fetching issues (this could take some time)")
        cycle_data = pd.DataFrame()
        size_data = pd.DataFrame()
        cycle_data, size_data = q.cycle_data(verbose=args.verbose,
                                             result_cycle=cycle_data,
                                             result_size=size_data)
        if args.points:
            print("Working out size changes of issues over time")
            df_size_history = q.size_history(size_data)
            df_size_history.to_csv(r'size_history.csv',
                                   sep='\t')  # Save to file.
        else:
            df_size_history = None
    except JIRAError as e:
        eprint(e)
        return 1

    #cfd_data = q.cfd(cycle_data)
    cfd_data = q.cfd(cycle_data,
                     size_history=df_size_history,
                     pointscolumn=args.points,
                     stacked=False)
    cfd_data_stackable = q.cfd(cycle_data,
                               size_history=df_size_history,
                               pointscolumn=args.points,
                               stacked=True)

    scatter_data = q.scatterplot(cycle_data)
    histogram_data = q.histogram(cycle_data)
    percentile_data = q.percentiles(cycle_data, percentiles=quantiles)

    #daily_throughput_data = q.throughput_data(
    #    cycle_data[cycle_data['completed_timestamp'] >= (throughput_window_end - datetime.timedelta(days=throughput_window_days))],
    #)

    if args.points:
        daily_throughput_data = q.throughput_data(
            cycle_data[cycle_data['completed_timestamp'] >= (
                throughput_window_end -
                datetime.timedelta(days=throughput_window_days))],
            pointscolumn=args.points)
    else:
        daily_throughput_data = q.throughput_data(
            cycle_data[cycle_data['completed_timestamp'] >= (
                throughput_window_end -
                datetime.timedelta(days=throughput_window_days))], )

    if options['settings']['statusmapping']:
        for key, state in iteritems(
                options['settings']['statusmapping']
        ):  # use .items() on Python 3 and iteritems() on Python 2
            if state == 'complete':
                done_column = key
            if state == 'final':
                final_column = key
            if state == 'committed':
                committed_column = key
            if state == 'backlog':
                backlog_column = key
    else:
        backlog_column = args.backlog_column or cfd_data.columns[0]
        committed_column = args.committed_column or cfd_data.columns[1]
        final_column = args.final_column or cfd_data.columns[-2]
        done_column = args.done_column or cfd_data.columns[-1]

    cycle_names = [s['name'] for s in q.settings['cycle']]
    field_names = sorted(options['settings']['fields'].keys())
    query_attribute_names = [q.settings['query_attribute']
                             ] if q.settings['query_attribute'] else []

    # Burnup forecast
    target = args.charts_burnup_forecast_target or None
    trials = args.charts_burnup_forecast_trials or 1000

    # TODO - parameterise historical throughput
    #try:
    if args.points:
        burnup_forecast_data = q.burnup_forecast(cfd_data,
                                                 daily_throughput_data,
                                                 trials=trials,
                                                 target=target,
                                                 backlog_column=backlog_column,
                                                 done_column=done_column,
                                                 percentiles=quantiles,
                                                 sized='Sized')
    else:
        burnup_forecast_data = q.burnup_forecast(cfd_data,
                                                 daily_throughput_data,
                                                 trials=trials,
                                                 target=target,
                                                 backlog_column=backlog_column,
                                                 done_column=done_column,
                                                 percentiles=quantiles,
                                                 sized='')

    #except Exception as e:
    #    print("Warning: Failed to calculate burnup forecast")
    #    burnup_forecast_data = None

    # Write files

    if args.output:
        print("Writing cycle data to", args.output)

        header = ['ID', 'Link', 'Name'] + cycle_names + [
            'Type', 'Status', 'Resolution'
        ] + field_names + query_attribute_names
        columns = ['key', 'url', 'summary'] + cycle_names + [
            'issue_type', 'status', 'resolution'
        ] + field_names + query_attribute_names

        if output_format == 'json':
            values = [header] + [
                map(to_json_string, row)
                for row in cycle_data[columns].values.tolist()
            ]
            with open(args.output, 'w') as out:
                out.write(json.dumps(values))
        elif output_format == 'xlsx':
            cycle_data.to_excel(args.output,
                                'Cycle data',
                                columns=columns,
                                header=header,
                                index=False)
        else:
            cycle_data.to_csv(args.output,
                              columns=columns,
                              header=header,
                              date_format='%Y-%m-%d',
                              index=False,
                              sep='\t')

    if args.records:
        if output_format == 'json':
            print("Writing cycle data as JSON records")
            cycle_data.to_json(args.records,
                               date_format='iso',
                               orient='records')
        else:
            print(
                "Warning: Ignoring cycle data as JSON records. Use --format json"
            )

    if args.size_history:
        print("Writing issue size history data to", args.size_history)
        if output_format == 'json':
            size_data.to_json(args.size_history, date_format='iso')
        elif output_format == 'xlsx':
            size_data.to_excel(args.size_history, 'SIZES')
        else:
            size_data.to_csv(args.size_history,
                             columns=['key', 'fromDate', 'toDate', 'size'],
                             sep='\t',
                             date_format='%Y-%m-%d')

    if args.cfd:
        print("Writing Cumulative Flow Diagram data to", args.cfd)
        if output_format == 'json':
            cfd_data.to_json(args.cfd, date_format='iso')
        elif output_format == 'xlsx':
            cfd_data.to_excel(args.cfd, 'CFD')
        else:
            cfd_data.to_csv(args.cfd, sep='\t')

    if args.scatterplot:
        print("Writing cycle time scatter plot data to", args.scatterplot)
        if output_format == 'json':
            scatter_data.to_json(args.scatterplot, date_format='iso')
        elif output_format == 'xlsx':
            scatter_data.to_excel(args.scatterplot, 'Scatter', index=False)
        else:
            scatter_data.to_csv(args.scatterplot, index=False, sep='\t')

    if args.percentiles:
        print("Writing cycle time percentiles", args.percentiles)
        if output_format == 'json':
            percentile_data.to_json(args.percentiles, date_format='iso')
        elif output_format == 'xlsx':
            percentile_data.to_frame(name='percentiles').to_excel(
                args.percentiles, 'Percentiles', header=True)
        else:
            percentile_data.to_csv(args.percentiles, header=True, sep='\t')

    if args.histogram:
        print("Writing cycle time histogram data to", args.histogram)
        if output_format == 'json':
            histogram_data.to_json(args.histogram, date_format='iso')
        elif output_format == 'xlsx':
            histogram_data.to_frame(name='histogram').to_excel(args.histogram,
                                                               'Histogram',
                                                               header=True)
        else:
            histogram_data.to_csv(args.histogram, header=True, sep='\t')

    if args.throughput:
        print("Writing throughput data to", args.throughput)
        if output_format == 'json':
            daily_throughput_data.to_json(args.throughput, date_format='iso')
        elif output_format == 'xlsx':
            daily_throughput_data.to_excel(args.throughput,
                                           'Throughput',
                                           header=True)
        else:
            daily_throughput_data.to_csv(args.throughput,
                                         header=True,
                                         sep='\t')

    if args.burnup_forecast and burnup_forecast_data is not None:
        print("Writing burnup forecast data to", args.burnup_forecast)
        if output_format == 'json':
            burnup_forecast_data.to_json(args.burnup_forecast,
                                         date_format='iso')
        elif output_format == 'xlsx':
            burnup_forecast_data.to_excel(args.burnup_forecast,
                                          'Forecast',
                                          header=True)
        else:
            burnup_forecast_data.to_csv(args.burnup_forecast,
                                        header=True,
                                        sep='\t')

    # Output charts (if we have the right things installed)
    if charting.HAVE_CHARTING:

        charts_from = parse_relative_date(
            options['settings']['charts_from']
        ) if options['settings']['charts_from'] is not None else None
        charts_to = parse_relative_date(
            options['settings']['charts_to']
        ) if options['settings']['charts_to'] is not None else None

        cycle_data_sliced = cycle_data
        if charts_from is not None:
            cycle_data_sliced = cycle_data_sliced[
                cycle_data_sliced['completed_timestamp'] >= charts_from]
        if charts_to is not None:
            cycle_data_sliced = cycle_data_sliced[
                cycle_data_sliced['completed_timestamp'] <= charts_to]

        cfd_data_sliced = cfd_data[slice(charts_from, charts_to)]
        cfd_data_stackable_sliced = cfd_data_stackable[slice(
            charts_from, charts_to)]

        charting.set_context()

        if args.charts_scatterplot:
            print("Drawing scatterplot in", args.charts_scatterplot)
            charting.set_style('darkgrid')
            try:
                ax = charting.cycle_time_scatterplot(
                    cycle_data_sliced,
                    percentiles=quantiles,
                    title=args.charts_scatterplot_title)
            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_scatterplot,
                            bbox_inches='tight',
                            dpi=300)

        if args.charts_histogram:
            print("Drawing histogram in", args.charts_histogram)
            charting.set_style('darkgrid')
            try:
                ax = charting.cycle_time_histogram(
                    cycle_data_sliced,
                    percentiles=quantiles,
                    title=args.charts_histogram_title)
            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_histogram,
                            bbox_inches='tight',
                            dpi=300)

        if args.charts_cfd:
            print("Drawing CFD in", args.charts_cfd)
            charting.set_style('whitegrid')
            try:
                if args.points:
                    ax = charting.cfd(cfd_data_stackable_sliced,
                                      title=args.charts_cfd_title,
                                      pointscolumn=args.points)
                else:
                    ax = charting.cfd(cfd_data_sliced,
                                      title=args.charts_cfd_title)
            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_cfd, bbox_inches='tight', dpi=300)

        if args.charts_throughput:
            print("Drawing throughput chart in", args.charts_throughput)
            charting.set_style('darkgrid')
            try:
                ax = charting.throughput_trend_chart(
                    daily_throughput_data, title=args.charts_throughput_title)
            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_throughput,
                            bbox_inches='tight',
                            dpi=300)

        if args.charts_burnup:
            print("Drawing burnup chart in", args.charts_burnup)
            charting.set_style('whitegrid')
            try:
                if args.points:
                    ax = charting.burnup(cfd_data_sliced,
                                         backlog_column=backlog_column,
                                         done_column=done_column,
                                         title=args.charts_burnup_title,
                                         sized='Sized')
                else:
                    ax = charting.burnup(cfd_data_sliced,
                                         backlog_column=backlog_column,
                                         done_column=done_column,
                                         title=args.charts_burnup_title,
                                         sized='')

            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_burnup, bbox_inches='tight', dpi=300)

        if args.charts_burnup_forecast:
            target = args.charts_burnup_forecast_target or None
            trials = args.charts_burnup_forecast_trials or 100
            deadline = parse_relative_date(
                args.charts_burnup_forecast_deadline
            ) if args.charts_burnup_forecast_deadline else None
            deadline_confidence = args.charts_burnup_forecast_deadline_confidence

            print("Drawing burnup foreacst chart in",
                  args.charts_burnup_forecast)
            charting.set_style('whitegrid')
            try:
                if args.points:
                    ax = charting.burnup_forecast(
                        cfd_data_sliced,
                        daily_throughput_data,
                        trials=trials,
                        target=target,
                        backlog_column=backlog_column,
                        done_column=done_column,
                        percentiles=quantiles,
                        deadline=deadline,
                        deadline_confidence=deadline_confidence,
                        title=args.charts_burnup_forecast_title,
                        sized='Sized')
                else:
                    ax = charting.burnup_forecast(
                        cfd_data_sliced,
                        daily_throughput_data,
                        trials=trials,
                        target=target,
                        backlog_column=backlog_column,
                        done_column=done_column,
                        percentiles=quantiles,
                        deadline=deadline,
                        deadline_confidence=deadline_confidence,
                        title=args.charts_burnup_forecast_title,
                        sized='')
            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_burnup_forecast,
                            bbox_inches='tight',
                            dpi=300)

        if args.charts_wip:
            print("Drawing WIP chart in", args.charts_wip)
            charting.set_style('darkgrid')
            try:
                ax = charting.wip_chart(q.cfd(
                    cycle_data[cycle_data[backlog_column] >= (
                        datetime.date.today() - datetime.timedelta(
                            weeks=(args.charts_wip_window or 6)))]),
                                        start_column=committed_column,
                                        end_column=final_column,
                                        title=args.charts_wip_title)
            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_wip, bbox_inches='tight', dpi=300)

        if args.charts_ageing_wip:
            print("Drawing ageing WIP chart in", args.charts_ageing_wip)
            charting.set_style('whitegrid')
            try:
                ax = charting.ageing_wip_chart(
                    cycle_data,
                    start_column=committed_column,
                    end_column=final_column,
                    done_column=done_column,
                    title=args.charts_ageing_wip_title)
            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_ageing_wip,
                            bbox_inches='tight',
                            dpi=300)

        if args.charts_net_flow:
            print("Drawing net flow chart in", args.charts_net_flow)
            charting.set_style('darkgrid')
            try:
                ax = charting.net_flow_chart(q.cfd(
                    cycle_data[cycle_data[backlog_column] >= (
                        datetime.date.today() - datetime.timedelta(
                            weeks=(args.charts_net_flow_window or 6)))]),
                                             start_column=committed_column,
                                             end_column=done_column,
                                             title=args.charts_net_flow_title)
            except charting.UnchartableData as e:
                print("** WARNING: Did not draw chart:", e)
            else:
                fig = ax.get_figure()
                fig.savefig(args.charts_net_flow, bbox_inches='tight', dpi=300)

    print("Done")
Example No. 45
 def __init__(self,
              xsizes,
              intersects,
              outf='cairotest.pdf',
              width=1024,
              height=1024,
              bgcol='embl_gray125',
              cols=None,
              interscols=None,
              ysizes=None,
              ycols=None,
              skip=3.5,
              margin=24,
              mincircle=5,
              cellpadding=4):
     for key, val in iteritems(locals()):
         setattr(self, key, val)
     self.colors = {
         'embl_green': (115, 179, 96, 255),
         'embl_blue': (0, 102, 102, 255),
         'embl_yellow': (250, 183, 0, 255),
         'embl_red': (227, 62, 62, 255),
         'embl_black': (0, 0, 0, 255),
         'embl_gray875': (32, 32, 32, 255),
         'embl_gray75': (64, 64, 64, 255),
         'embl_gray625': (96, 96, 96, 255),
         'embl_gray50': (128, 128, 128, 255),
         'embl_gray25': (192, 192, 192, 255),
         'embl_gray125': (224, 224, 224, 255),
         'white': (255, 255, 255, 255)
     }
     # positions of circle labels:
     self.clabels = [(0.5, math.sqrt(3) / -2.0), (math.sqrt(3) / 2.0, -0.5),
                     (math.sqrt(2) / 2.0, math.sqrt(2) / -2.0)]
     self.palette = [
         self.colors[x]
         for x in ['embl_green', 'embl_blue', 'embl_yellow']
     ]
     self.fontpal = [
         self.colors[x] for x in ['embl_gray875', 'white', 'embl_gray875']
     ]
     self.font = 'HelveticaNeueLT Std Lt'
     self.bgcol = self.bgcol if type(self.bgcol) is tuple else self.colors[
         self.bgcol]
     # set parameters for x:
     # list of column labels (first elements of tuples in x list)
     self.xlabs = [x[0] for x in self.xsizes]
     # colors of sets in column headers
     self.xcols = self.get_colors(self.xsizes)
     # set sizes:
     self.xsizes = self.get_sizes(self.xsizes)
     # same for y:
     self.ylabs = [y[0] for y in self.ysizes
                   ] if self.ysizes is not None else self.xlabs
     self.ycols = self.get_colors(self.ysizes) \
         if self.ysizes is not None else self.xcols
     self.ysizes = self.get_sizes(self.ysizes) \
         if self.ysizes is not None else self.xsizes
     # margin:
     # margin is either a single integer, or a tuple of 4 integers:
     self.margin = self.margin if type(self.margin) is tuple else (
         self.margin, ) * 4
     # table:
     # proportions of cell sizes:
     self.xcellsize = [3, 1] + [3] * len(self.xsizes)
     self.ycellsize = [3, 1] + [3] * len(self.ysizes)
     # sizes of table cells:
     self.xcoo = self.cells(self.xcellsize, self.margin[0],
                            self.width - self.margin[1])
     self.ycoo = self.cells(self.ycellsize, self.margin[2],
                            self.height - self.margin[3])
     # width and height of diagram cells:
     self.cellw = self.xcoo[0]
     self.cellh = self.ycoo[0]
     # largest circle fit in the cells:
     self.maxcircle = min(self.cellw, self.cellh) / 2 - 2 * self.cellpadding
     self.maxarea = pow(self.maxcircle, 2) * math.pi
     self.minarea = pow(self.mincircle, 2) * math.pi
     # scaling circle sizes between min and max circle size
     self.xcircsize = self.scale_sizes(self.xsizes)
     self.ycircsize = self.scale_sizes(self.ysizes)
     ssize = self.scale_sizes([x['size'] for x in self.intersects.values()])
     for i, k in enumerate(self.intersects.keys()):
         self.intersects[k]['ssize'] = ssize[i]
         if 'color' not in self.intersects[k]:
             self.intersects[k]['color'] = self.palette[0:len(ssize[i])]
Example No. 46
 def __init__(self,
              graph=None,
              filename=None,
              graphix_dir="pdf",
              graphix_format="pdf",
              name=None,
              title_text=None,
              title_font_family=None,
              title_font_size=None,
              title_color='#646567',
              size=None,
              layout="fruchterman_reingold",
              layout_param=None,
              vertex_label=None,
              vertex_size=None,
              vertex_label_size='degree_label_size',
              edge_width=None,
              vertex_color='#6EA945',
              vertex_label_color='#007B7F',
              vertex_alpha='AA',
              vertex_frame_color='#FFFFFF00',
              vertex_frame_width=0,
              edge_label=None,
              edge_label_size=None,
              edge_label_color='#007B7F',
              edge_curved=None,
              edge_color='#818284',
              edge_alpha='AA',
              autocurve=None,
              vertex_label_font="sans-serif",
              edge_label_font="sans-serif",
              edge_arrow_size=1.0,
              edge_arrow_width=1.0,
              palettes={},
              bbox=None,
              margin=10,
              small=None,
              dimensions=(1280, 1280),
              grouping=None,
              **kwargs):
     # setting parameters:
     for key, val in iteritems(locals()):
         setattr(self, key, val)
     self.default_alpha = {
         'vertex_color': 'AA',
         'edge_color': 'AA',
         'vertex_label_color': 'FF',
         'vertex_frame_color': '00',
         'edge_label_color': 'FF'
     }
     self.default_vertex_label_size = 6.0
     self.plots = []
     self.session = gen_session_id()
     self.loglevel = 'INFO'
     self.ownlog = logn.logw(self.session, self.loglevel)
     self.name = self.name if self.name is not None else self.session
     self.label_sizes = {
         'small': (15.0, 13.7),
         'medium': (13.0, 10.0),
         'large': (9.0, 6.0)
     }
     self.palettes = {
         'vertex': ['#6EA945', '#007B7F', '#FCCC06', '#DA0025', '#000000'],
         'edge': ['#007B7F', '#6EA945', '#DA0025'],
         'vertex_label': ['#454447'],
         'edge_label': ['#454447']
     }
     self.small_param = {
         'vertex_size': 21,
         'edge_width': 0.051,
         'autocurve': True,
         'vertex_label_dist': 1.5
     }
     self.medium_param = {
         'vertex_size': 7,
         'edge_width': 0.051,
         'autocurve': True,
         'vertex_label_dist': 1.33,
         'edge_label_size': 1.0
     }
     self.large_param = {
         'vertex_size': 2,
         'edge_width': 0.051,
         'vertex_label_dist': 1.0,
         'edge_label_size': 1.0
     }
     self.layout_defaults = {
         'fruchterman_reingold': {
             'repulserad': self.graph.vcount()**2.8,
             'maxiter': 1000,
             'area': self.graph.vcount()**2.3
         }
     }
     self.update_page()
     self.update_graph(graph)
Example No. 47
 def iteritems(self):
     return iteritems(self.dictionary)
Example No. 48
 def set_defaults(self, preset):
     if hasattr(self, preset):
         for k, v in iteritems(getattr(self, preset)):
             self.set_param(k, v)
Example No. 49
 def _attributes(self):
     return {
         k: v
         for k, v in iteritems(vars(self))
         if not (k.startswith('_') or k == 'id')
     }
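A quick illustration of what this filter keeps, using a made-up object: private attributes and the id field are dropped, everything else in the instance dict survives.

class Row(object):  # hypothetical
    def __init__(self):
        self.id = 7
        self.name = 'ann'
        self._dirty = True

    def _attributes(self):
        return {k: v for k, v in vars(self).items()
                if not (k.startswith('_') or k == 'id')}

assert Row()._attributes() == {'name': 'ann'}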
Example No. 50
 def __reduce__(self):
     return dict, tuple(), None, None, iter(iteritems(self.dictionary))
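The 5-tuple returned here uses pickle's reduce protocol: (callable, args, state, listitems, dictitems). Unpickling calls dict() and then replays the (key, value) pairs from the iterator into it, so the wrapper round-trips as a plain dict. A self-contained sketch (DictProxy is a made-up name):

import pickle
from six import iteritems

class DictProxy(object):
    def __init__(self, dictionary):
        self.dictionary = dictionary

    def __reduce__(self):
        # pickle rebuilds this as dict(), then sets each (key, value) pair
        return dict, tuple(), None, None, iter(iteritems(self.dictionary))

restored = pickle.loads(pickle.dumps(DictProxy({'a': 1})))
assert restored == {'a': 1} and type(restored) is dict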
Example No. 51
 def asDict(self):
     retval = {}
     for interface, handler in iteritems(self.handlers):
         retval.update(handler.asDict())
     return retval
Example No. 52
def mergePackageDependenciesList(package, isDependencies=True):
    packageDepDict = dict()
    if isDependencies:
        routineDeps = package.getPackageRoutineDependencies()
        globalDeps = package.getPackageGlobalDependencies()
        globalRtnDeps = package.getPackageGlobalRoutineDependencies()
        globalGblDeps = package.getPackageGlobalGlobalDependencies()
        fileManDeps = package.getPackageFileManFileDependencies()
        dbCallDeps = package.getPackageFileManDbCallDependencies()
        optionDeps = package.getPackageComponentDependencies()
    else:
        routineDeps = package.getPackageRoutineDependents()
        globalDeps = package.getPackageGlobalDependents()
        globalRtnDeps = package.getPackageGlobalRoutineDependendents()
        globalGblDeps = package.getPackageGlobalGlobalDependents()
        fileManDeps = package.getPackageFileManFileDependents()
        dbCallDeps = package.getPackageFileManDbCallDependents()
        optionDeps = {}
    # Each package row holds 14 counters, two per dependency source:
    # [routine(0,1), global(2,3), fileman(4,5), dbcall(6,7),
    #  option(8,9), globalRtn(10,11), globalGbl(12,13)]
    depSources = [routineDeps, globalDeps, fileManDeps, dbCallDeps,
                  optionDeps, globalRtnDeps, globalGblDeps]
    for idx, deps in enumerate(depSources):
        for (package, depTuple) in iteritems(deps):
            if package not in packageDepDict:
                packageDepDict[package] = [0] * 14
            packageDepDict[package][idx * 2] = len(depTuple[0])
            packageDepDict[package][idx * 2 + 1] = len(depTuple[1])
    return packageDepDict
Example No. 53
        def _convert_to_variant_record(self, record, infos, formats):
            """Converts the PyVCF record to a :class:`Variant` object.

            Args:
              record (:class:`~vcf.model._Record`): An object containing info
                about a variant.
              infos (dict): The PyVCF dict storing INFO extracted from the VCF
                header. The key is the info key and the value is
                :class:`~vcf.parser._Info`.
              formats (dict): The PyVCF dict storing FORMAT extracted from the
                VCF header. The key is the FORMAT key and the value is
                :class:`~vcf.parser._Format`.

            Returns:
              A :class:`Variant` object from the given record.
            """
            variant = Variant()
            variant.reference_name = record.CHROM
            variant.start = record.start
            variant.end = record.end
            variant.reference_bases = (
                record.REF if record.REF != MISSING_FIELD_VALUE else None)
            # ALT fields are classes in PyVCF (e.g. Substitution), so need convert
            # them to their string representations.
            variant.alternate_bases.extend([str(r) for r in record.ALT
                                            if r] if record.ALT else [])
            variant.names.extend(record.ID.split(';') if record.ID else [])
            variant.quality = record.QUAL
            # PyVCF uses None for '.' and an empty list for 'PASS'.
            if record.FILTER is not None:
                variant.filters.extend(
                    record.FILTER if record.FILTER else [PASS_FILTER])
            for k, v in iteritems(record.INFO):
                # Special case: END info value specifies end of the record, so adjust
                # variant.end and do not include it as part of variant.info.
                if k == END_INFO_KEY:
                    variant.end = v
                    continue
                field_count = None
                if k in infos:
                    field_count = self._get_field_count_as_string(infos[k].num)
                variant.info[k] = VariantInfo(data=v, field_count=field_count)
            for sample in record.samples:
                call = VariantCall()
                call.name = sample.sample
                for allele in sample.gt_alleles or [MISSING_GENOTYPE_VALUE]:
                    if allele is None:
                        allele = MISSING_GENOTYPE_VALUE
                    call.genotype.append(int(allele))
                phaseset_from_format = (getattr(
                    sample.data, PHASESET_FORMAT_KEY) if PHASESET_FORMAT_KEY
                                        in sample.data._fields else None)
                # Note: Call is considered phased if it contains the 'PS' key regardless
                # of whether it uses '|'.
                if phaseset_from_format or sample.phased:
                    call.phaseset = (str(phaseset_from_format)
                                     if phaseset_from_format else
                                     DEFAULT_PHASESET_VALUE)
                for field in sample.data._fields:
                    # Genotype and phaseset (if present) are already included.
                    if field in (GENOTYPE_FORMAT_KEY, PHASESET_FORMAT_KEY):
                        continue
                    data = getattr(sample.data, field)
                    # Convert single values to a list for cases where the number of fields
                    # is unknown. This is to ensure consistent types across all records.
                    # Note: this is already done for INFO fields in PyVCF.
                    if (field in formats
                            and formats[field].num is None and isinstance(
                                data, (int, float, long, str, unicode, bool))):
                        data = [data]
                    call.info[field] = data
                variant.calls.append(call)
            return variant
Example No. 54
    raise ValueError("Unable to parse client id from session_id: %s" %
                     session_id)


status_map = {
    rdf_flows.GrrStatus.ReturnedStatus.OK: FlowStatus.Status.OK,
    rdf_flows.GrrStatus.ReturnedStatus.IOERROR: FlowStatus.Status.IOERROR,
    rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:
    FlowStatus.Status.CLIENT_KILLED,
    rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED:
    FlowStatus.Status.NETWORK_LIMIT_EXCEEDED,
    rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR: FlowStatus.Status.ERROR,
}

inv_status_map = {v: k for k, v in iteritems(status_map)}


def FlowResponseForLegacyResponse(legacy_msg):
    """Helper function to convert legacy client replies to flow responses."""
    if legacy_msg.type == legacy_msg.Type.MESSAGE:
        response = FlowResponse(client_id=_ClientIDFromSessionID(
            legacy_msg.session_id),
                                flow_id=legacy_msg.session_id.Basename(),
                                request_id=legacy_msg.request_id,
                                response_id=legacy_msg.response_id,
                                payload=legacy_msg.payload)
    elif legacy_msg.type == legacy_msg.Type.STATUS:
        legacy_status = legacy_msg.payload
        if legacy_status.status not in status_map:
            raise ValueError("Unable to convert returned status: %s" %
Example No. 55
 def _list(self, dir_or_prefix):
   for path, size in iteritems(self._files):
     if path.startswith(dir_or_prefix):
       yield FileMetadata(path, size)
Example No. 56
    def init_flat_sio(self,
                      cid,
                      sio,
                      data_format,
                      transport,
                      wsgi_environ,
                      required_list,
                      _sio_container=(tuple, list)):
        """ Initializes flat SIO requests, i.e. not list ones.
        """
        self.is_xml = data_format == SIMPLE_IO.FORMAT.XML
        self.data_format = data_format
        self.transport = transport
        self._wsgi_environ = wsgi_environ

        optional_list = getattr(sio, 'input_optional', [])
        optional_list = optional_list if isinstance(
            optional_list, _sio_container) else [optional_list]

        path_prefix = getattr(sio, 'request_elem', 'request')
        default_value = getattr(sio, 'default_value', NO_DEFAULT_VALUE)
        use_text = getattr(sio, 'use_text', True)
        use_channel_params_only = getattr(sio, 'use_channel_params_only',
                                          False)
        self.encrypt_secrets = getattr(sio, 'encrypt_secrets', True)

        if self.simple_io_config:
            self.has_simple_io_config = True
            self.bool_parameter_prefixes = self.simple_io_config.get(
                'bool_parameter_prefixes', [])
            self.int_parameters = self.simple_io_config.get(
                'int_parameters', [])
            self.int_parameter_suffixes = self.simple_io_config.get(
                'int_parameter_suffixes', [])
            self.bytes_to_str_encoding = self.simple_io_config['bytes_to_str'][
                'encoding']
        else:
            self.payload = self.raw_request

        required_params = {}

        if required_list:

            required_list = required_list if isinstance(
                required_list, _sio_container) else [required_list]

            # Needs to check for this exact default value to prevent a FutureWarning in 'if not self.payload'
            if self.payload == '' and not self.channel_params:
                raise ZatoException(cid, 'Missing input')

            required_params.update(
                self.get_params(required_list, use_channel_params_only,
                                path_prefix, default_value, use_text))

        if optional_list:
            optional_params = self.get_params(optional_list,
                                              use_channel_params_only,
                                              path_prefix, default_value,
                                              use_text, False)
        else:
            optional_params = {}

        self.input.update(required_params)
        self.input.update(optional_params)

        for param, value in iteritems(self.channel_params):
            if param not in self.input:
                self.input[param] = value
Example No. 57
 def clean_up(self, servermap):
     for ip, port in iteritems(servermap):
         result = ClientGen.call(ip, port, 'STOP')
         log.info('stop server @{}:{}, {}'.format(ip, port, result))
Example No. 58
def dictionaries_eq(a, b):
    return set(iteritems(a)) == set(iteritems(b))
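One caveat: turning the item pairs into sets requires every value to be hashable, so this helper raises TypeError for dicts holding lists or nested dicts, where plain a == b would still work. For example:

from six import iteritems

def dictionaries_eq(a, b):
    return set(iteritems(a)) == set(iteritems(b))

assert dictionaries_eq({'x': 1, 'y': 2}, {'y': 2, 'x': 1})
# dictionaries_eq({'x': [1]}, {'x': [1]})  -> TypeError: unhashable type: 'list'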
Example No. 59
def build_shoe_diagram(config):
    builder = DiagramBuilder()

    station = builder.AddSystem(ManipulationDiagram(config))
    station.add_rope_and_ground(include_ground=False)
    if 'arms' in config['env']:
        station.add_arms_from_config(config)
    parser = Parser(station.mbp, station.sg)
    shoe_dir = os.path.dirname(os.path.abspath(__file__))
    model_file = os.path.join(shoe_dir, "model/shoe.sdf")
    shoe_model = parser.AddModelFromFile(model_file, "shoe")
    if config["env"]["visualization"]:
        station.connect_to_drake_visualizer()
    visualizer = None
    if "meshcat" in config["env"] and config["env"]["meshcat"]:
        visualizer = station.connect_to_meshcat()
    if config["env"]["parameterization"] == "closed":
        left_rope_point = station.add_vis_object("left_rope", [1, 0, 0, 1])
        right_rope_point = station.add_vis_object("right_rope", [0, 1, 0, 1])
        left_target_point = station.add_vis_object("left_target_point",
                                                   [1, 0, 0, 1])
        right_target_point = station.add_vis_object("right_target_point",
                                                    [0, 1, 0, 1])
    if config["env"]["rgbd_sensors"]["enabled"]:
        station.add_rgbd_sensors_from_config(config)

    station.finalize()

    post_finalize_rope_settings(config, station.mbp, station.sg)

    targets = {}
    # sp_control and pid are only created when arms are configured, but the
    # systems dict below references them unconditionally
    sp_control = None
    pid = None

    if 'arms' in config['env']:
        gripper_info = {}
        for arm_name, arm_config in iteritems(config['env']['arms']):
            # Add PID control for each gripper
            gripper = station.mbp.GetBodyByName("body",
                                                station.model_ids[arm_name])
            gripper_info[arm_name] = gripper.index()

            # Initialize targets from the configuration
            init = arm_config["rpy"][:]
            init.extend(arm_config["pos"])
            targets[arm_name] = builder.AddSystem(ConstantVectorSource(init))
            width_init = arm_config["grip"]
            targets[f"{arm_name}_width"] = builder.AddSystem(
                ConstantVectorSource([width_init]))
        pid = builder.AddSystem(SpatialHandController(gripper_info))
        builder.Connect(station.GetOutputPort("body_poses"),
                        pid.GetInputPort("body_positions"))
        sp_control = builder.AddSystem(
            SetpointController(
                gripper_info, {
                    "position": [0.005, 0.005, 0.005, 0.0003, 0.0003, 0.0003],
                    "width": 0.001
                }))
        for arm_name in config['env']['arms']:
            builder.Connect(targets[arm_name].get_output_port(0),
                            sp_control.GetInputPort(f"{arm_name}_target"))
            builder.Connect(sp_control.GetOutputPort(f"{arm_name}_setpoint"),
                            pid.GetInputPort(f"{arm_name}_desired"))
            builder.Connect(
                targets[f"{arm_name}_width"].get_output_port(0),
                sp_control.GetInputPort(f"{arm_name}_width_target"))
            builder.Connect(
                sp_control.GetOutputPort(f"{arm_name}_width_setpoint"),
                station.GetInputPort(f"{arm_name}_position"))
        builder.Connect(pid.GetOutputPort("spatial_forces_vector"),
                        station.GetInputPort("spatial_input"))
    diagram = builder.Build()

    simulator = Simulator(diagram)
    sim_context = simulator.get_mutable_context()
    station_context = diagram.GetMutableSubsystemContext(station, sim_context)

    systems = {
        "station": station,
        "targets": targets,
        "sp_control": sp_control,
        "pid": pid
    }
    if config["env"]["parameterization"] == "closed":
        systems["left_rope"] = left_rope_point
        systems["right_rope"] = right_rope_point
        systems["left_target_point"] = left_target_point
        systems["right_target_point"] = right_target_point
    if 'arms' in config['env']:
        # Fix each arm's gripper force limit to a constant value
        for arm_name in config['env']['arms']:
            station.GetInputPort(f"{arm_name}_force_limit").FixValue(
                station_context, 40.)
    simulator.set_target_realtime_rate(config['env']['target_realtime_rate'])
    reset_simulator_from_config(config, simulator, diagram, systems)

    return simulator, diagram, systems, visualizer
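A hedged usage sketch: the config keys follow those read inside build_shoe_diagram, but the values here are illustrative only.

config = {
    'env': {
        'visualization': False,
        'parameterization': 'open',
        'rgbd_sensors': {'enabled': False},
        'target_realtime_rate': 1.0,
    }
}
simulator, diagram, systems, visualizer = build_shoe_diagram(config)
simulator.AdvanceTo(1.0)  # advance the Drake simulation by one second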
Ejemplo n.º 60
0
    def get_datasources(self):
        """
        Get datasource or create it if it does not exist

        :return: None
        """
        headers = {"Authorization": "Bearer " + self.api_key}
        try:
            url = "%s://%s:%s/api/datasources" % (self.scheme, self.host,
                                                  self.port)
            response = requests.get(url, headers=headers, timeout=10)
        except requests.exceptions.SSLError as e:
            print("[cron_grafana] SSL connection error to grafana %s: %s" %
                  (self.name, e))
            current_app.logger.error(
                "[cron_grafana] SSL connection error to grafana %s "
                "for dashboard creation: %s", self.name, e)
            return
        except requests.exceptions.RequestException as e:
            print("[cron_grafana] Connection error to grafana %s: %s" %
                  (self.name, e))
            current_app.logger.error(
                "[cron_grafana] Connection error to grafana %s "
                "for dashboard creation: %s", self.name, e)
            self.connection = False
            return
        resp = response.json()
        if 'message' in resp:
            print("----------")
            print("Grafana message: %s" % resp['message'])
            print("----------")
            return

        # get existing datasource in grafana
        self.datasources = {}
        for datasource in resp:
            self.datasources[datasource['name']] = {
                'id': datasource['id'],
                'ts_id': None
            }

        # associate the datasource to timeseries data
        for _, timeserie in iteritems(self.timeseries):
            ds_name = 'alignak-' + timeserie['type'] + '-' + timeserie['name']
            if ds_name in self.datasources.keys():
                self.datasources[ds_name]['ts_id'] = str(timeserie['_id'])
                continue

            # Missing datasource, create it
            # Note that a newly created datasource is not set as the default one
            if timeserie['type'] == 'influxdb':
                data = {
                    "name": ds_name,
                    "type": "influxdb",
                    "typeLogoUrl": "",
                    "access": "proxy",
                    "url": "http://" + timeserie['address'] + ":" +
                           str(timeserie['port']),
                    "password": timeserie['password'],
                    "user": timeserie['login'],
                    "database": timeserie['database'],
                    "basicAuth": False,
                    "basicAuthUser": "",
                    "basicAuthPassword": "",
                    "withCredentials": False,
                    "isDefault": False,
                    "jsonData": {}
                }
            elif timeserie['type'] == 'graphite':
                data = {
                    "name": ds_name,
                    "type": "graphite",
                    "access": "proxy",
                    "url": "http://%s:%s" % (timeserie['graphite_address'],
                                             timeserie['graphite_port']),
                    "basicAuth": False,
                    "basicAuthUser": "",
                    "basicAuthPassword": "",
                    "withCredentials": False,
                    "isDefault": False,
                    "jsonData": {}
                }
            else:
                # Unknown timeseries type: skip it instead of posting stale
                # or undefined data
                continue

            # Request datasource creation (use the same timeout as the GET above)
            response = requests.post(self.scheme + '://' + self.host + ':' +
                                     self.port + '/api/datasources',
                                     json=data,
                                     headers=headers,
                                     timeout=10)
            resp = response.json()
            # resp is as: {u'message': u'Datasource added', u'id': 4}
            if 'id' not in resp and 'message' in resp:
                current_app.logger.info("Grafana message: %s", resp['message'])
                return
            current_app.logger.info(
                "[grafana-%s] datasource created: '%s': id = %s", self.name,
                ds_name, resp['id'])
            self.datasources[ds_name] = {
                'id': resp['id'],
                'ts_id': str(timeserie['_id'])
            }

        current_app.logger.info("[grafana-%s] available datasources:",
                                self.name)
        for ds_name, datasource in iteritems(self.datasources):
            current_app.logger.info("- %s: %s", ds_name, datasource)