Example no. 1
  def _CheckOSStats(self, label, attribute, counts):

    fd = aff4.FACTORY.Open(
        "aff4:/stats/ClientFleetStats/%s" % label, token=self.token)
    histogram = fd.Get(attribute)

    # There should be counts[0] instances in 1 day actives.
    self.assertEqual(histogram[0].title, "1 day actives for %s label" % label)
    self.assertEqual(len(histogram[0]), counts[0])

    # There should be counts[1] instances in 7 day actives.
    self.assertEqual(histogram[1].title, "7 day actives for %s label" % label)
    self.assertEqual(len(histogram[1]), counts[1])

    # There should be counts[2] instances in 14 day actives for linux and
    # windows.
    self.assertEqual(histogram[2].title, "14 day actives for %s label" % label)
    all_labels = []
    for item in histogram[2]:
      all_labels.append(item.label)
      self.assertEqual(item.y_value, counts[2][item.label])
    self.assertItemsEqual(all_labels, list(iterkeys(counts[2])))

    # There should be counts[3] instances in 30 day actives for linux and
    # windows.
    self.assertEqual(histogram[3].title, "30 day actives for %s label" % label)
    all_labels = []
    for item in histogram[3]:
      all_labels.append(item.label)
      self.assertEqual(item.y_value, counts[3][item.label])
    self.assertItemsEqual(all_labels, list(iterkeys(counts[3])))
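A note on the helper used throughout: every example in this listing calls iterkeys(...) rather than dict.keys(). That function comes from a Python 2/3 compatibility layer (six or future.utils, depending on the project); a minimal sketch of its behaviour, assuming those usual semantics:

def iterkeys(d):
    # Rough equivalent of six.iterkeys / future.utils.iterkeys: an iterator
    # over the mapping's keys on both Python 2 and Python 3.
    try:
        return iter(d.iterkeys())  # Python 2 dicts
    except AttributeError:
        return iter(d.keys())      # Python 3 dicts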
Example no. 2
    def __init__(self, json):
        self.raw = json
        super(JSON_Map, self).__init__(json)

        cls = self.__class__

        # Map the json structure to event dispatcher properties
        # but only those attributes which do not already exist in the object
        properties = JSON_Map.map_attributes(self, json)

        self._python_properties = set()
        for c in cls.__mro__:
            for attr_name, attr in iteritems(c.__dict__):
                if isinstance(attr, property):
                    self._python_properties.add(attr_name)

        self._json_maps = {}
        for attr_name, attr in iteritems(self.__dict__):
            if isinstance(attr, JSON_Map) and attr_name in json:
                self._json_maps[attr_name] = attr

        with self.temp_unbind_all(*iterkeys(self.event_dispatcher_properties)):
            for key in iterkeys(properties):
                if key in json:
                    setattr(self, key, json[key])
        self.bind(**{p: partial(self._update_raw, p) for p in properties})
Example no. 3
  def testIndexCreate(self):
    spacing = 10
    with utils.Stubber(sequential_collection.IndexedSequentialCollection,
                       "INDEX_SPACING", spacing):

      urn = "aff4:/sequential_collection/testIndexCreate"
      collection = self._TestCollection(urn)
      # TODO(amoser): Without using a mutation pool, this test is really
      # slow on MySQL data store.
      with data_store.DB.GetMutationPool() as pool:
        for i in range(10 * spacing):
          collection.StaticAdd(urn, rdfvalue.RDFInteger(i), mutation_pool=pool)

      # It is too soon to build an index, check that we don't.
      self.assertEqual(collection._index, None)
      self.assertEqual(collection.CalculateLength(), 10 * spacing)
      self.assertEqual(list(iterkeys(collection._index)), [0])

      now = time.time() * 1e6
      twenty_seconds_ago = (time.time() - 20) * 1e6

      # Push the clock forward 10m, and we should build an index on access.
      with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
                             rdfvalue.Duration("10m")):
        # Read from start doesn't rebuild index (lazy rebuild)
        _ = collection[0]
        self.assertEqual(list(iterkeys(collection._index)), [0])

        self.assertEqual(collection.CalculateLength(), 10 * spacing)
        self.assertEqual(
            sorted(iterkeys(collection._index)),
            [i * spacing for i in xrange(10)])
        for index in collection._index:
          if not index:
            continue
          timestamp, suffix = collection._index[index]
          self.assertLessEqual(twenty_seconds_ago, timestamp)
          self.assertLessEqual(timestamp, now)
          self.assertTrue(0 <= suffix <= 0xFFFFFF)

      # Now check that the index was persisted to aff4 by re-opening
      # and checking that a read from head does load full index
      # (optimistic load):

      collection = self._TestCollection(
          "aff4:/sequential_collection/testIndexCreate")
      self.assertEqual(collection._index, None)
      _ = collection[0]
      self.assertEqual(
          sorted(iterkeys(collection._index)),
          [i * spacing for i in xrange(10)])
      for index in collection._index:
        if not index:
          continue
        timestamp, suffix = collection._index[index]
        self.assertLessEqual(twenty_seconds_ago, timestamp)
        self.assertLessEqual(timestamp, now)
        self.assertTrue(0 <= suffix <= 0xFFFFFF)
Example no. 4
  def WriteClientMetadata(self,
                          client_id,
                          certificate=None,
                          fleetspeak_enabled=None,
                          first_seen=None,
                          last_ping=None,
                          last_clock=None,
                          last_ip=None,
                          last_foreman=None,
                          cursor=None):
    """Write metadata about the client."""
    placeholders = []
    values = collections.OrderedDict()

    placeholders.append("%(client_id)s")
    values["client_id"] = db_utils.ClientIDToInt(client_id)

    if certificate:
      placeholders.append("%(certificate)s")
      values["certificate"] = certificate.SerializeToString()
    if fleetspeak_enabled is not None:
      placeholders.append("%(fleetspeak_enabled)s")
      values["fleetspeak_enabled"] = fleetspeak_enabled
    if first_seen is not None:
      placeholders.append("FROM_UNIXTIME(%(first_seen)s)")
      values["first_seen"] = mysql_utils.RDFDatetimeToTimestamp(first_seen)
    if last_ping is not None:
      placeholders.append("FROM_UNIXTIME(%(last_ping)s)")
      values["last_ping"] = mysql_utils.RDFDatetimeToTimestamp(last_ping)
    if last_clock:
      placeholders.append("FROM_UNIXTIME(%(last_clock)s)")
      values["last_clock"] = mysql_utils.RDFDatetimeToTimestamp(last_clock)
    if last_ip:
      placeholders.append("%(last_ip)s")
      values["last_ip"] = last_ip.SerializeToString()
    if last_foreman:
      placeholders.append("FROM_UNIXTIME(%(last_foreman)s)")
      values["last_foreman"] = mysql_utils.RDFDatetimeToTimestamp(last_foreman)

    updates = []
    for column in iterkeys(values):
      updates.append("{column} = VALUES({column})".format(column=column))

    query = """
    INSERT INTO clients ({columns})
    VALUES ({placeholders})
    ON DUPLICATE KEY UPDATE {updates}
    """.format(
        columns=", ".join(iterkeys(values)),
        placeholders=", ".join(placeholders),
        updates=", ".join(updates))

    cursor.execute(query, values)
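To make the string assembly above concrete, here is a toy reproduction of the query construction with two hypothetical columns; it only exercises the formatting logic, not MySQL itself:

import collections

values = collections.OrderedDict()
placeholders = []

placeholders.append("%(client_id)s")
values["client_id"] = 12345
placeholders.append("FROM_UNIXTIME(%(last_ping)s)")
values["last_ping"] = 1550000000.0

updates = ["{column} = VALUES({column})".format(column=c) for c in values]
query = """
INSERT INTO clients ({columns})
VALUES ({placeholders})
ON DUPLICATE KEY UPDATE {updates}
""".format(
    columns=", ".join(values),
    placeholders=", ".join(placeholders),
    updates=", ".join(updates))
print(query)
# INSERT INTO clients (client_id, last_ping)
# VALUES (%(client_id)s, FROM_UNIXTIME(%(last_ping)s))
# ON DUPLICATE KEY UPDATE client_id = VALUES(client_id), last_ping = VALUES(last_ping)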
Example no. 5
  def testExternalFileStoreSubmissionIsTriggeredWhenFileIsSentToFileStore(
      self, add_file_mock):
    if not data_store.RelationalDBEnabled():
      self.skipTest("Relational filestore has to be enabled for this test.")

    client_mock = action_mocks.GetFileClientMock()
    pathspec = rdf_paths.PathSpec(
        pathtype=rdf_paths.PathSpec.PathType.OS,
        path=os.path.join(self.base_path, "test_img.dd"))

    flow_test_lib.TestFlowHelper(
        compatibility.GetName(transfer.GetFile),
        client_mock,
        token=self.token,
        client_id=self.client_id,
        pathspec=pathspec)

    add_file_mock.assert_called_once()
    args = add_file_mock.call_args_list[0][0]
    hash_id = list(iterkeys(args[0]))[0]
    self.assertIsInstance(hash_id, rdf_objects.SHA256HashID)
    self.assertEqual(
        args[0][hash_id].client_path,
        db.ClientPath.FromPathSpec(self.client_id.Basename(), pathspec))
    self.assertNotEmpty(args[0][hash_id].blob_refs)
    for blob_ref in args[0][hash_id].blob_refs:
      self.assertIsInstance(blob_ref, rdf_objects.BlobReference)
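The call_args_list inspection above relies on standard mock behaviour: each recorded call is a (positional args, keyword args) pair. A small self-contained illustration (the dictionary content here is made up):

from unittest import mock

add_file_mock = mock.Mock()
add_file_mock({"fake-hash-id": "file-info"})

args = add_file_mock.call_args_list[0][0]   # positional arguments of the first call
hash_id = list(args[0].keys())[0]
print(hash_id)  # fake-hash-id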
Example no. 6
def ParseHeader(table):
  """Parses header of osquery output.

  Args:
    table: A table in a "parsed JSON" representation.

  Returns:
    A parsed `rdf_osquery.OsqueryHeader` instance.
  """
  precondition.AssertIterableType(table, dict)

  prototype = None  # type: List[Text]

  for row in table:
    columns = list(iterkeys(row))
    if prototype is None:
      prototype = columns
    elif prototype != columns:
      message = "Expected columns '{expected}', got '{actual}' for table {json}"
      message = message.format(expected=prototype, actual=columns, json=table)
      raise ValueError(message)

  result = rdf_osquery.OsqueryHeader()
  for name in prototype or []:
    result.columns.append(rdf_osquery.OsqueryColumn(name=name))
  return result
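A dependency-free sketch of the same column-consistency check (without precondition or rdf_osquery; the table content is illustrative):

def parse_header_columns(table):
    prototype = None
    for row in table:
        columns = list(row.keys())
        if prototype is None:
            prototype = columns
        elif prototype != columns:
            raise ValueError("Expected columns %r, got %r" % (prototype, columns))
    return prototype or []

print(parse_header_columns([{"pid": "1", "name": "init"},
                            {"pid": "2", "name": "kthreadd"}]))
# ['pid', 'name']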
Example no. 7
  def testRunWMI(self):
    wmi_obj = self.windows.win32com.client.GetObject.return_value
    mock_query_result = mock.MagicMock()
    mock_query_result.Properties_ = []
    mock_config = client_test_lib.WMIWin32NetworkAdapterConfigurationMock
    wmi_properties = iteritems(mock_config.__dict__)
    for key, value in wmi_properties:
      keyval = mock.MagicMock()
      keyval.Name, keyval.Value = key, value
      mock_query_result.Properties_.append(keyval)

    wmi_obj.ExecQuery.return_value = [mock_query_result]

    result_list = list(self.windows.RunWMIQuery("select blah"))
    self.assertEqual(len(result_list), 1)

    result = result_list.pop()
    self.assertTrue(isinstance(result, rdf_protodict.Dict))
    nest = result["NestingTest"]

    self.assertEqual(nest["one"]["two"], [3, 4])
    self.assertTrue("Unsupported type" in nest["one"]["broken"])
    self.assertTrue(isinstance(nest["one"]["three"], rdf_protodict.Dict))

    self.assertEqual(nest["four"], [])
    self.assertEqual(nest["five"], "astring")
    self.assertEqual(nest["six"], [None, None, ""])
    self.assertEqual(nest["seven"], None)
    self.assertItemsEqual(iterkeys(nest["rdfvalue"]), ["a"])

    self.assertEqual(result["GatewayCostMetric"], [0, 256])
    self.assertTrue(isinstance(result["OpaqueObject"], basestring))
    self.assertTrue("Unsupported type" in result["OpaqueObject"])
Example no. 8
  def StoreBlobs(self, contents, token=None):
    """Creates or overwrites blobs."""

    contents_by_digest = {
        hashlib.sha256(content).hexdigest(): content for content in contents
    }

    urns = {self._BlobUrn(digest): digest for digest in contents_by_digest}

    mutation_pool = data_store.DB.GetMutationPool()

    existing = aff4.FACTORY.MultiOpen(
        urns, aff4_type=aff4.AFF4MemoryStreamBase, mode="r", token=token)

    for blob_urn, digest in iteritems(urns):
      if blob_urn in existing:
        logging.debug("Blob %s already stored.", digest)
        continue

      fd = aff4.FACTORY.Create(
          blob_urn,
          aff4.AFF4UnversionedMemoryStream,
          mode="w",
          token=token,
          mutation_pool=mutation_pool)
      content = contents_by_digest[digest]
      fd.Write(content)
      fd.Close()

      logging.debug("Got blob %s (length %s)", digest, len(content))

    mutation_pool.Flush()

    return list(iterkeys(contents_by_digest))
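The digest bookkeeping at the top of StoreBlobs can be seen in isolation below; it is only the hashing and keying, with no AFF4 involved:

import hashlib

contents = [b"first blob", b"second blob"]
contents_by_digest = {
    hashlib.sha256(content).hexdigest(): content for content in contents
}
# Two 64-character hex digests, one per distinct blob; identical content
# collapses onto the same key, so it is only stored once.
print(sorted(contents_by_digest))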
Example no. 9
File: build.py Project: google/grr
  def WriteBuildYaml(self, fd, build_timestamp=True):
    """Write build spec to fd."""
    output = {
        "Client.build_environment":
            rdf_client.Uname.FromCurrentSystem().signature(),
        "Template.build_type":
            config.CONFIG.Get("ClientBuilder.build_type", context=self.context),
        "Template.version_major":
            config.CONFIG.Get("Source.version_major", context=self.context),
        "Template.version_minor":
            config.CONFIG.Get("Source.version_minor", context=self.context),
        "Template.version_revision":
            config.CONFIG.Get("Source.version_revision", context=self.context),
        "Template.version_release":
            config.CONFIG.Get("Source.version_release", context=self.context),
        "Template.arch":
            config.CONFIG.Get("Client.arch", context=self.context)
    }

    if build_timestamp:
      output["Client.build_time"] = rdfvalue.RDFDatetime.Now()
    else:
      self.REQUIRED_BUILD_YAML_KEYS.remove("Client.build_time")

    for key, value in iteritems(output):
      output[key] = str(value)

    output["Template.build_context"] = self.context

    output_keys = set(iterkeys(output))
    if output_keys != self.REQUIRED_BUILD_YAML_KEYS:
      raise RuntimeError("Bad build.yaml: expected %s, got %s" %
                         (self.REQUIRED_BUILD_YAML_KEYS, output_keys))
    fd.write(yaml.Dump(output).encode("utf-8"))
Example no. 10
    def emit(self, event, *args, **kw):
        """
        Sends a Socket.IO ``event`` packet with the equivalent of
        ``[ event ] + args`` as the ``packet_obj`` argument.

        :param str event: the event name

        :param *args: optional event arguments

        :param str path: this parameter can only be provided as a keyword
            argument; if provided it will set the path for the ``event``
            packet

        :param callable callback: this parameter can only be provided as a
            keyword argument; if provided, it will be called with the
            results of any ``ack`` packet received that corresponds to the
            transmitted ``event`` packet (note: whether or not ``acks``
            are sent in response to ``event`` packets is application-
            specific; if no ``ack`` is sent, ``callback`` will never be
            called)

        :returns: a :class:`twisted.internet.defer.Deferred` whose
            callback is fired with a `None` argument after the packet is
            sent
        """
        if set(kw).difference(( 'callback', 'path' )):
            raise TypeError('unexpected keyword argument(s): {}'.format(', '.join(iterkeys(kw))))

        callback = kw.get('callback', None)
        path = kw.get('path', '/')
        args_obj = [ event ]
        args_obj.extend(args)

        return self.sendsiopacket(SIO_TYPE_EVENT, args_obj, packet_path=path, ack_callback=callback)
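The keyword-argument validation at the start of emit() is a small reusable idiom: reject anything outside an allowed set before unpacking. A standalone sketch (the allowed names mirror the method above):

def check_kwargs(kw, allowed=('callback', 'path')):
    unexpected = set(kw).difference(allowed)
    if unexpected:
        raise TypeError('unexpected keyword argument(s): {}'.format(
            ', '.join(sorted(unexpected))))
    return kw.get('callback'), kw.get('path', '/')

print(check_kwargs({'path': '/chat'}))   # (None, '/chat')
# check_kwargs({'pathh': '/chat'})       # would raise TypeError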
Example no. 11
  def _PostRequest( self, request, request_data ):
    """Send a raw request with the supplied request block, and
    return the server's response. If the server is not running, it is started.

    This method is useful where the query block is not supplied, i.e. where just
    the files are being updated.

    The request block should contain the optional query block only. The file
    data are added automatically."""

    if not self._ServerIsRunning():
      raise ValueError( 'Not connected to server' )

    def MakeIncompleteFile( name, file_data ):
      return {
        'type': 'full',
        'name': name,
        'text': file_data[ 'contents' ],
      }

    file_data = request_data.get( 'file_data', {} )

    full_request = {
      'files': [ MakeIncompleteFile( x, file_data[ x ] )
                 for x in iterkeys( file_data ) ],
    }
    full_request.update( request )

    response = requests.post( self._GetServerAddress(),
                              json = full_request )

    if response.status_code != http.client.OK:
      raise RuntimeError( response.text )

    return response.json()
Example no. 12
def expand_memoize_args(kwargs):
    global memoize_default_options
    kwargs = set_defaults(kwargs, memoize_default_options)
    if any(x not in memoize_default_options for x in iterkeys(kwargs)):
        raise TypeError("Received unexpected arguments to @memoize")

    return kwargs
Example no. 13
  def ReadPathInfosHistories(self, client_id, path_type, components_list):
    """Reads a collection of hash and stat entries for given paths."""
    results = {}

    for components in components_list:
      try:
        path_record = self.path_records[(client_id, path_type, components)]
      except KeyError:
        results[components] = []
        continue

      entries_by_ts = {}
      for ts, stat_entry in path_record.GetStatEntries():
        pi = rdf_objects.PathInfo(
            path_type=path_type,
            components=components,
            timestamp=ts,
            stat_entry=stat_entry)
        entries_by_ts[ts] = pi

      for ts, hash_entry in path_record.GetHashEntries():
        try:
          pi = entries_by_ts[ts]
        except KeyError:
          pi = rdf_objects.PathInfo(
              path_type=path_type, components=components, timestamp=ts)
          entries_by_ts[ts] = pi

        pi.hash_entry = hash_entry

      results[components] = [
          entries_by_ts[k] for k in sorted(iterkeys(entries_by_ts))
      ]

    return results
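The core of ReadPathInfosHistories is a merge of two timestamp-keyed series. The stripped-down version below shows that merge with plain dicts instead of rdf_objects.PathInfo (values are made up):

stat_entries = {10: "stat@10", 30: "stat@30"}
hash_entries = {10: "hash@10", 20: "hash@20"}

entries_by_ts = {ts: {"stat_entry": s} for ts, s in stat_entries.items()}
for ts, h in hash_entries.items():
    entries_by_ts.setdefault(ts, {})["hash_entry"] = h

history = [entries_by_ts[ts] for ts in sorted(entries_by_ts)]
print(history)
# [{'stat_entry': 'stat@10', 'hash_entry': 'hash@10'},
#  {'hash_entry': 'hash@20'},
#  {'stat_entry': 'stat@30'}]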
Example no. 14
 def _call_hmset(self, command, *args, **kwargs):
     if self.indexable:
         current = self.proxy_get()
         _to_deindex = dict((k, current[k]) for k in iterkeys(kwargs) if k in current)
         self.deindex(_to_deindex)
         self.index(kwargs)
     return self._traverse_command(command, kwargs)
Example no. 15
    def delete(self, query, commit=True):
        """
        Delete documents in a collection.

        :param query: query parameters. Here `query` can be a :class:`~solrcloudpy.parameters.SearchOptions` instance, or a dictionary
        :type query: SearchOptions
        :type query: dict
        :param commit: whether to commit the change or not
        :type commit: bool
        :return: the response
        :rtype: SolrResponse
        :raise: SolrException
        """
        if 'q' not in iterkeys(query):
            raise ValueError("query should have a 'q' parameter")

        if hasattr(query, 'commonparams'):
            q = list(query.commonparams['q'])
            q = q[0]
        else:
            q = query['q']

        m = json.dumps({"delete": {"query": "%s" % q}})

        response = self._update(m)
        if commit:
            self.commit()
        return response
Example no. 16
File: client.py Project: bhyvex/grr
def main(unused_args):
  # Allow per platform configuration.
  config.CONFIG.AddContext(contexts.CLIENT_CONTEXT,
                           "Context applied when we run the client process.")

  client_startup.ClientInit()

  if flags.FLAGS.install:
    installer.RunInstaller()

  errors = config.CONFIG.Validate(["Client", "CA", "Logging"])

  if errors and list(iterkeys(errors)) != ["Client.private_key"]:
    raise config_lib.ConfigFormatError(errors)

  if config.CONFIG["Client.fleetspeak_enabled"]:
    raise ValueError(
        "This is not a Fleetspeak client, yet 'Client.fleetspeak_enabled' is "
        "set to 'True'.")

  enrollment_necessary = not config.CONFIG.Get("Client.private_key")
  # Instantiating the client will create a private_key so we need to use a flag.
  client = comms.GRRHTTPClient(
      ca_cert=config.CONFIG["CA.certificate"],
      private_key=config.CONFIG.Get("Client.private_key", default=None))

  if enrollment_necessary:
    logging.info("No private key found, starting enrollment.")
    client.InitiateEnrolment()

  if flags.FLAGS.break_on_start:
    pdb.set_trace()
  else:
    client.Run()
Example no. 17
def ReplaceChunks( chunks ):
  """Apply the source file deltas supplied in |chunks| to arbitrary files.
  |chunks| is a list of changes defined by ycmd.responses.FixItChunk,
  which may apply arbitrary modifications to arbitrary files.

  If a file specified in a particular chunk is not currently open in a visible
  buffer (i.e., one in a window visible in the current tab), we:
    - issue a warning to the user that we're going to open new files (and offer
      her the option to abort cleanly)
    - open the file in a new split, make the changes, then hide the buffer.

  If for some reason a file could not be opened or changed, raises RuntimeError.
  Otherwise, returns no meaningful value."""

  # We apply the edits file-wise for efficiency, and because we must track the
  # file-wise offset deltas (caused by the modifications to the text).
  chunks_by_file = _SortChunksByFile( chunks )

  # We sort the file list simply to enable repeatable testing
  sorted_file_list = sorted( iterkeys( chunks_by_file ) )

  # Make sure the user is prepared to have her screen mutilated by the new
  # buffers
  num_files_to_open = _GetNumNonVisibleFiles( sorted_file_list )

  if num_files_to_open > 0:
    if not Confirm(
            FIXIT_OPENING_BUFFERS_MESSAGE_FORMAT.format( num_files_to_open ) ):
      return

  # Store the list of locations where we applied changes. We use this to display
  # the quickfix window showing the user where we applied changes.
  locations = []

  for filepath in sorted_file_list:
    ( buffer_num, close_window ) = _OpenFileInSplitIfNeeded( filepath )

    ReplaceChunksInBuffer( chunks_by_file[ filepath ],
                           vim.buffers[ buffer_num ],
                           locations )

    # When opening tons of files, we don't want to have a split for each new
    # file, as this simply does not scale, so we open the window, make the
    # edits, then hide the window.
    if close_window:
      # Some plugins (I'm looking at you, syntastic) might open a location list
      # for the window we just opened. We don't want that location list hanging
      # around, so we close it. lclose is a no-op if there is no location list.
      vim.command( 'lclose' )

      # Note that this doesn't lose our changes. It simply "hides" the buffer,
      # which can later be re-accessed via the quickfix list or `:ls`
      vim.command( 'hide' )

  # Open the quickfix list, populated with entries for each location we changed.
  if locations:
    SetQuickFixList( locations )

  PostVimMessage( 'Applied {0} changes'.format( len( chunks ) ),
                  warning = False )
Example no. 18
def ReplaceChunks(chunks):
    """Apply the source file deltas supplied in |chunks| to arbitrary files.
  |chunks| is a list of changes defined by ycmd.responses.FixItChunk,
  which may apply arbitrary modifications to arbitrary files.

  If a file specified in a particular chunk is not currently open in a visible
  buffer (i.e., one in a window visible in the current tab), we:
    - issue a warning to the user that we're going to open new files (and offer
      her the option to abort cleanly)
    - open the file in a new split, make the changes, then hide the buffer.

  If for some reason a file could not be opened or changed, raises RuntimeError.
  Otherwise, returns no meaningful value."""

    # We apply the edits file-wise for efficiency, and because we must track the
    # file-wise offset deltas (caused by the modifications to the text).
    chunks_by_file = _SortChunksByFile(chunks)

    # We sort the file list simply to enable repeatable testing
    sorted_file_list = sorted(iterkeys(chunks_by_file))

    # Make sure the user is prepared to have her screen mutilated by the new
    # buffers
    num_files_to_open = _GetNumNonVisibleFiles(sorted_file_list)

    if num_files_to_open > 0:
        if not Confirm(
                FIXIT_OPENING_BUFFERS_MESSAGE_FORMAT.format(
                    num_files_to_open)):
            return

    # Store the list of locations where we applied changes. We use this to display
    # the quickfix window showing the user where we applied changes.
    locations = []

    for filepath in sorted_file_list:
        (buffer_num, close_window) = _OpenFileInSplitIfNeeded(filepath)

        ReplaceChunksInBuffer(chunks_by_file[filepath],
                              vim.buffers[buffer_num], locations)

        # When opening tons of files, we don't want to have a split for each new
        # file, as this simply does not scale, so we open the window, make the
        # edits, then hide the window.
        if close_window:
            # Some plugins (I'm looking at you, syntastic) might open a location list
            # for the window we just opened. We don't want that location list hanging
            # around, so we close it. lclose is a no-op if there is no location list.
            vim.command('lclose')

            # Note that this doesn't lose our changes. It simply "hides" the buffer,
            # which can later be re-accessed via the quickfix list or `:ls`
            vim.command('hide')

    # Open the quickfix list, populated with entries for each location we changed.
    if locations:
        SetQuickFixList(locations)

    PostVimMessage('Applied {0} changes'.format(len(chunks)), warning=False)
Example no. 19
 def MultiWritePathInfos(self, path_infos):
     """Writes a collection of path info records for specified clients."""
     try:
         self._MultiWritePathInfos(path_infos)
     except MySQLdb.IntegrityError as error:
         client_ids = list(iterkeys(path_infos))
         raise db.AtLeastOneUnknownClientError(client_ids=client_ids,
                                               cause=error)
Example no. 20
 def _call_hmset(self, command, *args, **kwargs):
     if self.indexable:
         current = self.proxy_get()
         _to_deindex = dict(
             (k, current[k]) for k in iterkeys(kwargs) if k in current)
         self.deindex(_to_deindex)
         self.index(kwargs)
     return self._traverse_command(command, kwargs)
Example no. 21
    def Handle(self, unused_args, token=None):
        _ = token

        result = ApiListAff4AttributeDescriptorsResult()
        for name in sorted(iterkeys(aff4.Attribute.NAMES)):
            result.items.append(ApiAff4AttributeDescriptor(name=name))

        return result
Example no. 22
 def _InsertValueIntoDb(self, table_name, schema, value, db_cursor):
   sql_dict = self._ConvertToCanonicalSqlDict(schema, value.ToPrimitiveDict())
   buf = io.StringIO()
   buf.write(u"INSERT INTO \"%s\" (\n  " % table_name)
   buf.write(u",\n  ".join(["\"%s\"" % k for k in iterkeys(sql_dict)]))
   buf.write(u"\n)")
   buf.write(u"VALUES (%s);" % u",".join([u"?"] * len(sql_dict)))
   db_cursor.execute(buf.getvalue(), list(itervalues(sql_dict)))
Example no. 23
 def _InsertValueIntoDb(self, table_name, schema, value, db_cursor):
   sql_dict = self._ConvertToCanonicalSqlDict(schema, value.ToPrimitiveDict())
   buf = io.StringIO()
   buf.write(u"INSERT INTO \"%s\" (\n  " % table_name)
   buf.write(u",\n  ".join(["\"%s\"" % k for k in iterkeys(sql_dict)]))
   buf.write(u"\n)")
   buf.write(u"VALUES (%s);" % u",".join([u"?"] * len(sql_dict)))
   db_cursor.execute(buf.getvalue(), list(itervalues(sql_dict)))
Example no. 24
  def testAllOtherMethodsAreNotAccessChecked(self):
    unchecked_methods = (
        set(iterkeys(self.router.__class__.GetAnnotatedMethods())) - set(
            self.ACCESS_CHECKED_METHODS))
    self.assertTrue(unchecked_methods)

    for method_name in unchecked_methods:
      self.CheckMethodIsNotAccessChecked(getattr(self.router, method_name))
Example no. 25
 def LoadChecks(self):
   """Load the checks, returning the names of the checks that were loaded."""
   checks.CheckRegistry.Clear()
   check_configs = ("sshd.yaml", "sw.yaml", "unix_login.yaml")
   cfg_dir = os.path.join(config.CONFIG["Test.data_dir"], "checks")
   chk_files = [os.path.join(cfg_dir, f) for f in check_configs]
   checks.LoadChecksFromFiles(chk_files)
   return list(iterkeys(checks.CheckRegistry.checks))
Example no. 26
def Poll_Diagnostics_ProjectWide_Eclipse_test(app):
    filepath = TestLauncher
    contents = ReadFile(filepath)

    # Poll until we receive _all_ the diags asynchronously
    to_see = sorted(iterkeys(DIAG_MATCHERS_PER_FILE))
    seen = dict()

    try:
        for message in PollForMessages(app, {
                'filepath': filepath,
                'contents': contents
        }):
            print('Message {0}'.format(pformat(message)))
            if 'diagnostics' in message:
                seen[message['filepath']] = True
                if message['filepath'] not in DIAG_MATCHERS_PER_FILE:
                    raise AssertionError(
                        'Received diagnostics for unexpected file {0}. '
                        'Only expected {1}'.format(message['filepath'],
                                                   to_see))
                assert_that(
                    message,
                    has_entries({
                        'diagnostics':
                        DIAG_MATCHERS_PER_FILE[message['filepath']],
                        'filepath':
                        message['filepath']
                    }))

            if sorted(iterkeys(seen)) == to_see:
                break
            else:
                print('Seen diagnostics for {0}, still waiting for {1}'.format(
                    json.dumps(sorted(iterkeys(seen)), indent=2),
                    json.dumps([x for x in to_see if x not in seen],
                               indent=2)))

            # Eventually PollForMessages will throw a timeout exception and we'll fail
            # if we don't see all of the expected diags
    except PollForMessagesTimeoutException as e:
        raise AssertionError(
            str(e) + 'Timed out waiting for full set of diagnostics. '
            'Expected to see diags for {0}, but only saw {1}.'.format(
                json.dumps(to_see, indent=2),
                json.dumps(sorted(iterkeys(seen)), indent=2)))
Example no. 27
 def LoadChecks(self):
     """Load the checks, returning the names of the checks that were loaded."""
     checks.CheckRegistry.Clear()
     check_configs = ("sshd.yaml", "sw.yaml", "unix_login.yaml")
     cfg_dir = os.path.join(config.CONFIG["Test.data_dir"], "checks")
     chk_files = [os.path.join(cfg_dir, f) for f in check_configs]
     checks.LoadChecksFromFiles(chk_files)
     return list(iterkeys(checks.CheckRegistry.checks))
Example no. 28
  def Handle(self, unused_args, token=None):
    _ = token

    result = ApiListAff4AttributeDescriptorsResult()
    for name in sorted(iterkeys(aff4.Attribute.NAMES)):
      result.items.append(ApiAff4AttributeDescriptor(name=name))

    return result
Example no. 29
def ScheduleSystemCronFlows(names=None, token=None):
  """Schedule all the SystemCronFlows found."""

  if data_store.RelationalDBReadEnabled(category="cronjobs"):
    return cronjobs.ScheduleSystemCronJobs(names=names)

  errors = []
  for name in config.CONFIG["Cron.disabled_system_jobs"]:
    try:
      cls = registry.FlowRegistry.FlowClassByName(name)
    except ValueError:
      errors.append("No such flow: %s." % name)
      continue

    if not aff4.issubclass(cls, SystemCronFlow):
      errors.append("Disabled system cron job name doesn't correspond to "
                    "a flow inherited from SystemCronFlow: %s" % name)

  if names is None:
    names = iterkeys(registry.FlowRegistry.FLOW_REGISTRY)

  for name in names:
    cls = registry.FlowRegistry.FlowClassByName(name)

    if not aff4.issubclass(cls, SystemCronFlow):
      continue

    cron_args = rdf_cronjobs.CreateCronJobFlowArgs(
        periodicity=cls.frequency,
        lifetime=cls.lifetime,
        allow_overruns=cls.allow_overruns)
    cron_args.flow_runner_args.flow_name = name

    if cls.enabled:
      enabled = name not in config.CONFIG["Cron.disabled_system_jobs"]
    else:
      enabled = False

    job_urn = CronManager.CRON_JOBS_PATH.Add(name)
    with aff4.FACTORY.Create(
        job_urn,
        aff4_type=CronJob,
        mode="rw",
        token=token,
        force_new_version=False) as cron_job:

      # If the cronjob was already present we don't want to overwrite the
      # original start_time.
      existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)

      if cron_args != existing_cron_args:
        cron_job.Set(cron_job.Schema.CRON_ARGS(cron_args))

      cron_job.Set(cron_job.Schema.DISABLED(not enabled))

  if errors:
    raise ValueError(
        "Error(s) while parsing Cron.disabled_system_jobs: %s" % errors)
Example no. 30
def ScheduleSystemCronFlows(names=None, token=None):
  """Schedule all the SystemCronFlows found."""

  if data_store.RelationalDBEnabled():
    return cronjobs.ScheduleSystemCronJobs(names=names)

  errors = []
  for name in config.CONFIG["Cron.disabled_system_jobs"]:
    try:
      cls = registry.AFF4FlowRegistry.FlowClassByName(name)
    except ValueError:
      errors.append("No such flow: %s." % name)
      continue

    if not issubclass(cls, SystemCronFlow):
      errors.append("Disabled system cron job name doesn't correspond to "
                    "a flow inherited from SystemCronFlow: %s" % name)

  if names is None:
    names = iterkeys(registry.AFF4FlowRegistry.FLOW_REGISTRY)

  for name in names:
    cls = registry.AFF4FlowRegistry.FlowClassByName(name)

    if not issubclass(cls, SystemCronFlow):
      continue

    cron_args = rdf_cronjobs.CreateCronJobFlowArgs(
        periodicity=cls.frequency,
        lifetime=cls.lifetime,
        allow_overruns=cls.allow_overruns)
    cron_args.flow_runner_args.flow_name = name

    if cls.enabled:
      enabled = name not in config.CONFIG["Cron.disabled_system_jobs"]
    else:
      enabled = False

    job_urn = CronManager.CRON_JOBS_PATH.Add(name)
    with aff4.FACTORY.Create(
        job_urn,
        aff4_type=CronJob,
        mode="rw",
        token=token,
        force_new_version=False) as cron_job:

      # If the cronjob was already present we don't want to overwrite the
      # original start_time.
      existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)

      if cron_args != existing_cron_args:
        cron_job.Set(cron_job.Schema.CRON_ARGS(cron_args))

      cron_job.Set(cron_job.Schema.DISABLED(not enabled))

  if errors:
    raise ValueError("Error(s) while parsing Cron.disabled_system_jobs: %s" %
                     errors)
Example no. 31
  def _ValidateRequiredAttributes(self):
    required = set(self.TYPE_MAP[self.type].get("required_attributes", []))
    provided = set(iterkeys(self.attributes))
    missing = required.difference(provided)

    if missing:
      quoted = ("'%s'" % attribute for attribute in missing)
      detail = "missing required attributes: %s" % ", ".join(quoted)
      raise ArtifactSourceSyntaxError(self, detail)
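The required-versus-provided check above is plain set arithmetic; here it is in isolation, with hypothetical attribute names:

required = {"paths", "separator"}
provided = set({"paths": ["/tmp"]}.keys())

missing = required.difference(provided)
if missing:
    quoted = ("'%s'" % attribute for attribute in sorted(missing))
    print("missing required attributes: %s" % ", ".join(quoted))
# missing required attributes: 'separator'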
Example no. 32
    def _unschedule(self, job):
        """ Actually unschedules a job. Must be called with self.lock held.
        """
        # The job could have been renamed so we need to unschedule it by the previous name, if there is one
        name = job.old_name if job.old_name else job.name
        found = False
        job.keep_running = False

        if name in iterkeys(self.jobs):
            del self.jobs[name]
            found = True

        if name in iterkeys(self.job_greenlets):
            self.job_greenlets[name].kill(block=False, timeout=2.0)
            del self.job_greenlets[name]
            found = True

        return found
Example no. 33
def test_metrics():
    actual = [1, 0, 0, 1, 1, 0, 0, 1, 0, 1]
    predicted = [0.8, 0.2, 0.6, 0.3, 0.1, 0.2, 0.3, 0.9, 0.2, 0.7]
    metrics = evaluate_metrics(actual, predicted)
    metrics = {key: round(metrics[key], 3) for key in iterkeys(metrics)}
    assert metrics['accuracy'] == 0.700
    assert metrics['avglogloss'] == 0.613
    assert metrics['auroc'] == 0.740
    assert metrics['F1'] == 0.667
Example no. 34
    def testAllOtherMethodsAreNotAccessChecked(self):
        unchecked_methods = (
            set(iterkeys(self.router.__class__.GetAnnotatedMethods())) -
            set(self.ACCESS_CHECKED_METHODS))
        self.assertTrue(unchecked_methods)

        for method_name in unchecked_methods:
            self.CheckMethodIsNotAccessChecked(
                getattr(self.router, method_name))
Example no. 35
  def _ValidateRequiredAttributes(self):
    required = set(self.TYPE_MAP[self.type].get("required_attributes", []))
    provided = set(iterkeys(self.attributes))
    missing = required.difference(provided)

    if missing:
      quoted = ("'%s'" % attribute for attribute in missing)
      detail = "missing required attributes: %s" % ", ".join(quoted)
      raise ArtifactSourceSyntaxError(self, detail)
Example no. 36
def Diagnostics_Poll_test(app):
    filepath = PathToTestFile('goto.go')
    contents = ReadFile(filepath)

    # Poll until we receive _all_ the diags asynchronously.
    to_see = sorted(iterkeys(DIAG_MATCHERS_PER_FILE))
    seen = {}

    try:
        for message in PollForMessages(app, {
                'filepath': filepath,
                'contents': contents,
                'filetype': 'go'
        }):
            if message['diagnostics'][0]['text'].endswith(
                    "is not part of a package"):
                continue
            print('Message {}'.format(pformat(message)))
            if 'diagnostics' in message:
                seen[message['filepath']] = True
                if message['filepath'] not in DIAG_MATCHERS_PER_FILE:
                    raise AssertionError(
                        'Received diagnostics for unexpected file {}. '
                        'Only expected {}'.format(message['filepath'], to_see))
                assert_that(
                    message,
                    has_entries({
                        'diagnostics':
                        DIAG_MATCHERS_PER_FILE[message['filepath']],
                        'filepath':
                        message['filepath']
                    }))

            if sorted(iterkeys(seen)) == to_see:
                break

            # Eventually PollForMessages will throw a timeout exception and we'll fail
            # if we don't see all of the expected diags.
    except PollForMessagesTimeoutException as e:
        raise AssertionError(
            str(e) + 'Timed out waiting for full set of diagnostics. '
            'Expected to see diags for {}, but only saw {}.'.format(
                json.dumps(to_see, indent=2),
                json.dumps(sorted(iterkeys(seen)), indent=2)))
Example no. 37
    def invoke_by_impl_name(self, impl_name, payload='', channel=CHANNEL.INVOKE, data_format=DATA_FORMAT.DICT,
        transport=None, serialize=False, as_bunch=False, timeout=None, raise_timeout=True, **kwargs):
        """ Invokes a service synchronously by its implementation name (full dotted Python name).
        """
        if self.component_enabled_target_matcher:

            orig_impl_name = impl_name
            impl_name, target = self.extract_target(impl_name)

            # It's possible we are being invoked through self.invoke or self.invoke_by_id
            target = target or kwargs.get('target', '')

            if not self._worker_store.target_matcher.is_allowed(target):
                raise ZatoException(self.cid, 'Invocation target `{}` not allowed ({})'.format(target, orig_impl_name))

        if self.component_enabled_invoke_matcher:
            if not self._worker_store.invoke_matcher.is_allowed(impl_name):
                raise ZatoException(self.cid, 'Service `{}` (impl_name) cannot be invoked'.format(impl_name))

        if self.impl_name == impl_name:
            msg = 'A service cannot invoke itself, name:[{}]'.format(self.name)
            self.logger.error(msg)
            raise ZatoException(self.cid, msg)

        service, is_active = self.server.service_store.new_instance(impl_name)
        if not is_active:
            raise Inactive(service.get_name())

        set_response_func = kwargs.pop('set_response_func', service.set_response_data)

        invoke_args = (set_response_func, service, payload, channel, data_format, transport, self.server,
            self.broker_client, self._worker_store, kwargs.pop('cid', self.cid), self.request.simple_io_config)

        kwargs.update({'serialize':serialize, 'as_bunch':as_bunch})

        try:
            if timeout:
                try:
                    g = spawn(self.update_handle, *invoke_args, **kwargs)
                    return g.get(block=True, timeout=timeout)
                except Timeout:
                    g.kill()
                    logger.warn('Service `%s` timed out (%s)', service.name, self.cid)
                    if raise_timeout:
                        raise
            else:
                out = self.update_handle(*invoke_args, **kwargs)
                if kwargs.get('skip_response_elem') and hasattr(out, 'keys'):
                    keys = list(iterkeys(out))
                    response_elem = keys[0]
                    return out[response_elem]
                else:
                    return out
        except Exception:
            logger.warn('Could not invoke `%s`, e:`%s`', service.name, format_exc())
            raise
Example no. 38
def main():
    """
    Evaluates your predictions. This loads the dev labels and your predictions, and then evaluates them, printing the
    results for a variety of metrics to the screen.
    """

    # test_metrics()

    parser = argparse.ArgumentParser(
        description='Duolingo shared task evaluation script')
    parser.add_argument('--pred', help='Predictions file name', required=True)
    parser.add_argument('--key', help='Labelled keys', required=True)

    args = parser.parse_args()

    r = re.compile(r'(.*)-(.*)-(.*)\.pred')
    filename = os.path.basename(args.pred)
    m = r.match(filename)
    data, train, val = m.groups()
    config_file = '/Users/jilljenn/code/tensorflow-DeepFM/data/last_{:s}/y_pred-{:s}-{:s}.config.json'.format(
        data, train, val)
    if os.path.isfile(config_file):
        with open(config_file) as f:
            config = json.load(f)
        print('learning_rate={:f} embedding_size={:d} nb_epochs={:d}'.format(
            config['dfm_params']['learning_rate'],
            config['dfm_params']['embedding_size'],
            config['dfm_params']['epoch']))
        if 'finished_at_epoch' in config:
            print('Finished at:', config.get('finished_at_epoch'))
    else:
        print('No config file for {:s}'.format(filename))
        sys.exit(0)

    assert os.path.isfile(args.pred)

    # print('\nLoading labels for exercises...')
    labels = load_labels(args.key)

    # print('Loading predictions for exercises...')
    predictions = load_labels(args.pred)

    actual = []
    predicted = []

    for instance_id in iterkeys(labels):
        try:
            actual.append(labels[instance_id])
            predicted.append(predictions[instance_id])
        except KeyError:
            print('No prediction for instance ID ' + instance_id + '!')

    metrics = evaluate_metrics(actual, predicted)
    line = '\t'.join([('%s=%.3f' % (metric, value))
                      for (metric, value) in iteritems(metrics)])
    print('Metrics:\t' + line)
Example no. 39
def test_metrics():
    actual = [1, 0, 0, 1, 1, 0, 0, 1, 0, 1]
    predicted = [0.8, 0.2, 0.6, 0.3, 0.1, 0.2, 0.3, 0.9, 0.2, 0.7]
    metrics = evaluate_metrics(actual, predicted)
    metrics = {key: round(metrics[key], 3) for key in iterkeys(metrics)}
    assert metrics['accuracy'] == 0.700
    assert metrics['avglogloss'] == 0.613
    assert metrics['auroc'] == 0.740
    assert metrics['F1'] == 0.667
    print('Verified that our environment is calculating metrics correctly.')
Example no. 40
 def serialize_as_normalized(self):
     children = []
     for k in sorted(iterkeys(self.data)):
         for child in self._normalize_buckets(self.data, k):
             children.append(child)
     return {
         "level": "root",
         "key": None,
         "value": None,
         "children": children
     }
Example no. 41
  def testEventMetricGetsRendered(self):
    stats.STATS.RegisterEventMetric("api_method_latency")
    stats.STATS.RecordEvent("api_method_latency", 15)

    varz_json = json.loads(stats_server.BuildVarzJsonString())
    self.assertEqual(varz_json["api_method_latency"]["info"],
                     {"metric_type": "EVENT",
                      "value_type": "DISTRIBUTION"})
    self.assertItemsEqual(
        iterkeys(varz_json["api_method_latency"]["value"]),
        ["sum", "bins_heights", "counter"])
Example no. 42
  def testAllOtherMethodsAreNotImplemented(self):
    router = self._CreateRouter()

    unchecked_methods = (
        set(iterkeys(router.__class__.GetAnnotatedMethods())) -
        set(self.IMPLEMENTED_METHODS))
    self.assertTrue(unchecked_methods)

    for method_name in unchecked_methods:
      with self.assertRaises(NotImplementedError):
        getattr(router, method_name)(None, token=self.token)
Example no. 43
    def initialize(self, networkName, dataNumber):
        '''
        Initialize case class
        '''
        self.networkName = networkName
        self.dataNumber = dataNumber

        self.locationOfInterestManager.initialize()

        for uqsaMethodName in iterkeys(self.uqsaMethods):
            print("Info classUQSACase 96: running ", uqsaMethodName)
Example no. 44
    def testAllOtherMethodsAreNotImplemented(self):
        router = self._CreateRouter()

        unchecked_methods = (
            set(iterkeys(router.__class__.GetAnnotatedMethods())) -
            set(self.IMPLEMENTED_METHODS))
        self.assertTrue(unchecked_methods)

        for method_name in unchecked_methods:
            with self.assertRaises(NotImplementedError):
                getattr(router, method_name)(None, token=self.token)
Example no. 45
    def testMetricWithMultipleFieldsGetsRendered(self):
        stats_collector_instance.Get().RecordEvent(
            "api_method_latency", 15, fields=["Foo", "http", "SUCCESS"])

        varz_json = json.loads(stats_server.BuildVarzJsonString())
        self.assertEqual(
            varz_json["api_method_latency"]["info"], {
                "metric_type":
                "EVENT",
                "value_type":
                "DISTRIBUTION",
                "fields_defs": [["method_name", "STR"], ["protocol", "STR"],
                                ["status", "STR"]]
            })

        api_method_latency_value = varz_json["api_method_latency"]["value"]
        self.assertEqual(list(iterkeys(api_method_latency_value)),
                         ["Foo:http:SUCCESS"])
        self.assertCountEqual(
            iterkeys(api_method_latency_value["Foo:http:SUCCESS"]),
            ["sum", "bins_heights", "counter"])
Example no. 46
def Poll_Diagnostics_ProjectWide_Eclipse_test( app ):
  filepath = TestLauncher
  contents = ReadFile( filepath )

  # Poll until we receive _all_ the diags asynchronously
  to_see = sorted( iterkeys( DIAG_MATCHERS_PER_FILE ) )
  seen = {}

  try:
    for message in PollForMessages( app,
                                    { 'filepath': filepath,
                                      'contents': contents } ):
      print( 'Message {0}'.format( pformat( message ) ) )
      if 'diagnostics' in message:
        seen[ message[ 'filepath' ] ] = True
        if message[ 'filepath' ] not in DIAG_MATCHERS_PER_FILE:
          raise AssertionError(
            'Received diagnostics for unexpected file {0}. '
            'Only expected {1}'.format( message[ 'filepath' ], to_see ) )
        assert_that( message, has_entries( {
          'diagnostics': DIAG_MATCHERS_PER_FILE[ message[ 'filepath' ] ],
          'filepath': message[ 'filepath' ]
        } ) )

      if sorted( iterkeys( seen ) ) == to_see:
        break
      else:
        print( 'Seen diagnostics for {0}, still waiting for {1}'.format(
          json.dumps( sorted( iterkeys( seen ) ), indent=2 ),
          json.dumps( [ x for x in to_see if x not in seen ], indent=2 ) ) )

      # Eventually PollForMessages will throw a timeout exception and we'll fail
      # if we don't see all of the expected diags
  except PollForMessagesTimeoutException as e:
    raise AssertionError(
      str( e ) +
      'Timed out waiting for full set of diagnostics. '
      'Expected to see diags for {0}, but only saw {1}.'.format(
        json.dumps( to_see, indent=2 ),
        json.dumps( sorted( iterkeys( seen ) ), indent=2 ) ) )
Example no. 47
def _CompileFilters(config):
    """Given a filter config dictionary, return a list of compiled filters"""
    filters = []

    for filter_type in iterkeys(config):
        compiler = FILTER_COMPILERS.get(filter_type)

        if compiler is not None:
            for filter_config in _ListOf(config[filter_type]):
                compiledFilter = compiler(filter_config)
                filters.append(compiledFilter)

    return filters
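_CompileFilters is a lookup-and-dispatch over the config's top-level keys. A self-contained sketch with a made-up compiler table:

FILTER_COMPILERS = {
    'regex': lambda cfg: ('regex', cfg),
    'glob': lambda cfg: ('glob', cfg),
}

def _ListOf(value):
    return value if isinstance(value, list) else [value]

def compile_filters(config):
    filters = []
    for filter_type in config:
        compiler = FILTER_COMPILERS.get(filter_type)
        if compiler is not None:
            for filter_config in _ListOf(config[filter_type]):
                filters.append(compiler(filter_config))
    return filters

print(compile_filters({'regex': ['foo.*', 'bar.*'], 'unknown': 'ignored'}))
# [('regex', 'foo.*'), ('regex', 'bar.*')]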
Example no. 48
    def testRSAPublicKeyFailure(self):
        """Deliberately try to parse an invalid public key."""
        config.CONFIG.Initialize(data="""
[Client]
executable_signing_public_key = -----BEGIN PUBLIC KEY-----
        GpJgTFkTIAgX0Ih5lxoFB5TUjUfJFbBkSmKQPRA/IyuLBtCLQgwkTNkCAwEAAQ==
        -----END PUBLIC KEY-----
""")
        config.CONFIG.context = []

        errors = config.CONFIG.Validate("Client")
        self.assertCountEqual(list(iterkeys(errors)),
                              ["Client.executable_signing_public_key"])
Example no. 49
def _CompileFilters( config ):
  """Given a filter config dictionary, return a list of compiled filters"""
  filters = []

  for filter_type in iterkeys( config ):
    compiler = FILTER_COMPILERS.get( filter_type )

    if compiler is not None:
      for filter_config in _ListOf( config[ filter_type ] ):
        compiledFilter = compiler( filter_config )
        filters.append( compiledFilter )

  return filters
Example no. 50
    def testInvalidX509Certificates(self):
        """Deliberately try to parse an invalid certificate."""
        config.CONFIG.Initialize(data="""
[Frontend]
certificate = -----BEGIN CERTIFICATE-----
        MIIDczCCAVugAwIBAgIJANdK3LO+9qOIMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
        uqnFquJfg8xMWHHJmPEocDpJT8Tlmbw=
        -----END CERTIFICATE-----
""")
        config.CONFIG.context = []

        errors = config.CONFIG.Validate("Frontend")
        self.assertCountEqual(list(iterkeys(errors)), ["Frontend.certificate"])
Example no. 51
  def testInvalidX509Certificates(self):
    """Deliberately try to parse an invalid certificate."""
    config.CONFIG.Initialize(data="""
[Frontend]
certificate = -----BEGIN CERTIFICATE-----
        MIIDczCCAVugAwIBAgIJANdK3LO+9qOIMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
        uqnFquJfg8xMWHHJmPEocDpJT8Tlmbw=
        -----END CERTIFICATE-----
""")
    config.CONFIG.context = []

    errors = config.CONFIG.Validate("Frontend")
    self.assertItemsEqual(list(iterkeys(errors)), ["Frontend.certificate"])
Example no. 52
  def testMultiReadStatsLimitsResultsByTimeRange(self):
    stats_collector_instance.Get().IncrementCounter("counter")
    self.stats_store.WriteStats(process_id="pid1", timestamp=42)
    self.stats_store.WriteStats(process_id="pid2", timestamp=42)
    self.stats_store.WriteStats(process_id="pid2", timestamp=44)

    stats_collector_instance.Get().IncrementCounter("counter")
    self.stats_store.WriteStats(process_id="pid1", timestamp=44)

    results = self.stats_store.MultiReadStats(timestamp=(43, 100))
    self.assertEqual(sorted(iterkeys(results)), ["pid1", "pid2"])
    self.assertEqual(results["pid1"]["counter"], [(2, 44)])
    self.assertEqual(results["pid2"]["counter"], [(1, 44)])
Example no. 53
  def testMultiReadClientsFullInfoFiltersClientsByLastPingTime(self):
    d = self.db

    base_time = rdfvalue.RDFDatetime.Now()
    cutoff_time = base_time - rdfvalue.Duration("1s")
    client_ids_to_ping = self._SetupLastPingClients(base_time)

    expected_client_ids = [
        cid for cid, ping in iteritems(client_ids_to_ping) if ping == base_time
    ]
    full_infos = d.MultiReadClientFullInfo(
        list(iterkeys(client_ids_to_ping)), min_last_ping=cutoff_time)
    self.assertItemsEqual(expected_client_ids, full_infos)
Example no. 54
  def testMultiReadStatsWorksCorrectly(self):
    stats_collector_instance.Get().IncrementCounter("counter")
    self.stats_store.WriteStats(process_id="pid1", timestamp=42)
    self.stats_store.WriteStats(process_id="pid2", timestamp=42)
    self.stats_store.WriteStats(process_id="pid2", timestamp=43)

    stats_collector_instance.Get().IncrementCounter("counter")
    self.stats_store.WriteStats(process_id="pid1", timestamp=43)

    results = self.stats_store.MultiReadStats()
    self.assertEqual(sorted(iterkeys(results)), ["pid1", "pid2"])
    self.assertEqual(results["pid1"]["counter"], [(1, 42), (2, 43)])
    self.assertEqual(results["pid2"]["counter"], [(1, 42), (1, 43)])
Example no. 55
  def testRSAPublicKeyFailure(self):
    """Deliberately try to parse an invalid public key."""
    config.CONFIG.Initialize(data="""
[Client]
executable_signing_public_key = -----BEGIN PUBLIC KEY-----
        GpJgTFkTIAgX0Ih5lxoFB5TUjUfJFbBkSmKQPRA/IyuLBtCLQgwkTNkCAwEAAQ==
        -----END PUBLIC KEY-----
""")
    config.CONFIG.context = []

    errors = config.CONFIG.Validate("Client")
    self.assertItemsEqual(
        list(iterkeys(errors)), ["Client.executable_signing_public_key"])
Example no. 56
    def can_invoke_admin_service(self):
        """ Returns a boolean flag indicating that we know what service to invoke, what cluster it is on
        and that all the required parameters were given in GET request. cluster_id doesn't have to be in GET,
        'cluster' will suffice.
        """
        input_elems = list(iterkeys(self.req.GET)) + list(
            iterkeys(self.req.zato.args))

        if not self.cluster_id:
            return False

        for elem in self.SimpleIO.input_required:
            if elem == 'cluster_id':
                continue
            if not elem in input_elems:
                return False
            value = self.req.GET.get(elem)
            if not value:
                value = self.req.zato.args.get(elem)
                if not value:
                    return False
        return True
Example no. 57
  def testEventMetricGetsRendered(self):
    stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
        [stats_utils.CreateEventMetadata("api_method_latency")])
    with stats_test_utils.FakeStatsContext(stats_collector):
      stats_collector_instance.Get().RecordEvent("api_method_latency", 15)

      varz_json = json.loads(stats_server.BuildVarzJsonString())
      self.assertEqual(varz_json["api_method_latency"]["info"], {
          "metric_type": "EVENT",
          "value_type": "DISTRIBUTION"
      })
      self.assertCountEqual(
          iterkeys(varz_json["api_method_latency"]["value"]),
          ["sum", "bins_heights", "counter"])
Example no. 58
    def handle(self, _internal=('zato', 'pub.zato')):

        # Service name is given in URL path
        service_name = self.request.http.params.service_name

        # Are we invoking a Zato built-in service or a user-defined one?
        is_internal = service_name.startswith(_internal)  # type: bool

        # Before invoking a service that is potentially internal we need to confirm
        # that our channel can be used for such invocations.
        if is_internal:
            if self.channel.name not in self.server.fs_server_config.misc.service_invoker_allow_internal:
                self.logger.warn(
                    'Service `%s` could not be invoked; channel `%s` not among `%s` (service_invoker_allow_internal)',
                    service_name, self.channel.name, self.server.
                    fs_server_config.misc.service_invoker_allow_internal)
                self.response.data_format = 'text/plain'
                raise BadRequest(self.cid,
                                 'No such service `{}`'.format(service_name))

        # Make sure the service exists
        if self.server.service_store.has_service(service_name):

            # Depending on HTTP verb used, we may need to look up input in different places
            if self.request.http.method == 'GET':
                payload = self.request.http.GET
            else:
                payload = self.request.raw_request
                payload = loads(payload) if payload else None

            # Invoke the service now
            response = self.invoke(
                service_name,
                payload,
                wsgi_environ={'HTTP_METHOD': self.request.http.method})

            # All internal services wrap their responses in top-level elements that we need to shed here ..
            if is_internal and response:
                top_level = list(iterkeys(response))[0]
                response = response[top_level]

            # Assign response to outgoing payload
            self.response.payload = dumps(response)
            self.response.data_format = 'application/json'

        # No such service as given on input
        else:
            self.response.data_format = 'text/plain'
            raise BadRequest(self.cid,
                             'No such service `{}`'.format(service_name))
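The unwrapping of internal responses near the end of handle() assumes a single top-level wrapper element. In isolation (with a hypothetical wrapper name):

response = {'zato_service_invoke_response': {'name': 'my.service', 'id': 123}}

top_level = list(response.keys())[0]
response = response[top_level]
print(response)  # {'name': 'my.service', 'id': 123}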
Example no. 59
  def _GetPlugins(self, base_class):
    items = []
    for name in sorted(iterkeys(base_class.classes)):
      cls = base_class.classes[name]
      # While technically a valid plugin, UnknownOutputPlugin is only used as
      # a placeholder when unserializing old and now-deleted output plugins.
      # No need to display it in the UI.
      if cls == output_plugin.UnknownOutputPlugin:
        continue

      if cls.description:
        items.append(ApiOutputPluginDescriptor().InitFromOutputPluginClass(cls))

    return items
Example no. 60
File: client.py Project: google/grr
def UpdateClientsFromFleetspeak(clients):
  """Updates ApiClient records to include info from Fleetspeak."""
  if not fleetspeak_connector.CONN or not fleetspeak_connector.CONN.outgoing:
    # FS not configured, or an outgoing connection is otherwise unavailable.
    return
  id_map = {}
  for client in clients:
    if client.fleetspeak_enabled:
      id_map[fleetspeak_utils.GRRIDToFleetspeakID(client.client_id)] = client
  if not id_map:
    return
  res = fleetspeak_connector.CONN.outgoing.ListClients(
      admin_pb2.ListClientsRequest(client_ids=list(iterkeys(id_map))))
  for read in res.clients:
    api_client = id_map[read.client_id]
    api_client.last_seen_at = fleetspeak_utils.TSToRDFDatetime(
        read.last_contact_time)
    api_client.last_clock = fleetspeak_utils.TSToRDFDatetime(read.last_clock)