Example #1
    def testSystemFlowsReportPluginWithNoActivityToReport(self):
        report = report_plugins.GetReportByName(
            server_report_plugins.SystemFlowsReportPlugin.__name__)

        now = rdfvalue.RDFDatetime().Now()
        month_duration = rdfvalue.Duration("30d")

        api_report_data = report.GetReportData(stats_api.ApiGetReportArgs(
            name=report.__class__.__name__,
            start_time=now - month_duration,
            duration=month_duration),
                                               token=self.token)

        self.assertEqual(
            api_report_data,
            rdf_report_plugins.ApiReportData(
                representation_type=rdf_report_plugins.ApiReportData.
                RepresentationType.STACK_CHART,
                stack_chart=rdf_report_plugins.ApiStackChartReportData(
                    x_ticks=[])))
Example #2
    def testMultipleNotificationsCanBeWrittenAndRead(self):
        d = self.db
        username = "******"
        d.WriteGRRUser(username)

        ns = [
            objects.UserNotification(
                username=username,
                notification_type=objects.UserNotification.Type.
                TYPE_CLIENT_INTERROGATED,
                state=objects.UserNotification.State.STATE_PENDING,
                timestamp=rdfvalue.RDFDatetime(42 + i),
                message="blah%d" % i) for i in range(10)
        ]
        for n in ns:
            d.WriteUserNotification(n)

        read_ns = d.ReadUserNotifications(username)
        self.assertEqual(len(read_ns), 10)
        self.assertEqual(ns, sorted(read_ns, key=lambda x: x.timestamp))
Example #3
  def BuildTable(self, start, end, request):
    query_string = request.REQ.get("q", "")
    if not query_string:
      raise RuntimeError("A query string must be provided.")

    hash_urn = rdfvalue.RDFURN("aff4:/files/hash/generic/sha256/").Add(
        query_string)

    for i, (_, value, timestamp) in enumerate(data_store.DB.ResolveRegex(
        hash_urn, "index:.*", token=request.token)):

      if i > end:
        break

      self.AddRow(row_index=i, File=value,
                  Client=aff4_grr.VFSGRRClient.ClientURNFromURN(value),
                  Timestamp=rdfvalue.RDFDatetime(timestamp))

    # We only display 50 entries.
    return False
Example #4
File: mem.py Project: tanner-g/grr
    def ListClientsForKeywords(self, keywords, start_time=None):
        keywords = set(keywords)
        keyword_mapping = {utils.SmartStr(kw): kw for kw in keywords}

        if start_time and not isinstance(start_time, rdfvalue.RDFDatetime):
            raise ValueError(
                "Time value must be rdfvalue.RDFDatetime, got: %s" %
                type(start_time))

        res = {}
        for k in keyword_mapping:
            res.setdefault(keyword_mapping[k], [])
            for client_id, timestamp in self.keywords.get(k, {}).items():
                if start_time is not None:
                    rdf_ts = rdfvalue.RDFDatetime().FromSecondsFromEpoch(
                        timestamp)
                    if rdf_ts < start_time:
                        continue
                res[keyword_mapping[k]].append(client_id)
        return res
Example #5
    def Layout(self, request, response):
        """Display the overview."""
        hunt_id = request.REQ.get("hunt_id")
        hunt_client = request.REQ.get("hunt_client")
        if hunt_id is not None and hunt_client is not None:
            try:
                self.client = aff4.FACTORY.Open(hunt_client,
                                                token=request.token,
                                                aff4_type="VFSGRRClient")
                self.last_checkin = rdfvalue.RDFDatetime(
                    self.client.Get(self.client.Schema.PING))

                h = dict(main="HostInformation", c=self.client.client_id)
                self.hash = urllib.urlencode(sorted(h.items()))
            except IOError as e:
                logging.error("Attempt to open client %s. Err %s", hunt_client,
                              e)

        return super(HuntClientOverviewRenderer,
                     self).Layout(request, response)
Example #6
  def ResolveMulti(self, subject, attributes, timestamp=None, limit=None,
                   token=None):
    """Resolves multiple attributes at once for one subject."""
    self.security_manager.CheckDataStoreAccess(
        token, [subject], self.GetRequiredResolveAccess(attributes))

    for attribute in attributes:
      query, args = self._BuildQuery(subject, attribute, timestamp, limit)
      result = self.ExecuteQuery(query, args)

      for row in result:
        value = self._Decode(attribute, row["value"])

        yield attribute, value, rdfvalue.RDFDatetime(row["timestamp"])

      if limit:
        limit -= len(result)

      if limit is not None and limit <= 0:
        break
Example #7
  def GetPendingGlobalNotifications(self):
    storage = aff4.FACTORY.Create(GlobalNotificationStorage.DEFAULT_PATH,
                                  aff4_type="GlobalNotificationStorage",
                                  mode="r", token=self.token)
    current_notifications = storage.GetNotifications()

    shown_notifications = self.Get(self.Schema.SHOWN_GLOBAL_NOTIFICATIONS,
                                   default=GlobalNotificationSet())

    result = []
    for notification in current_notifications:
      if notification in shown_notifications:
        continue

      current_time = rdfvalue.RDFDatetime().Now()
      if (notification.show_from + notification.duration >= current_time and
          current_time >= notification.show_from):
        result.append(notification)

    return result
Example #8
def GetStartTime(cron_cls):
    """Get start time for a SystemCronFlow class.

    If start_time_randomization is True in the class, randomise the start
    time to be between now and (now + frequency).

    Args:
      cron_cls: SystemCronFlow class
    Returns:
      rdfvalue.RDFDatetime
    """
    if not cron_cls.start_time_randomization:
        return rdfvalue.RDFDatetime.Now()

    now = rdfvalue.RDFDatetime.Now()
    window_ms = cron_cls.frequency.microseconds

    start_time_ms = random.randint(now.AsMicrosecondsSinceEpoch(),
                                   now.AsMicrosecondsSinceEpoch() + window_ms)
    return rdfvalue.RDFDatetime(start_time_ms)
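
For readers without a GRR checkout, here is a plain-Python sketch of the same randomization idea (the helper below is illustrative, not part of GRR; it assumes microsecond timestamps, as RDFDatetime uses):

import random
import time


def randomized_start_us(frequency_seconds, randomize=True):
    """Pick a start time between now and now + frequency, in microseconds."""
    now_us = int(time.time() * 1e6)
    if not randomize:
        return now_us
    window_us = int(frequency_seconds * 1e6)
    # random.randint is inclusive on both ends, matching the snippet above.
    return random.randint(now_us, now_us + window_us)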
Example #9
  def _MaybeWriteIndex(self, i, ts, mutation_pool):
      """Write index marker i."""
      if i > self._max_indexed and i % self.INDEX_SPACING == 0:
          # We only write the index if the timestamp is more than 5 minutes
          # in the past: hacky defense against a late write changing the
          # count.
          if ts[0] < (rdfvalue.RDFDatetime().Now() -
                      self.INDEX_WRITE_DELAY).AsMicroSecondsFromEpoch():
              # We may be used in contexts where we don't have write access,
              # so simply give up in that case. TODO(user): Remove this when
              # the ACL system allows.
              try:
                  mutation_pool.Set(self.urn,
                                    self.INDEX_ATTRIBUTE_PREFIX + "%08x" % i,
                                    "%06x" % ts[1],
                                    timestamp=ts[0],
                                    replace=True)
                  self._index[i] = ts
                  self._max_indexed = max(i, self._max_indexed)
              except access_control.UnauthorizedAccess:
                  pass
Example #10
  def testMostActiveUsersReportPluginWithNoActivityToReport(self):
    report = report_plugins.GetReportByName(
        server_report_plugins.MostActiveUsersReportPlugin.__name__)

    now = rdfvalue.RDFDatetime().Now()
    month_duration = rdfvalue.Duration("30d")

    api_report_data = report.GetReportData(
        stats_api.ApiGetReportArgs(
            name=report.__class__.__name__,
            start_time=now - month_duration,
            duration=month_duration),
        token=self.token)

    self.assertEqual(
        api_report_data,
        rdf_report_plugins.ApiReportData(
            representation_type=rdf_report_plugins.ApiReportData.
            RepresentationType.PIE_CHART,
            pie_chart=rdf_report_plugins.ApiPieChartReportData(data=[])))
Example #11
    def testDeletesTempWithAgeOlderThanGivenAge(self):
        with test_lib.ConfigOverrider(
            {"DataRetention.tmp_ttl": rdfvalue.Duration("300s")}):

            with test_lib.FakeTime(40 + 60 * self.NUM_TMP):
                flow.GRRFlow.StartFlow(
                    flow_name=data_retention.CleanTemp.__name__,
                    sync=True,
                    token=self.token)
                latest_timestamp = rdfvalue.RDFDatetime().Now()

            tmp_urns = list(
                aff4.FACTORY.Open("aff4:/tmp",
                                  token=self.token).ListChildren())
            self.assertEqual(len(tmp_urns), 5)

            for tmp_urn in tmp_urns:
                self.assertLess(tmp_urn.age, latest_timestamp)
                self.assertGreaterEqual(
                    tmp_urn.age, latest_timestamp - rdfvalue.Duration("300s"))
Example #12
  def ScheduleKillNotification(self):
    """Schedules a kill notification for this flow."""
    # Create a notification for the flow in the future that
    # indicates that this flow is in progress. We'll delete this
    # notification when we're done with processing completed
    # requests. If we're stuck for some reason, the notification
    # will be delivered later and the stuck flow will get
    # terminated.
    stuck_flows_timeout = rdfvalue.Duration(config_lib.CONFIG[
        "Worker.stuck_flows_timeout"])
    kill_timestamp = (rdfvalue.RDFDatetime().Now() + stuck_flows_timeout)
    with queue_manager.QueueManager(token=self.token) as manager:
      manager.QueueNotification(
          session_id=self.session_id,
          in_progress=True,
          timestamp=kill_timestamp)

    # kill_timestamp may get updated via flow.HeartBeat() calls, so we
    # have to store it in the context.
    self.context.kill_timestamp = kill_timestamp
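
The comment block above describes a watchdog pattern: queue a delayed "in progress" notification, delete it on normal completion, and treat its eventual delivery as the signal that a flow is stuck. A standalone sketch of that pattern (illustrative names, no GRR imports):

import time


class StuckFlowWatchdog(object):
    """Toy illustration: a pending kill timestamp per session that only
    fires if the flow has not finished (and cancelled it) in time."""

    def __init__(self):
        self.pending = {}  # session_id -> kill timestamp (seconds)

    def schedule(self, session_id, timeout_seconds):
        self.pending[session_id] = time.time() + timeout_seconds

    def heartbeat(self, session_id, timeout_seconds):
        # Mirrors flow.HeartBeat() pushing kill_timestamp into the future.
        self.schedule(session_id, timeout_seconds)

    def finished(self, session_id):
        # Completed in time: cancel the pending kill notification.
        self.pending.pop(session_id, None)

    def stuck_sessions(self, now=None):
        now = time.time() if now is None else now
        return [s for s, t in self.pending.items() if t <= now]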
Example #13
    def Start(self):
        self.start_time = rdfvalue.RDFDatetime().Now()

        exceptions_by_hunt = {}
        if not self.state.args.max_running_time:
            self.state.args.max_running_time = rdfvalue.Duration("%ds" % int(
                ProcessHuntResultCollectionsCronFlow.lifetime.seconds * 0.6))

        while not self.CheckIfRunningTooLong():
            count = self.ProcessOneHunt(exceptions_by_hunt)
            if not count:
                break

        if exceptions_by_hunt:
            e = ResultsProcessingError()
            for hunt_urn, exceptions_by_plugin in exceptions_by_hunt.items():
                for plugin, exceptions in exceptions_by_plugin.items():
                    for exception in exceptions:
                        e.RegisterSubException(hunt_urn, plugin, exception)
            raise e
Example #14
    def GetReportData(self, get_report_args, token):
        """Filter the last week of flows."""
        ret = report_plugins.ApiReportData(
            representation_type=report_plugins.ApiReportData.
            RepresentationType.STACK_CHART)

        try:
            now = rdfvalue.RDFDatetime().Now()
            week_duration = rdfvalue.Duration("7d")
            offset = week_duration * ClientsActivityReportPlugin.WEEKS
            client_activity = {}

            try:
                logs_gen = report_utils.GetAuditLogFiles(offset, now, token)
            except ValueError:  # Couldn't find any logs.
                logs_gen = iter(())

            for fd in logs_gen:
                for week in range(ClientsActivityReportPlugin.WEEKS):
                    start = now - week * week_duration
                    for event in fd.GenerateItems():
                        if start <= event.timestamp < (start + week_duration):
                            weekly_activity = client_activity.setdefault(
                                event.client, [[x, 0] for x in range(
                                    -ClientsActivityReportPlugin.WEEKS, 0, 1)])
                            weekly_activity[-week][1] += 1

            ret.stack_chart.data = sorted(
                (report_plugins.ApiReportDataSeries2D(
                    label=str(client),
                    points=[
                        report_plugins.ApiReportDataPoint2D(x=x, y=y)
                        for x, y in client_data
                    ]) for client, client_data in client_activity.iteritems()
                 if client),
                key=lambda series: series.label)

        except IOError:
            pass

        return ret
Example #15
  def testStuckNotificationGetsDeletedAfterTheFlowIsTerminated(self):
    worker_obj = worker.GRRWorker(token=self.token)
    initial_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(100)
    stuck_flows_timeout = flow_runner.FlowRunner.stuck_flows_timeout

    try:
      with test_lib.FakeTime(initial_time.AsSecondsFromEpoch()):
        session_id = flow.GRRFlow.StartFlow(
            flow_name=WorkerStuckableTestFlow.__name__,
            client_id=self.client_id,
            token=self.token,
            sync=False)

        # Process all messages
        worker_obj.RunOnce()
        # Wait until worker thread starts processing the flow.
        WorkerStuckableTestFlow.WaitUntilWorkerStartsProcessing()

      # Set the time to max worker flow duration + 1 minute. The flow is
      # currently blocked because of the way the semaphores are set up.
      # The worker should consider the flow to be stuck and terminate it.
      future_time = (
          initial_time + rdfvalue.Duration("1m") + stuck_flows_timeout)
      with test_lib.FakeTime(future_time.AsSecondsFromEpoch()):
        worker_obj.RunOnce()

      killed_flow = aff4.FACTORY.Open(session_id, token=self.token)
      self.assertEqual(killed_flow.context.state,
                       rdf_flows.FlowContext.State.ERROR)
      self.assertEqual(killed_flow.context.status,
                       "Terminated by user test. Reason: Stuck in the worker")

      # Check that stuck notification has been removed.
      qm = queue_manager.QueueManager(token=self.token)
      notifications_by_priority = qm.GetNotificationsByPriority(queues.FLOWS)
      self.assertTrue(qm.STUCK_PRIORITY not in notifications_by_priority)
    finally:
      # Release the semaphore so that worker thread unblocks and finishes
      # processing the flow.
      WorkerStuckableTestFlow.LetWorkerFinishProcessing()
      worker_obj.thread_pool.Join()
Example #16
    def _VerifyHunt(self, hunt):
        results_metadata = aff4.FACTORY.Open(
            hunt.urn.Add("ResultsMetadata"),
            aff4_type=HuntResultsMetadata.__name__,
            token=hunt.token)

        results = []
        for plugin_id, (plugin_descriptor,
                        plugin_state) in results_metadata.Get(
                            results_metadata.Schema.OUTPUT_PLUGINS,
                            {}).items():
            plugin_obj = plugin_descriptor.GetPluginForState(plugin_state)

            plugin_verifiers = plugin_descriptor.GetPluginVerifiers()
            if not plugin_verifiers:
                new_results = [
                    output_plugin.OutputPluginVerificationResult(
                        status=output_plugin.OutputPluginVerificationResult.
                        Status.N_A,
                        status_message=("Plugin %s is not verifiable." %
                                        plugin_descriptor.plugin_name))
                ]
            else:
                new_results = []
                for plugin_verifier in plugin_verifiers:
                    new_results.append(
                        plugin_verifier.VerifyHuntOutput(plugin_obj, hunt))

            for result in new_results:
                result.timestamp = rdfvalue.RDFDatetime().Now()
                result.plugin_id = plugin_id
                result.plugin_descriptor = plugin_descriptor

                self.Log(
                    "Verification result for %s: (%s) %s." %
                    (utils.SmartStr(hunt.urn), utils.SmartStr(
                        result.status), utils.SmartStr(result.status_message)))

            results.extend(new_results)

        return results
Example #17
  def HandleFiltered(self, filter_func, args, token):
    fd = aff4.FACTORY.Open("aff4:/hunts", mode="r", token=token)
    children = list(fd.ListChildren())
    children.sort(key=operator.attrgetter("age"), reverse=True)

    if not args.active_within:
      raise ValueError("active_within filter has to be used when "
                       "any kind of filtering is done (to prevent "
                       "queries of death)")

    min_age = rdfvalue.RDFDatetime().Now() - args.active_within
    active_children = []
    for child in children:
      if child.age > min_age:
        active_children.append(child)
      else:
        break

    index = 0
    hunt_list = []
    active_children_map = {}
    for hunt in fd.OpenChildren(children=active_children):
      if (not isinstance(hunt, hunts.GRRHunt) or not hunt.state or
          not filter_func(hunt)):
        continue
      active_children_map[hunt.urn] = hunt

    for urn in active_children:
      try:
        hunt = active_children_map[urn]
      except KeyError:
        continue

      if index >= args.offset:
        hunt_list.append(hunt)

      index += 1
      if args.count and len(hunt_list) >= args.count:
        break

    return ApiListHuntsResult(items=self._BuildHuntList(hunt_list))
Example #18
    def GetClientConfig(self, context, validate=True):
        """Generates the client config file for inclusion in deployable binaries."""
        with utils.TempDirectory() as tmp_dir:
            # Make sure we write the file in yaml format.
            filename = os.path.join(
                tmp_dir,
                config_lib.CONFIG.Get("ClientBuilder.config_filename",
                                      context=context))

            new_config = config_lib.CONFIG.MakeNewConfig()
            new_config.Initialize(reset=True, data="")
            new_config.SetWriteBack(filename)

            # Only copy certain sections to the client. We enumerate all
            # defined options and then resolve those from the config in the
            # client's context. The result is the raw option as if the
            # client read our config file.
            for descriptor in sorted(config_lib.CONFIG.type_infos,
                                     key=lambda x: x.name):
                if descriptor.name in self.SKIP_OPTION_LIST:
                    continue

                if descriptor.section in self.CONFIG_SECTIONS:
                    value = config_lib.CONFIG.GetRaw(descriptor.name,
                                                     context=context,
                                                     default=None)

                    if value is not None:
                        logging.debug("Copying config option to client: %s",
                                      descriptor.name)

                        new_config.SetRaw(descriptor.name, value)

            new_config.Set("Client.deploy_time",
                           str(rdfvalue.RDFDatetime().Now()))
            new_config.Write()

            if validate:
                self.ValidateEndConfig(new_config)

            return open(filename, "rb").read()
Example #19
  def BuildTable(self, start_row, end_row, request):
    """Add all the notifications to this table."""
    row_index = 0
    search_term = request.REQ.get("sSearch")

    # We modify this object by changing the notification from pending to
    # shown.
    try:
      user_fd = aff4.FACTORY.Open(
          aff4.ROOT_URN.Add("users").Add(request.user),
          aff4_type=aff4_users.GRRUser,
          token=request.token)
    except IOError:
      return

    # Hack for sorting. Requires retrieval of all notifications.
    notifications = list(user_fd.ShowNotifications(reset=False))
    for notification in sorted(notifications,
                               key=lambda x: x.timestamp,
                               reverse=True):
      if row_index < start_row:
        continue
      if row_index > end_row:
        break

      if (search_term and
          search_term.lower() not in notification.message.lower()):
        continue

      row = {"Message": notification.message,
             "Target": self.FormatFromTemplate(
                 self.target_template,
                 hash=self.BuildHashFromNotification(notification),
                 notification_type=notification.type,
                 target=notification.subject),
             "Timestamp": rdfvalue.RDFDatetime(notification.timestamp)}
      self.AddRow(row, row_index)
      row_index += 1

    flow.GRRFlow.StartFlow(flow_name="ResetUserNotifications",
                           token=request.token)
Example #20
    def _CallClients(self, client_id_list):
        now = rdfvalue.RDFDatetime().Now()
        due = now + rdfvalue.Duration(
            config_lib.CONFIG["StatsHunt.CollectionInterval"])

        for client in aff4.FACTORY.MultiOpen(client_id_list, token=self.token):

            if client.Get(client.SchemaCls.SYSTEM) == "Windows":
                wmi_query = (
                    "Select * from Win32_NetworkAdapterConfiguration where"
                    " IPEnabled=1")
                self.CallClient("WmiQuery",
                                query=wmi_query,
                                next_state="StoreResults",
                                client_id=client.urn,
                                start_time=due)
            else:
                self.CallClient("EnumerateInterfaces",
                                next_state="StoreResults",
                                client_id=client.urn,
                                start_time=due)
Example #21
    def DeleteNotification(self, session_id, start=None, end=None):
        """This deletes the notification when all messages have been processed."""
        if not isinstance(session_id, rdfvalue.SessionID):
            raise RuntimeError(
                "Can only delete notifications for rdfvalue.SessionIDs.")

        if start is None:
            start = 0
        else:
            start = int(start)

        if end is None:
            end = self.frozen_timestamp or rdfvalue.RDFDatetime().Now()

        for queue_shard in self.GetAllNotificationShards(session_id.Queue()):
            data_store.DB.DeleteAttributes(
                queue_shard, [self.NOTIFY_PREDICATE_PREFIX % session_id],
                token=self.token,
                start=start,
                end=end,
                sync=True)
Example #22
  def GenerateUncompactedItems(self, max_reversed_results=0,
                               timestamp=None):
    if self.IsAttributeSet(self.Schema.DATA):
      freeze_timestamp = timestamp or rdfvalue.RDFDatetime().Now()
      results = []
      for _, value, _ in data_store.DB.ResolvePrefix(
          self.urn, self.Schema.DATA.predicate, token=self.token,
          timestamp=(0, freeze_timestamp)):

        if results is not None:
          results.append(self.Schema.DATA(value).payload)
          if max_reversed_results and len(results) > max_reversed_results:
            for result in results:
              yield result
            results = None
        else:
          yield self.Schema.DATA(value).payload

      if results is not None:
        for result in reversed(results):
          yield result
Example #23
    def testDeletesFlowsOlderThanGivenAge(self):
        config_lib.CONFIG.Set("DataRetention.cron_jobs_flows_ttl",
                              rdfvalue.Duration("150s"))
        # Only two iterations are supposed to survive, as they were running
        # every minute.
        with test_lib.FakeTime(40 + 60 * self.NUM_CRON_RUNS):
            flow.GRRFlow.StartFlow(
                flow_name=data_retention.CleanCronJobs.__name__,
                sync=True,
                token=self.token)
            latest_timestamp = rdfvalue.RDFDatetime().Now()

        for cron_urn in self.cron_jobs_urns:
            fd = aff4.FACTORY.Open(cron_urn, token=self.token)
            children = list(fd.ListChildren())
            self.assertEqual(len(children), 2)

            for child_urn in children:
                self.assertTrue(child_urn.age < latest_timestamp)
                self.assertTrue(child_urn.age > latest_timestamp -
                                rdfvalue.Duration("150s"))
Example #24
  def Add(self, category, label, age):
    """Adds another instance of this category into the active_days counter.

    We automatically count the event towards all relevant active_days. For
    example, if the category "Windows" was seen 8 days ago, it is counted
    towards the 30 day and 14 day active counters, but not towards the 7 and
    1 day ones.

    Args:
      category: The category name to account this instance against.
      label: Client label to which this should be applied.
      age: When this instance occurred.
    """
    now = rdfvalue.RDFDatetime().Now()
    category = utils.SmartUnicode(category)

    for active_time in self.active_days:
      self.categories[active_time].setdefault(label, {})
      if (now - age).seconds < active_time * 24 * 60 * 60:
        self.categories[active_time][label][category] = self.categories[
            active_time][label].get(category, 0) + 1
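
A standalone illustration of the bucketing rule from the docstring (plain Python, numbers chosen to match the docstring's example):

active_days = [1, 7, 14, 30]
age_days = 8

# An event seen 8 days ago falls strictly inside the 14 and 30 day windows
# only, mirroring the (now - age).seconds < active_time * 24 * 60 * 60 check.
buckets = [d for d in active_days if age_days < d]
print(buckets)  # [14, 30]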
Example #25
    def testStuckFlowGetsTerminated(self):
        worker_obj = worker.GRRWorker(worker.DEFAULT_WORKER_QUEUE,
                                      token=self.token)
        initial_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(0)
        stuck_flows_timeout = rdfvalue.Duration(
            config_lib.CONFIG["Worker.stuck_flows_timeout"])

        try:
            with test_lib.FakeTime(initial_time.AsSecondsFromEpoch()):
                session_id = flow.GRRFlow.StartFlow(
                    flow_name="WorkerStuckableTestFlow",
                    client_id=self.client_id,
                    token=self.token,
                    sync=False)

                # Process all messages
                worker_obj.RunOnce()
                # Wait until worker thread starts processing the flow.
                WorkerStuckableTestFlow.WaitUntilWorkerStartsProcessing()

            # Set the time to max worker flow duration + 1 minute. The flow is
            # currently blocked because of the way the semaphores are set up.
            # The worker should consider the flow to be stuck and terminate it.
            future_time = (initial_time + rdfvalue.Duration("1m") +
                           stuck_flows_timeout)
            with test_lib.FakeTime(future_time.AsSecondsFromEpoch()):
                worker_obj.RunOnce()

        finally:
            # Release the semaphore so that worker thread unblocks and finishes
            # processing the flow.
            WorkerStuckableTestFlow.LetWorkerFinishProcessing()
            worker_obj.thread_pool.Join()

        killed_flow = aff4.FACTORY.Open(session_id, token=self.token)
        self.assertEqual(killed_flow.state.context.state,
                         rdfvalue.Flow.State.ERROR)
        self.assertEqual(
            killed_flow.state.context.status,
            "Terminated by user test. Reason: Stuck in the worker")
Example #26
    def Add(self, rdf_value, timestamp=None, suffix=None, **kwargs):
        """Adds an rdf value to the collection.

        Adds an rdf value to the collection. Does not require that the
        collection be locked.

        Args:
          rdf_value: The rdf value to add to the collection.

          timestamp: The timestamp (in microseconds) to store the rdf value
              at. Defaults to the current time.

          suffix: A 'fractional timestamp' suffix to reduce the chance of
              collisions. Defaults to a random number.

          **kwargs: Keyword arguments to pass through to the underlying
              database call.

        Raises:
          ValueError: rdf_value has unexpected type.
        """
        if not isinstance(rdf_value, self.RDF_TYPE):
            raise ValueError(
                "This collection only accepts values of type %s." %
                self.RDF_TYPE.__name__)

        if timestamp is None:
            timestamp = rdfvalue.RDFDatetime().Now()

        if isinstance(timestamp, rdfvalue.RDFDatetime):
            timestamp = timestamp.AsMicroSecondsFromEpoch()

        result_subject = self._MakeURN(timestamp, suffix)
        data_store.DB.Set(result_subject,
                          self.ATTRIBUTE,
                          rdf_value.SerializeToString(),
                          timestamp=timestamp,
                          token=self.token,
                          **kwargs)
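
The timestamp/suffix scheme exists so that two values written in the same microsecond still land on distinct subjects. A hypothetical stand-in for _MakeURN showing the idea (not GRR's actual implementation):

import random
import time


def make_result_subject(timestamp_us=None, suffix=None):
    """Combine a microsecond timestamp with a random suffix to reduce the
    chance of collisions between same-moment writes."""
    if timestamp_us is None:
        timestamp_us = int(time.time() * 1e6)
    if suffix is None:
        suffix = random.randint(0, 0xFFFFFF)
    return "%016x.%06x" % (timestamp_us, suffix)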
Example #27
def GetClientTestTargets(client_ids=None, hostnames=None, token=None,
                         checkin_duration_threshold="20m"):
  """Get client urns for end-to-end tests.

  Args:
    client_ids: list of client id URN strings or rdf_client.ClientURNs
    hostnames: list of hostnames to search for
    token: access token
    checkin_duration_threshold: clients that haven't checked in for this long
                                will be excluded
  Returns:
    client_id_set: set of rdf_client.ClientURNs available for end-to-end tests.
  """

  if client_ids:
    client_ids = set(client_ids)
  else:
    client_ids = set(config_lib.CONFIG.Get("Test.end_to_end_client_ids"))

  if hostnames:
    hosts = set(hostnames)
  else:
    hosts = set(config_lib.CONFIG.Get("Test.end_to_end_client_hostnames"))

  if hosts:
    client_id_dict = client_index.GetClientURNsForHostnames(hosts, token=token)
    for client_list in client_id_dict.values():
      client_ids.update(client_list)

  client_id_set = set([rdf_client.ClientURN(x) for x in client_ids])
  duration_threshold = rdfvalue.Duration(checkin_duration_threshold)
  for client in aff4.FACTORY.MultiOpen(client_id_set, token=token):
    # Only test against client IDs that have checked in recently.  Test machines
    # tend to have lots of old client IDs hanging around that will cause lots of
    # waiting for timeouts in the tests.
    if (rdfvalue.RDFDatetime().Now() - client.Get(client.Schema.LAST) >
        duration_threshold):
      client_id_set.remove(client.urn)

  return client_id_set
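
The check-in filter above boils down to dropping clients whose last-seen time is older than the threshold. A minimal standalone sketch of that predicate (illustrative names; plain datetimes instead of RDFDatetime):

import datetime


def recently_seen(last_seen_by_client,
                  threshold=datetime.timedelta(minutes=20)):
    """Keep only clients that checked in within the given threshold."""
    now = datetime.datetime.utcnow()
    return set(client for client, last_seen in last_seen_by_client.items()
               if now - last_seen <= threshold)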
Example #28
  def _QueryAndOwn(self, transaction, lease_seconds=100,
                   limit=1, user=""):
    """Does the real work of self.QueryAndOwn()."""
    tasks = []

    lease = long(lease_seconds * 1e6)

    ttl_exceeded_count = 0

    # Only grab attributes with timestamps in the past.
    for predicate, task, timestamp in transaction.ResolveRegex(
        self.TASK_PREDICATE_PREFIX % ".*",
        timestamp=(0, self.frozen_timestamp or rdfvalue.RDFDatetime().Now())):
      task = rdfvalue.GrrMessage(task)
      task.eta = timestamp
      task.last_lease = "%s@%s:%d" % (user,
                                      socket.gethostname(),
                                      os.getpid())
      # Decrement the ttl
      task.task_ttl -= 1
      if task.task_ttl <= 0:
        # Remove the task if ttl is exhausted.
        transaction.DeleteAttribute(predicate)
        ttl_exceeded_count += 1
        stats.STATS.IncrementCounter("grr_task_ttl_expired_count")
      else:
        if task.task_ttl != rdfvalue.GrrMessage.max_ttl - 1:
          stats.STATS.IncrementCounter("grr_task_retransmission_count")

        # Update the timestamp on the value to be in the future
        transaction.Set(predicate, task.SerializeToString(), replace=True,
                        timestamp=long(time.time() * 1e6) + lease)
        tasks.append(task)
        if len(tasks) >= limit:
          break

    if ttl_exceeded_count:
      logging.info("TTL exceeded for %d messages on queue %s",
                   ttl_exceeded_count, transaction.subject)
    return tasks
Example #29
    def testNoApplicableTests(self):
        """Try to run linux tests on windows."""
        self.SetupClients(1,
                          system="Windows",
                          os_version="6.1.7601SP1",
                          arch="AMD64")
        install_time = rdfvalue.RDFDatetime().Now()
        user = "******"
        userobj = rdf_client.User(username=user)
        interface = rdf_client.Interface(ifname="eth0")
        self.client = aff4.FACTORY.Create(self.client_id,
                                          "VFSGRRClient",
                                          mode="rw",
                                          token=self.token,
                                          age=aff4.ALL_TIMES)

        kb = self.client.Get(self.client.Schema.KNOWLEDGE_BASE)
        kb.users.Append(userobj)
        self.client.Set(self.client.Schema.HOSTNAME("hostname"))
        self.client.Set(self.client.Schema.OS_RELEASE("7"))
        self.client.Set(self.client.Schema.KERNEL("6.1.7601"))
        self.client.Set(self.client.Schema.FQDN("hostname.example.com"))
        self.client.Set(self.client.Schema.INSTALL_DATE(install_time))
        self.client.Set(self.client.Schema.KNOWLEDGE_BASE(kb))
        self.client.Set(self.client.Schema.USERNAMES([user]))
        self.client.Set(self.client.Schema.LAST_INTERFACES([interface]))
        self.client.Flush()

        args = endtoend.EndToEndTestFlowArgs(test_names=[
            "TestListDirectoryOSLinuxDarwin", "MockEndToEndTest",
            "TestListDirectoryOSLinuxDarwin"
        ])

        self.assertRaises(
            flow.FlowError, list,
            test_lib.TestFlowHelper("EndToEndTestFlow",
                                    self.client_mock,
                                    client_id=self.client_id,
                                    token=self.token,
                                    args=args))
Example #30
    def Run(self):
        # Add one "normal" cron job...
        with test_lib.FakeTime(42):
            self.CreateCronJob(
                flow_name=cron_system.GRRVersionBreakDown.__name__,
                periodicity="1d",
                lifetime="2h",
                description="foo",
                disabled=True,
                token=self.token)

        # ...one "normal" cron job,
        with test_lib.FakeTime(84):
            self.CreateCronJob(flow_name=cron_system.OSBreakDown.__name__,
                               periodicity="7d",
                               lifetime="1d",
                               description="bar",
                               token=self.token)

        # ...and one failing cron job.
        with test_lib.FakeTime(126):
            cron_urn = self.CreateCronJob(
                flow_name=cron_system.LastAccessStats.__name__,
                periodicity="7d",
                lifetime="1d",
                token=self.token)

            for i in range(4):
                with test_lib.FakeTime(200 + i * 10):
                    with aff4.FACTORY.OpenWithLock(cron_urn,
                                                   token=self.token) as job:
                        job.Set(
                            job.Schema.LAST_RUN_TIME(
                                rdfvalue.RDFDatetime().Now()))
                        job.Set(
                            job.Schema.LAST_RUN_STATUS(
                                status=rdf_cronjobs.CronJobRunStatus.Status.
                                ERROR))

        self.Check("GET", "/api/cron-jobs")