Example 1
    def testClientMetadataInitialWrite(self):
        d = self.db

        client_id_1 = "C.fc413187fefa1dcf"
        # Typical initial FS enabled write
        d.WriteClientMetadata(client_id_1, fleetspeak_enabled=True)

        client_id_2 = "C.00413187fefa1dcf"
        # Typical initial non-FS write
        d.WriteClientMetadata(client_id_2,
                              certificate=CERT,
                              first_seen=rdfvalue.RDFDatetime(100000000),
                              fleetspeak_enabled=False)

        res = d.MultiReadClientMetadata([client_id_1, client_id_2])
        self.assertEqual(len(res), 2)

        m1 = res[client_id_1]
        self.assertIsInstance(m1, rdf_objects.ClientMetadata)
        self.assertTrue(m1.fleetspeak_enabled)

        m2 = res[client_id_2]
        self.assertIsInstance(m2, rdf_objects.ClientMetadata)
        self.assertFalse(m2.fleetspeak_enabled)
        self.assertEqual(m2.certificate, CERT)
        self.assertEqual(m2.first_seen, rdfvalue.RDFDatetime(100000000))
Example 2
 def _GetLastForemanRunTime(self, client_id):
   client = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)
   try:
     return (client.Get(client.Schema.LAST_FOREMAN_TIME) or
             rdfvalue.RDFDatetime(0))
   except AttributeError:
     return rdfvalue.RDFDatetime(0)
Example 3
    def testIAddDuration(self):
        date = rdfvalue.RDFDatetime(1e9)
        date += rdfvalue.Duration("12h")
        self.assertEqual(date, 1e9 + 12 * 3600e6)

        date = rdfvalue.RDFDatetime(1e9)
        date += rdfvalue.Duration("-60s")
        self.assertEqual(date, 1e9 - 60e6)
Example 4
    def testIAddNumber(self):
        date = rdfvalue.RDFDatetime(1e9)
        date += 60
        self.assertEqual(date, 1e9 + 60e6)

        date = rdfvalue.RDFDatetime(1e9)
        date += 1000.23
        self.assertEqual(date, 1e9 + 1000230e3)

        date = rdfvalue.RDFDatetime(1e9)
        date += -10
        self.assertEqual(date, 1e9 - 10e6)
Example 5
    def testISubNumber(self):
        date = rdfvalue.RDFDatetime(1e9)
        date -= 60
        self.assertEqual(date, 1e9 - 60e6)

        date = rdfvalue.RDFDatetime(1e9)
        date -= -1000.23
        self.assertEqual(date, 1e9 + 1000230e3)

        date = rdfvalue.RDFDatetime(1e9)
        date -= 1e12
        self.assertEqual(date, 1e9 - 1e18)
Example 6
    def testISubDuration(self):
        date = rdfvalue.RDFDatetime(1e9)
        date -= rdfvalue.Duration("5m")
        self.assertEqual(date, 1e9 - 5 * 60e6)

        date = rdfvalue.RDFDatetime(1e9)
        date -= rdfvalue.Duration("-60s")
        self.assertEqual(date, 1e9 + 60e6)

        date = rdfvalue.RDFDatetime(1e9)
        date -= rdfvalue.Duration("1w")
        self.assertEqual(date, 1e9 - 7 * 24 * 3600e6)
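
The arithmetic tests above all rely on one unit convention. The sketch below assumes RDFDatetime wraps an integer count of microseconds since the epoch, bare numbers added or subtracted are interpreted as seconds, and Duration("12h") converts its human-readable form to microseconds; plain ints stand in for the RDF types.

MICROS_PER_SEC = 10**6

date_us = 10**9                          # rdfvalue.RDFDatetime(1e9)
date_us += 12 * 3600 * MICROS_PER_SEC    # date += rdfvalue.Duration("12h")
assert date_us == 10**9 + 12 * 3600 * MICROS_PER_SEC

date_us = 10**9
date_us += 60 * MICROS_PER_SEC           # date += 60  (bare seconds)
assert date_us == 10**9 + 60 * MICROS_PER_SEC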
Example 7
File: mem.py Project: qsdj/grr
    def _ParseTimeRange(self, timerange):
        """Parses a timerange argument and always returns non-None timerange."""
        if timerange is None:
            timerange = (None, None)

        from_time, to_time = timerange
        if not from_time:
            from_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(0)

        if not to_time:
            to_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(sys.maxsize)

        return (from_time, to_time)
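
A minimal illustration of the normalization above, with plain epoch-seconds integers standing in for rdfvalue.RDFDatetime: a missing or half-open range is mapped onto concrete endpoints, so callers can always compare against both bounds.

import sys

def normalize(timerange):
    # Same logic as _ParseTimeRange, minus the RDF types.
    from_time, to_time = timerange or (None, None)
    return (from_time or 0, to_time or sys.maxsize)

assert normalize(None) == (0, sys.maxsize)
assert normalize((100, None)) == (100, sys.maxsize)
assert normalize((100, 200)) == (100, 200)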
Example 8
    def testReadWriteApprovalRequestWithEmptyNotifiedUsersEmailsAndGrants(
            self):
        d = self.db

        # Ensure that the requestor user exists.
        d.WriteGRRUser("requestor")

        client_id = "C.0000000050000001"
        approval_request = rdf_objects.ApprovalRequest(
            approval_type=rdf_objects.ApprovalRequest.ApprovalType.
            APPROVAL_TYPE_CLIENT,
            subject_id=client_id,
            requestor_username="******",
            reason="some test reason",
            expiration_time=rdfvalue.RDFDatetime(42))

        approval_id = d.WriteApprovalRequest(approval_request)
        self.assertTrue(approval_id)

        read_request = d.ReadApprovalRequest("requestor", approval_id)

        # Approval id and timestamp are generated in WriteApprovalRequest, so we
        # fill them into our model object to make sure that the equality check works.
        approval_request.approval_id = read_request.approval_id
        approval_request.timestamp = read_request.timestamp
        self.assertEqual(approval_request, read_request)
Example 9
def YamlLoader(string):
    """Load an AFF4 object from a serialized YAML representation."""
    representation = yaml.load(string)
    result_cls = aff4.FACTORY.AFF4Object(representation["aff4_class"])
    aff4_attributes = {}
    for predicate, values in representation["attributes"].items():
        attribute = aff4.Attribute.PREDICATES[predicate]
        tmp = aff4_attributes[attribute] = []

        for rdfvalue_cls_name, value, age in values:
            rdfvalue_cls = aff4.FACTORY.RDFValue(rdfvalue_cls_name)
            value = rdfvalue_cls(value, age=rdfvalue.RDFDatetime(age))
            tmp.append(value)

    # Ensure the object is dirty so when we save it, it can be written to the data
    # store.
    result = result_cls(urn=representation["_urn"],
                        clone=aff4_attributes,
                        mode="rw",
                        age=representation["age_policy"])

    result.new_attributes, result.synced_attributes = result.synced_attributes, {}

    result._dirty = True  # pylint: disable=protected-access

    return result
Example 10
  def Handle(self, args, token=None):
    if not args.timestamp:
      age = rdfvalue.RDFDatetime.Now()
    else:
      age = rdfvalue.RDFDatetime(args.timestamp)
    api_client = None
    if data_store.RelationalDBReadEnabled():
      info = data_store.REL_DB.ReadClientFullInfo(str(args.client_id))
      if info is None:
        raise api_call_handler_base.ResourceNotFoundError()

      if args.timestamp:
        # Assume that a snapshot for this particular timestamp exists.
        snapshots = data_store.REL_DB.ReadClientSnapshotHistory(
            str(args.client_id), timerange=(args.timestamp, args.timestamp))

        if snapshots:
          info.last_snapshot = snapshots[0]
          info.last_startup_info = snapshots[0].startup_info

      api_client = ApiClient().InitFromClientInfo(info)
    else:
      client = aff4.FACTORY.Open(
          args.client_id.ToClientURN(),
          aff4_type=aff4_grr.VFSGRRClient,
          age=age,
          token=token)
      api_client = ApiClient().InitFromAff4Object(client)
    UpdateClientsFromFleetspeak([api_client])
    return api_client
Example 11
    def WMITimeStrToRDFDatetime(self, timestr):
        """Return RDFDatetime from a WMI string like 20140825162259.000000-420.

        We have some timezone manipulation work to do here because the UTC
        offset is given in minutes rather than as +-HHMM.

        Args:
          timestr: WMI time string.

        Returns:
          rdfvalue.RDFDatetime
        """
        # We use manual parsing here because the time functions provided
        # (datetime, dateutil) do not properly deal with timezone information.
        offset_minutes = timestr[21:]
        year = timestr[:4]
        month = timestr[4:6]
        day = timestr[6:8]
        hours = timestr[8:10]
        minutes = timestr[10:12]
        seconds = timestr[12:14]
        microseconds = timestr[15:21]

        unix_seconds = calendar.timegm(
            map(int, [year, month, day, hours, minutes, seconds]))
        unix_seconds -= int(offset_minutes) * 60
        return rdfvalue.RDFDatetime(unix_seconds * 1e6 + int(microseconds))
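
A standalone sketch of the same parsing, using the sample string from the docstring; a plain integer of microseconds since the epoch stands in for rdfvalue.RDFDatetime, and the tuple() around map() is only needed on Python 3.

import calendar

timestr = "20140825162259.000000-420"
offset_minutes = int(timestr[21:])   # "-420" minutes, i.e. UTC-7
fields = (timestr[:4], timestr[4:6], timestr[6:8],
          timestr[8:10], timestr[10:12], timestr[12:14])
unix_seconds = calendar.timegm(tuple(map(int, fields)))
unix_seconds -= offset_minutes * 60  # shift local wall-clock time to UTC
epoch_us = unix_seconds * 10**6 + int(timestr[15:21])
print(epoch_us)  # 1409008979000000 == 2014-08-25 23:22:59 UTC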
Example 12
    def _AnalyzeKeywords(self, keywords):
        start_time = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("180d")
        end_time = rdfvalue.RDFDatetime(self.LAST_TIMESTAMP)
        filtered_keywords = []
        unversioned_keywords = []

        for k in keywords:
            if k.startswith(self.START_TIME_PREFIX):
                try:
                    start_time = rdfvalue.RDFDatetime.FromHumanReadable(
                        k[self.START_TIME_PREFIX_LEN:])
                except ValueError:
                    pass
            elif k.startswith(self.END_TIME_PREFIX):
                try:
                    end_time = rdfvalue.RDFDatetime.FromHumanReadable(
                        k[self.END_TIME_PREFIX_LEN:], eoy=True)
                except (TypeError, ValueError):
                    pass
            elif k[0] == "+":
                kw = k[1:]
                filtered_keywords.append(kw)
                unversioned_keywords.append(kw)
            else:
                filtered_keywords.append(k)

        if not filtered_keywords:
            filtered_keywords.append(".")

        return start_time, end_time, filtered_keywords, unversioned_keywords
Example 13
    def testReadWriteApprovalRequestsWithFilledInUsersEmailsAndGrants(self):
        d = self.db

        # Ensure that the requestor user exists.
        d.WriteGRRUser("requestor")

        client_id = "C.0000000050000001"
        approval_request = rdf_objects.ApprovalRequest(
            approval_type=rdf_objects.ApprovalRequest.ApprovalType.
            APPROVAL_TYPE_CLIENT,
            subject_id=client_id,
            requestor_username="******",
            reason="some test reason",
            expiration_time=rdfvalue.RDFDatetime(42),
            notified_users=["user1", "user2", "user3"],
            email_cc_addresses=["*****@*****.**", "*****@*****.**"],
            grants=[
                rdf_objects.ApprovalGrant(grantor_username="******"),
                rdf_objects.ApprovalGrant(grantor_username="******")
            ])

        approval_id = d.WriteApprovalRequest(approval_request)

        read_request = d.ReadApprovalRequest("requestor", approval_id)

        self.assertEqual(sorted(approval_request.notified_users),
                         sorted(read_request.notified_users))
        self.assertEqual(sorted(approval_request.email_cc_addresses),
                         sorted(read_request.email_cc_addresses))
        self.assertEqual(
            sorted(g.grantor_username for g in approval_request.grants),
            sorted(g.grantor_username for g in read_request.grants))
Example 14
  def WriteBuffer(self, responses):
    """Write the hash received to the blob image."""

    # Note that hashes must arrive at this state in the correct order since they
    # are sent in the correct order (either via CallState or CallClient).
    index = responses.request_data["index"]
    if index not in self.state.pending_files:
      return

    # Failed to read the file - ignore it.
    if not responses.success:
      self._FileFetchFailed(index, responses.request.request.name)
      return

    response = responses.First()
    file_tracker = self.state.pending_files.get(index)
    if file_tracker:
      file_tracker.setdefault("blobs", []).append((response.data,
                                                   response.length))

      download_size = file_tracker["size_to_download"]
      if (response.length < self.CHUNK_SIZE or
          response.offset + response.length >= download_size):

        # Write the file to the data store.
        stat_entry = file_tracker["stat_entry"]
        urn = stat_entry.pathspec.AFF4Path(self.client_id)

        with aff4.FACTORY.Create(
            urn, aff4_grr.VFSBlobImage, mode="w", token=self.token) as fd:

          fd.SetChunksize(self.CHUNK_SIZE)
          fd.Set(fd.Schema.STAT(stat_entry))
          fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec))
          fd.Set(fd.Schema.CONTENT_LAST(rdfvalue.RDFDatetime().Now()))

          for digest, length in file_tracker["blobs"]:
            fd.AddBlob(digest, length)

          # Save some space.
          del file_tracker["blobs"]

        if data_store.RelationalDBWriteEnabled():
          client_id = self.client_id.Basename()
          path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
          data_store.REL_DB.WritePathInfos(client_id, [path_info])

        # File done, remove from the store and close it.
        self._ReceiveFetchedFile(file_tracker)

        # Publish the new file event to cause the file to be added to the
        # filestore.
        self.Publish("FileStore.AddFileToStore", urn)

        self.state.files_fetched += 1

        if not self.state.files_fetched % 100:
          self.Log("Fetched %d of %d files.", self.state.files_fetched,
                   self.state.files_to_fetch)
Example 15
 def testSubDuration(self):
     duration = rdfvalue.Duration("5m")
     date = rdfvalue.RDFDatetime(1e9)
     self.assertEqual(int(date - duration), 1e9 - 5 * 60e6)
     duration = rdfvalue.Duration("-60s")
     self.assertEqual(int(date - duration), 1e9 + 60e6)
     duration = rdfvalue.Duration("1w")
     self.assertEqual(int(date - duration), 1e9 - 7 * 24 * 3600e6)
Example 16
    def testCronJobRespectsStartTime(self):
        with test_lib.FakeTime(0):
            cron_manager = aff4_cronjobs.GetCronManager()
            start_time1 = rdfvalue.RDFDatetime(100 * 1000 * 1000)
            cron_args1 = rdf_cronjobs.CreateCronJobFlowArgs(
                start_time=start_time1)
            cron_args1.flow_runner_args.flow_name = "FakeCronJobRel"

            cron_args2 = rdf_cronjobs.CreateCronJobFlowArgs()
            cron_args2.flow_runner_args.flow_name = "FakeCronJobRel"

            cron_job_id1 = cron_manager.CreateJob(cron_args1)
            cron_job_id2 = cron_manager.CreateJob(cron_args2)

            cron_manager.RunOnce(token=self.token)

            cron_job1 = cron_manager.ReadJob(cron_job_id1, token=self.token)
            cron_job2 = cron_manager.ReadJob(cron_job_id2, token=self.token)

            self.assertEqual(cron_job1.cron_args.start_time, start_time1)

            # Flow without a start time should now be running
            self.assertFalse(
                cron_manager.JobIsRunning(cron_job1, token=self.token))
            self.assertTrue(
                cron_manager.JobIsRunning(cron_job2, token=self.token))

        # Move the clock past the start time
        with test_lib.FakeTime(500):

            cron_manager.RunOnce(token=self.token)

            cron_job1 = cron_manager.ReadJob(cron_job_id1, token=self.token)
            cron_job2 = cron_manager.ReadJob(cron_job_id2, token=self.token)

            # Start time should be the same
            self.assertEqual(cron_job1.cron_args.start_time, start_time1)

            # Now both should be running
            self.assertTrue(
                cron_manager.JobIsRunning(cron_job1, token=self.token))
            self.assertTrue(
                cron_manager.JobIsRunning(cron_job2, token=self.token))

            # Check setting a bad run id is handled.
            data_store.REL_DB.UpdateCronJob(cron_job2.job_id,
                                            current_run_id=12345)

            cron_job2 = cron_manager.ReadJob(cron_job_id2, token=self.token)
            self.assertFalse(
                cron_manager.JobIsRunning(cron_job2, token=self.token))

            # Job got updated right away.
            self.assertFalse(cron_job2.current_run_id)

            # DB also reflects the removed run id.
            cron_job2 = cron_manager.ReadJob(cron_job_id2, token=self.token)
            self.assertFalse(cron_job2.current_run_id)
Example 17
  def Handle(self, args, token=None):
    if not args.hunt_id:
      raise ValueError("hunt_id can't be None")

    if not args.client_id:
      raise ValueError("client_id can't be None")

    if not args.vfs_path:
      raise ValueError("vfs_path can't be None")

    if not args.timestamp:
      raise ValueError("timestamp can't be None")

    api_vfs.ValidateVfsPath(args.vfs_path)

    results = implementation.GRRHunt.ResultCollectionForHID(
        args.hunt_id.ToURN())

    expected_aff4_path = args.client_id.ToClientURN().Add(args.vfs_path)
    # TODO(user): should after_timestamp be strictly less than the desired
    # timestamp?
    timestamp = rdfvalue.RDFDatetime(int(args.timestamp) - 1)

    # If the entry corresponding to a given path is not found within
    # MAX_RECORDS_TO_CHECK from a given timestamp, we report a 404.
    for _, item in results.Scan(
        after_timestamp=timestamp.AsMicrosecondsSinceEpoch(),
        max_records=self.MAX_RECORDS_TO_CHECK):
      try:
        # Do not pass the client id we got from the caller. This will
        # get filled automatically from the hunt results and we check
        # later that the aff4_path we get is the same as the one that
        # was requested.
        aff4_path = export.CollectionItemToAff4Path(item, client_id=None)
      except export.ItemNotExportableError:
        continue

      if aff4_path != expected_aff4_path:
        continue

      try:
        aff4_stream = aff4.FACTORY.Open(
            aff4_path, aff4_type=aff4.AFF4Stream, token=token)
        if not aff4_stream.GetContentAge():
          break

        return api_call_handler_base.ApiBinaryStream(
            "%s_%s" % (args.client_id, utils.SmartStr(aff4_path.Basename())),
            content_generator=self._GenerateFile(aff4_stream),
            content_length=len(aff4_stream))
      except aff4.InstantiationError:
        break

    raise HuntFileNotFoundError(
        "File %s with timestamp %s and client %s "
        "wasn't found among the results of hunt %s" %
        (utils.SmartStr(args.vfs_path), utils.SmartStr(args.timestamp),
         utils.SmartStr(args.client_id), utils.SmartStr(args.hunt_id)))
Example 18
    def testHeartBeatingFlowIsNotTreatedAsStuck(self):
        worker_obj = worker_lib.GRRWorker(token=self.token)
        initial_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100)

        stuck_flows_timeout = flow_runner.FlowRunner.stuck_flows_timeout
        lease_timeout = rdfvalue.Duration(worker_lib.GRRWorker.flow_lease_time)

        WorkerStuckableTestFlow.Reset(heartbeat=True)
        try:
            with test_lib.FakeTime(initial_time.AsSecondsSinceEpoch()):
                session_id = flow.StartFlow(
                    flow_name=WorkerStuckableTestFlow.__name__,
                    client_id=self.client_id,
                    token=self.token,
                    sync=False)
                # Process all messages
                worker_obj.RunOnce()
                # Wait until worker thread starts processing the flow.
                WorkerStuckableTestFlow.WaitUntilWorkerStartsProcessing()

            # Increase the time in steps, using LetFlowHeartBeat/WaitForFlowHeartBeat
            # to control the flow execution that happens in the parallel thread.
            current_time = rdfvalue.RDFDatetime(initial_time)
            future_time = initial_time + stuck_flows_timeout + rdfvalue.Duration(
                "1m")
            while current_time <= future_time:
                current_time += lease_timeout - rdfvalue.Duration("1s")

                with test_lib.FakeTime(current_time.AsSecondsSinceEpoch()):
                    checked_flow = aff4.FACTORY.Open(session_id,
                                                     token=self.token)
                    WorkerStuckableTestFlow.LetFlowHeartBeat()
                    WorkerStuckableTestFlow.WaitForFlowHeartBeat(
                        last_heartbeat=current_time > future_time)
            # Now current_time is > future_time, where future_time is the time
            # when stuck flow should have been killed. Calling RunOnce() here,
            # because if the flow is going to be killed, it will be killed
            # during worker.RunOnce() call.
            with test_lib.FakeTime(current_time.AsSecondsSinceEpoch()):
                worker_obj.RunOnce()

            # Check that the flow wasn't killed forcefully.
            checked_flow = aff4.FACTORY.Open(session_id, token=self.token)
            self.assertEqual(checked_flow.context.state,
                             rdf_flow_runner.FlowContext.State.RUNNING)

        finally:
            # Release the semaphore so that worker thread unblocks and finishes
            # processing the flow.
            with test_lib.FakeTime(current_time.AsSecondsSinceEpoch()):
                WorkerStuckableTestFlow.LetWorkerFinishProcessing()
                worker_obj.thread_pool.Join()

        # Check that the flow has finished normally.
        checked_flow = aff4.FACTORY.Open(session_id, token=self.token)
        self.assertEqual(checked_flow.context.state,
                         rdf_flow_runner.FlowContext.State.TERMINATED)
Example 19
    def testRaisesIfVfsRootIsNotWhitelisted(self):
        args = hunt_plugin.ApiGetHuntFileArgs(
            hunt_id=self.hunt.urn.Basename(),
            client_id=self.client_id,
            vfs_path="flows/W:123456",
            timestamp=rdfvalue.RDFDatetime().Now())

        with self.assertRaises(ValueError):
            self.handler.Handle(args)
Example 20
def GetMostRecentClient(client_list, token=None):
    """Return most recent client from list of clients."""
    last = rdfvalue.RDFDatetime(0)
    client_urn = None
    for client in aff4.FACTORY.MultiOpen(client_list, token=token):
        client_last = client.Get(client.Schema.LAST)
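        # Get() returns None when LAST was never set; under the Python 2
        # comparison rules this code targets, such clients compare as older
        # than any timestamp and are skipped.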
        if client_last > last:
            last = client_last
            client_urn = client.urn
    return client_urn
Example 21
    def testMulNumber(self):
        date = rdfvalue.RDFDatetime(1e9)
        self.assertEqual(int(date * 3), 1e9 * 3)
        self.assertEqual(int(date * 1000.23), int(1e9 * 1000.23))
        self.assertEqual(int(date * (-10)), int(1e9 * (-10)))

        # Test rmul
        self.assertEqual(int(3 * date), 1e9 * 3)
        self.assertEqual(int(1000.23 * date), int(1e9 * 1000.23))
        self.assertEqual(int((-10) * date), int(1e9 * (-10)))
Example 22
  def testEvaluation(self):
    now = rdfvalue.RDFDatetime().Now()
    client = self.SetupTestClientObject(0, last_boot_time=now)
    info = data_store.REL_DB.ReadClientFullInfo(client.client_id)

    for f in foreman_rules.ForemanRegexClientRule.ForemanStringField.enum_dict:
      if f == "UNSET":
        continue

      r = foreman_rules.ForemanRegexClientRule(field=f, attribute_regex=".")
      r.Evaluate(info)
Example 23
  def testEvaluatesSizeLessThanEqualValueToFalse(self):
    now = rdfvalue.RDFDatetime().Now()
    client = self.SetupTestClientObject(0, last_boot_time=now)
    info = data_store.REL_DB.ReadClientFullInfo(client.client_id)

    r = foreman_rules.ForemanIntegerClientRule(
        field="LAST_BOOT_TIME",
        operator=foreman_rules.ForemanIntegerClientRule.Operator.LESS_THAN,
        value=now.AsSecondsSinceEpoch())

    # The values are the same, less than should not trigger.
    self.assertFalse(r.Evaluate(info))
Example 24
  def testEvaluatesSizeLessThanEqualValueToFalse(self):
    now = rdfvalue.RDFDatetime().Now()
    client_id = self.SetupClient(0, last_boot_time=now)
    client = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)

    r = foreman_rules.ForemanIntegerClientRule(
        field="LAST_BOOT_TIME",
        operator=foreman_rules.ForemanIntegerClientRule.Operator.LESS_THAN,
        value=now.AsSecondsSinceEpoch())

    # The values are the same, less than should not trigger.
    self.assertFalse(r.Evaluate(client))
Example 25
  def testEvaluatesSizeGreaterThanSmallerValueToTrue(self):
    now = rdfvalue.RDFDatetime().Now()
    client_id = self.SetupClient(0, last_boot_time=now)
    client = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)

    before_boot = now - 1

    r = foreman_rules.ForemanIntegerClientRule(
        field="LAST_BOOT_TIME",
        operator=foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN,
        value=before_boot.AsSecondsSinceEpoch())

    self.assertTrue(r.Evaluate(client))
Example 26
  def testEvaluatesSizeGreaterThanSmallerValueToTrue(self):
    now = rdfvalue.RDFDatetime().Now()
    client = self.SetupTestClientObject(0, last_boot_time=now)
    info = data_store.REL_DB.ReadClientFullInfo(client.client_id)

    before_boot = now - 1

    r = foreman_rules.ForemanIntegerClientRule(
        field="LAST_BOOT_TIME",
        operator=foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN,
        value=before_boot.AsSecondsSinceEpoch())

    self.assertTrue(r.Evaluate(info))
Example 27
  def testCronJobRespectsStartTime(self):
    with test_lib.FakeTime(0):
      cron_manager = aff4_cronjobs.GetCronManager()
      start_time1 = rdfvalue.RDFDatetime(100 * 1000 * 1000)
      cron_args1 = rdf_cronjobs.CreateCronJobFlowArgs(start_time=start_time1)
      cron_args1.flow_runner_args.flow_name = "FakeCronJob"

      cron_args2 = rdf_cronjobs.CreateCronJobFlowArgs()
      cron_args2.flow_runner_args.flow_name = "FakeCronJob"

      cron_job_id1 = cron_manager.CreateJob(cron_args1, token=self.token)
      cron_job_id2 = cron_manager.CreateJob(cron_args2, token=self.token)

      cron_manager.RunOnce(token=self.token)

      cron_job1 = cron_manager.ReadJob(cron_job_id1, token=self.token)
      cron_job2 = cron_manager.ReadJob(cron_job_id2, token=self.token)

      self.assertEqual(
          cron_job1.Get(cron_job1.Schema.CRON_ARGS).start_time, start_time1)

      # Flow without a start time should now be running
      self.assertFalse(cron_job1.IsRunning())
      self.assertTrue(cron_job2.IsRunning())

    # Move the clock past the start time
    with test_lib.FakeTime(500):

      cron_manager.RunOnce(token=self.token)

      cron_job1 = cron_manager.ReadJob(cron_job_id1, token=self.token)
      cron_job2 = cron_manager.ReadJob(cron_job_id2, token=self.token)

      # Start time should be the same
      self.assertEqual(
          cron_job1.Get(cron_job1.Schema.CRON_ARGS).start_time, start_time1)

      # Now both should be running
      self.assertTrue(cron_job1.IsRunning())
      self.assertTrue(cron_job2.IsRunning())

      # Check setting a bad flow urn is handled and removed
      with aff4.FACTORY.OpenWithLock(
          cron_job2.urn, aff4_type=aff4_cronjobs.CronJob,
          token=self.token) as cron_job2:

        cron_job2.Set(cron_job2.Schema.CURRENT_FLOW_URN("aff4:/does/not/exist"))
        self.assertFalse(cron_job2.IsRunning())

      cron_job2 = cron_manager.ReadJob(cron_job_id2, token=self.token)
      self.assertFalse(cron_job2.Get(cron_job2.Schema.CURRENT_FLOW_URN))
Example 28
  def testEvaluation(self):
    now = rdfvalue.RDFDatetime().Now()
    client = self.SetupTestClientObject(0, last_boot_time=now)
    info = data_store.REL_DB.ReadClientFullInfo(client.client_id)

    int_f = foreman_rules.ForemanIntegerClientRule.ForemanIntegerField
    for f in int_f.enum_dict:
      if f == "UNSET":
        continue

      r = foreman_rules.ForemanIntegerClientRule(
          field=f,
          operator=foreman_rules.ForemanIntegerClientRule.Operator.LESS_THAN,
          value=now.AsSecondsSinceEpoch())
      r.Evaluate(info)
Example 29
 def testClientMetadataSubsecond(self):
     client_id = "C.fc413187fefa1dcf"
     self.db.WriteClientMetadata(
         client_id,
         certificate=CERT,
         first_seen=rdfvalue.RDFDatetime(100000001),
         last_clock=rdfvalue.RDFDatetime(100000011),
         last_foreman=rdfvalue.RDFDatetime(100000021),
         last_ping=rdfvalue.RDFDatetime(100000031),
         fleetspeak_enabled=False)
     res = self.db.MultiReadClientMetadata([client_id])
     self.assertEqual(len(res), 1)
     m1 = res[client_id]
     self.assertEqual(m1.first_seen, rdfvalue.RDFDatetime(100000001))
     self.assertEqual(m1.clock, rdfvalue.RDFDatetime(100000011))
     self.assertEqual(m1.last_foreman_time, rdfvalue.RDFDatetime(100000021))
     self.assertEqual(m1.ping, rdfvalue.RDFDatetime(100000031))
Example 30
    def testMostActiveUsersReportPlugin(self):
        with test_lib.FakeTime(
                rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
            AddFakeAuditLog("Fake audit description 14 Dec.",
                            "C.123",
                            "User123",
                            token=self.token)

        with test_lib.FakeTime(
                rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22")):
            for _ in xrange(10):
                AddFakeAuditLog("Fake audit description 22 Dec.",
                                "C.123",
                                "User123",
                                token=self.token)

            AddFakeAuditLog("Fake audit description 22 Dec.",
                            "C.456",
                            "User456",
                            token=self.token)

        report = report_plugins.GetReportByName(
            server_report_plugins.MostActiveUsersReportPlugin.__name__)

        with test_lib.FakeTime(
                rdfvalue.RDFDatetime.FromHumanReadable("2012/12/31")):

            now = rdfvalue.RDFDatetime().Now()
            month_duration = rdfvalue.Duration("30d")

            api_report_data = report.GetReportData(
                stats_api.ApiGetReportArgs(
                    name=report.__class__.__name__,
                    start_time=now - month_duration,
                    duration=month_duration),
                token=self.token)

            # pyformat: disable
            self.assertEqual(
                api_report_data,
                rdf_report_plugins.ApiReportData(
                    representation_type=rdf_report_plugins.ApiReportData.
                    RepresentationType.PIE_CHART,
                    pie_chart=rdf_report_plugins.ApiPieChartReportData(data=[
                        rdf_report_plugins.ApiReportDataPoint1D(
                            label="User123", x=11),
                        rdf_report_plugins.ApiReportDataPoint1D(
                            label="User456", x=1)
                    ])))