Example No. 1
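All of the snippets on this page exercise GRR's test_lib.FakeTime context manager, which pins the test clock to a fixed value (seconds since the epoch or an RDFDatetime) and, when an increment is given, advances the clock on every read. A minimal sketch of the pattern, assuming the same test_lib and rdfvalue imports the snippets below use:

    with test_lib.FakeTime(42):
      # Inside the block, time.time() and rdfvalue.RDFDatetime.Now()
      # both report t=42 (seconds since the epoch).
      now = rdfvalue.RDFDatetime.Now()

This first example is an API regression test for GetGrrUser: it creates a user at a fixed time (both in AFF4 and in the relational DB), checks the call's output, then promotes the user to admin and checks again.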
  def Run(self):
    user_urn = aff4.ROOT_URN.Add("users").Add(self.token.username)
    with test_lib.FakeTime(42):
      with aff4.FACTORY.Create(
          user_urn, aff4_type=aff4_users.GRRUser, mode="w",
          token=self.token) as user_fd:
        user_fd.Set(user_fd.Schema.GUI_SETTINGS,
                    aff4_users.GUISettings(mode="ADVANCED", canary_mode=True))

    # Set up the relational DB.
    data_store.REL_DB.WriteGRRUser(
        username=self.token.username, ui_mode="ADVANCED", canary_mode=True)

    self.Check("GetGrrUser")

    # Make user an admin and do yet another request.
    with aff4.FACTORY.Open(user_urn, mode="rw", token=self.token) as user_fd:
      user_fd.SetLabel("admin", owner="GRR")
    data_store.REL_DB.WriteGRRUser(
        username=self.token.username,
        user_type=rdf_objects.GRRUser.UserType.USER_TYPE_ADMIN)

    self.Check("GetGrrUser")
Example No. 2
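Test helper that schedules and runs the GRRVersionBreakDown system cron job at fake time 44, through either the relational cron manager or the legacy AFF4 cron flow, and returns the cron job name.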
def _SetupAndRunVersionBreakDownCronjob(token=None):
    with test_lib.FakeTime(44):
        manager = aff4_cronjobs.GetCronManager()

        if data_store.RelationalDBReadEnabled("cronjobs"):
            cron_job_name = compatibility.GetName(
                cron_system.GRRVersionBreakDownCronJob)
            cronjobs.ScheduleSystemCronJobs(names=[cron_job_name])
            manager.RunOnce()
            manager._GetThreadPool().Stop()
        else:
            cron_job_name = compatibility.GetName(
                cron_system.GRRVersionBreakDown)
            aff4_cronjobs.ScheduleSystemCronFlows(names=[cron_job_name],
                                                  token=token)
            manager.RunOnce(token=token)
            run_id = _GetRunId(cron_job_name, token=token)
            flow_test_lib.TestFlowHelper(rdfvalue.RDFURN(
                "aff4:/cron/%s/%s" % (cron_job_name, run_id)),
                                         token=token)
            manager.RunOnce(token=token)

        return cron_job_name
Example No. 3
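Verifies that RunOnce starts a second, overlapping run once an hour has passed when allow_overruns is set, even though the first run has not finished yet.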
  def testCronJobRunAllowsOverrunsWhenAllowOverrunsIsTrue(self):
    with test_lib.FakeTime(0):
      cron_manager = aff4_cronjobs.GetCronManager()
      cron_args = rdf_cronjobs.CreateCronJobArgs(
          allow_overruns=True, frequency="1h", flow_name="FakeCronJob")

      job_id = cron_manager.CreateJob(cron_args=cron_args, token=self.token)

      cron_manager.RunOnce(token=self.token)

      cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
      self.assertEqual(len(cron_job_runs), 1)

      # Let an hour pass. The frequency is 1h (i.e. cron job iterations are
      # supposed to be started every hour), so a new flow should be started
      # by RunOnce(). The previous iteration's flow hasn't finished yet, but
      # allow_overruns is True, so it's ok to start a new iteration.
      time.time = lambda: 60 * 60 + 1

      cron_manager.RunOnce(token=self.token)

      cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
      self.assertEqual(len(cron_job_runs), 2)
Example No. 4
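Adds 50 keyword index entries at one-second intervals and verifies that Lookup filters them by start_time and end_time, both given in microseconds since the epoch.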
  def testKeywordIndexTimestamps(self):
    index = aff4.FACTORY.Create(
        "aff4:/index2/",
        aff4_type=keyword_index.AFF4KeywordIndex,
        mode="rw",
        token=self.token)
    for i in range(50):
      with test_lib.FakeTime(1000 + i):
        index.AddKeywordsForName("C.%X" % i, ["popular_keyword1"])
    results = index.Lookup(["popular_keyword1"])
    self.assertEqual(len(results), 50)

    results = index.Lookup(["popular_keyword1"], start_time=1025 * 1000000)
    self.assertEqual(len(results), 25)

    results = index.Lookup(["popular_keyword1"], end_time=1024 * 1000000)
    self.assertEqual(len(results), 25)

    results = index.Lookup(
        ["popular_keyword1"],
        start_time=1025 * 1000000,
        end_time=1025 * 1000000)
    self.assertEqual(len(results), 1)
Example No. 5
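UI test that creates 20 hunts with staggered creation times under two different users and checks that the dashboard shows only the five latest hunts owned by the test user.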
    def testShows5LatestHunts(self):
        # Only hunts created in the last 31 days will get shown, so we have
        # to adjust their timestamps accordingly.
        timestamp = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1d")
        for i in range(20):
            with test_lib.FakeTime(timestamp + rdfvalue.Duration(1000 * i)):
                if i % 2 == 0:
                    descr = "foo-%d" % i
                    token = access_control.ACLToken(username="******")
                else:
                    descr = "bar-%d" % i
                    token = self.token
                self.CreateSampleHunt(descr, token=token)

        self.Open("/")
        for i in range(11, 20, 2):
            self.WaitUntil(
                self.IsElementPresent, "css=grr-user-dashboard "
                "div[name=RecentlyCreatedHunts]:contains('bar-%d')" % i)

        self.WaitUntilNot(
            self.IsElementPresent, "css=grr-user-dashboard "
            "div[name=RecentlyCreatedHunts]:contains('foo')")
Example No. 6
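Regression test for GetLastClientIPAddress: the client's last known IP address is written at fake time 42, via the relational DB or the AFF4 client object depending on configuration.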
    def Run(self):
        # Fix the time to avoid regressions.
        with test_lib.FakeTime(42):
            if data_store.RelationalDBReadEnabled():
                client_obj = self.SetupTestClientObject(0)
                client_id = client_obj.client_id

                ip = rdf_client_network.NetworkAddress(
                    human_readable_address="192.168.100.42",
                    address_type=rdf_client_network.NetworkAddress.Family.INET)
                data_store.REL_DB.WriteClientMetadata(client_id, last_ip=ip)
            else:
                client_urn = self.SetupClient(0)
                client_id = client_urn.Basename()

                with aff4.FACTORY.Open(client_id, mode="rw",
                                       token=self.token) as grr_client:
                    grr_client.Set(
                        grr_client.Schema.CLIENT_IP("192.168.100.42"))

        self.Check("GetLastClientIPAddress",
                   args=client_plugin.ApiGetLastClientIPAddressArgs(
                       client_id=client_id))
Example No. 7
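Checks that scheduling system cron jobs sets their start times: DummySystemCronJobStartNow starts immediately, while DummySystemCronJob gets a randomized start time within one job frequency of now.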
  def testSystemCronJobSetsStartTime(self):
    with test_lib.FakeTime(100):
      now = rdfvalue.RDFDatetime.Now()
      aff4_cronjobs.ScheduleSystemCronFlows(
          names=[
              DummySystemCronJob.__name__, DummySystemCronJobStartNow.__name__
          ],
          token=self.token)
      random_time = "DummySystemCronJob"
      no_random_time = "DummySystemCronJobStartNow"

      random_time_job = aff4_cronjobs.GetCronManager().ReadJob(
          random_time, token=self.token)

      no_random_time_job = aff4_cronjobs.GetCronManager().ReadJob(
          no_random_time, token=self.token)

      start_time_now = no_random_time_job.cron_args.start_time
      self.assertEqual(start_time_now, now)

      random_start_time = random_time_job.cron_args.start_time
      self.assertTrue(
          now < random_start_time < (now + DummySystemCronJob.frequency))
Example No. 8
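Tests access control for flows: creating a flow requires a client approval, and once the clock moves past ACL.token_expiry the approval has expired and must be granted again.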
    def testFlowAccess(self):
        """Tests access to flows."""
        client_id = self.SetupClient(0).Basename()

        self.assertRaises(grr_api_errors.AccessForbiddenError,
                          self.api.Client(client_id).CreateFlow,
                          name=flow_test_lib.SendingFlow.__name__)

        self.RequestAndGrantClientApproval(client_id,
                                           requestor=self.token.username)
        f = self.api.Client(client_id).CreateFlow(
            name=flow_test_lib.SendingFlow.__name__)

        # Move the clocks forward to make sure the approval expires.
        with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
                               config.CONFIG["ACL.token_expiry"],
                               increment=1e-3):
            with self.assertRaises(grr_api_errors.AccessForbiddenError):
                self.api.Client(client_id).Flow(f.flow_id).Get()

            self.RequestAndGrantClientApproval(client_id,
                                               requestor=self.token.username)
            self.api.Client(client_id).Flow(f.flow_id).Get()
Example No. 9
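Regression test for ListHuntOutputPluginLogs against a hunt with a dummy output plugin; the clock starts at 42 and advances by one second per read.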
  def Run(self):
    output_plugins = [
        rdf_output_plugin.OutputPluginDescriptor(
            plugin_name=test_plugins.DummyHuntTestOutputPlugin.__name__,
            plugin_args=test_plugins.DummyHuntTestOutputPlugin.args_type(
                filename_regex="blah!", fetch_binaries=True))
    ]
    with test_lib.FakeTime(42, increment=1):
      hunt_id = self.CreateHunt(
          description="the hunt",
          output_plugins=output_plugins,
          creator=self.test_username)
      hunt.StartHunt(hunt_id)

      self.client_ids = self.SetupClients(2)
      for client_id in self.client_ids:
        self.RunHunt(client_ids=[client_id], failrate=-1)

    self.Check(
        "ListHuntOutputPluginLogs",
        args=hunt_plugin.ApiListHuntOutputPluginLogsArgs(
            hunt_id=hunt_id, plugin_id="DummyHuntTestOutputPlugin_0"),
        replace={hunt_id: "H:123456"})
Example No. 10
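Runs the CleanHunts data retention flow with a 150-second TTL and verifies that only hunts whose expiration time falls within that window survive.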
    def testDeletesHuntsWithExpirationDateOlderThanGivenAge(self):
        with test_lib.ConfigOverrider(
            {"DataRetention.hunts_ttl": rdfvalue.Duration("150s")}):
            with test_lib.FakeTime(40 + 60 * self.NUM_HUNTS):
                flow.GRRFlow.StartFlow(
                    flow_name=data_retention.CleanHunts.__name__,
                    sync=True,
                    token=self.token)
                latest_timestamp = rdfvalue.RDFDatetime.Now()

            hunts_urns = list(
                aff4.FACTORY.Open("aff4:/hunts",
                                  token=self.token).ListChildren())
            self.assertEqual(len(hunts_urns), 2)

            for hunt_urn in hunts_urns:
                hunt_obj = aff4.FACTORY.Open(hunt_urn, token=self.token)
                runner = hunt_obj.GetRunner()

                self.assertLess(runner.context.expires, latest_timestamp)
                self.assertGreaterEqual(
                    runner.context.expires,
                    latest_timestamp - rdfvalue.Duration("150s"))
Example No. 11
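Verifies that claimed hunt result notifications are hidden from other readers and become claimable again once the clock is pushed past the default claim timeout.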
    def testNotificationClaimsTimeout(self):
        collection_urn = rdfvalue.RDFURN(
            "aff4:/testNotificationClaimsTimeout/collection")
        for i in range(5):
            hunts_results.HuntResultCollection.StaticAdd(
                collection_urn, self.token, rdf_flows.GrrMessage(request_id=i))

        results_1 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(5, len(results_1[1]))

        # Check that we have a claim: another read returns nothing.
        results_2 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(0, len(results_2[1]))

        # Push time forward past the default claim timeout, then we should be able
        # to re-read (and re-claim).
        with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
                               rdfvalue.Duration("45m")):
            results_3 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
                token=self.token)
        self.assertEqual(results_3, results_1)
Example No. 12
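Runs a YARA process scan over a fixed set of processes at a pinned timestamp and checks the expected matches, errors, and misses, down to the matched string data and offsets.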
  def testYaraProcessScanWithMissesAndErrors(self):
    procs = [
        p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
    ]
    with test_lib.FakeTime(
        rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(123456789)):
      matches, errors, misses = self._RunYaraProcessScan(
          procs,
          include_misses_in_results=True,
          include_errors_in_results="ALL_ERRORS")

    self.assertLen(matches, 2)
    self.assertLen(errors, 2)
    self.assertLen(misses, 2)

    for scan_match in matches:
      for match in scan_match.match:
        self.assertEqual(match.rule_name, "test_rule")
        self.assertLen(match.string_matches, 1)
        for string_match in match.string_matches:
          self.assertEqual(string_match.data, b"1234")
          self.assertEqual(string_match.string_id, "$s1")
          self.assertIn(string_match.offset, [98, 1050])
Example No. 13
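Regression test for UpdateVfsFileContent; the ReplaceFlowId helper swaps the id of the newly started flow for a stable placeholder so the recorded output is deterministic.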
  def Run(self):
    client_urn = self.SetupClient(0)
    client_id = client_urn.Basename()
    self.file_path = "fs/os/c/bin/bash"

    fixture_test_lib.ClientFixture(client_urn, token=self.token)

    def ReplaceFlowId():
      if data_store.RelationalDBFlowsEnabled():
        flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
        return {flows[0].flow_id: "W:ABCDEF"}
      else:
        flows_dir_fd = aff4.FACTORY.Open(
            client_urn.Add("flows"), token=self.token)
        flow_urn = list(flows_dir_fd.ListChildren())[0]
        return {flow_urn.Basename(): "W:ABCDEF"}

    with test_lib.FakeTime(42):
      self.Check(
          "UpdateVfsFileContent",
          args=vfs_plugin.ApiUpdateVfsFileContentArgs(
              client_id=client_id, file_path=self.file_path),
          replace=ReplaceFlowId)
Example No. 14
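Regression test for ListHuntOutputPlugins at fake time 42, creating the hunt through either the relational or the AFF4 code path.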
    def Run(self):
        output_plugins = [
            rdf_output_plugin.OutputPluginDescriptor(
                plugin_name=test_plugins.DummyHuntTestOutputPlugin.__name__,
                plugin_args=test_plugins.DummyHuntTestOutputPlugin.args_type(
                    filename_regex="blah!", fetch_binaries=True))
        ]

        with test_lib.FakeTime(42):
            if data_store.RelationalDBReadEnabled("hunts"):
                hunt_id = self.CreateHunt(description="the hunt",
                                          output_plugins=output_plugins)
                hunt.StartHunt(hunt_id)
            else:
                with self.CreateHunt(
                        description="the hunt",
                        output_plugins=output_plugins) as hunt_obj:
                    hunt_id = hunt_obj.urn.Basename()

        self.Check(
            "ListHuntOutputPlugins",
            args=hunt_plugin.ApiListHuntOutputPluginsArgs(hunt_id=hunt_id),
            replace={hunt_id: "H:123456"})
Example No. 15
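Test helper that writes six CPU and I/O samples, ten fake seconds apart, into a client's stats AFF4 object.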
    def FillClientStats(self, client_id):
        with aff4.FACTORY.Create(client_id.Add("stats"),
                                 aff4_type=aff4_stats.ClientStats,
                                 token=self.token,
                                 mode="rw") as stats_fd:

            for i in range(6):
                with test_lib.FakeTime((i + 1) * 10):
                    timestamp = int((i + 1) * 10 * 1e6)
                    st = rdf_client_stats.ClientStats()

                    sample = rdf_client_stats.CpuSample(timestamp=timestamp,
                                                        user_cpu_time=10 + i,
                                                        system_cpu_time=20 + i,
                                                        cpu_percent=10 + i)
                    st.cpu_samples.Append(sample)

                    sample = rdf_client_stats.IOSample(timestamp=timestamp,
                                                       read_bytes=10 + i,
                                                       write_bytes=10 + i * 2)
                    st.io_samples.Append(sample)

                    stats_fd.AddAttribute(stats_fd.Schema.STATS(st))
Example No. 16
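Verifies that deleted hunt result notifications stay gone: after DeleteNotifications, pushing the clock past the claim timeout still yields nothing to claim.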
  def testDelete(self):
    collection_urn = rdfvalue.RDFURN("aff4:/testDelete/collection")
    with data_store.DB.GetMutationPool(token=self.token) as pool:
      for i in range(5):
        hunts_results.HuntResultCollection.StaticAdd(
            collection_urn,
            rdf_flows.GrrMessage(request_id=i),
            mutation_pool=pool)

    results_1 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
        token=self.token)
    self.assertEqual(5, len(results_1[1]))

    hunts_results.HuntResultQueue.DeleteNotifications(
        results_1[1], token=self.token)

    # Push time forward past the default claim timeout, then we should still
    # read nothing.
    with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
                           rdfvalue.Duration("45m")):
      results_2 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
          token=self.token)
    self.assertEqual(0, len(results_2[1]))
Example No. 17
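Exercises indexed reads on a sequential collection: items are written through a mutation pool, and reads around the INDEX_SPACING boundaries are checked with the clock pushed ten minutes ahead.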
  def testIndexedReads(self):
    spacing = 10
    with utils.Stubber(sequential_collection.IndexedSequentialCollection,
                       "INDEX_SPACING", spacing):
      urn = "aff4:/sequential_collection/testIndexedReads"
      collection = self._TestCollection(urn)
      data_size = 4 * spacing
      # TODO(amoser): Without using a mutation pool, this test is really
      # slow on MySQL data store.
      with data_store.DB.GetMutationPool() as pool:
        for i in range(data_size):
          collection.StaticAdd(rdfvalue.RDFURN(urn),
                               rdfvalue.RDFInteger(i),
                               mutation_pool=pool)
      with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
                             rdfvalue.DurationSeconds("10m")):
        for i in range(data_size - 1, data_size - 20, -1):
          self.assertEqual(collection[i], i)
        for i in [spacing - 1, spacing, spacing + 1]:
          self.assertEqual(collection[i], i)
        for i in range(data_size - spacing + 5, data_size - spacing - 5, -1):
          self.assertEqual(collection[i], i)
Example No. 18
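Regression test for GetRobotGetFilesOperationState: starts a robot 'get files' operation at fake time 42, adds a result to its output collection, and checks the reported state with the flow id replaced by a stable value.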
    def Run(self):
        # Fix the time to avoid regressions.
        with test_lib.FakeTime(42):
            self.SetupClients(1)

            start_handler = flow_plugin.ApiStartRobotGetFilesOperationHandler()
            start_args = flow_plugin.ApiStartRobotGetFilesOperationArgs(
                hostname="Host", paths=["/test"])
            start_result = start_handler.Handle(start_args, token=self.token)

            # Exploit the fact that the 'get files' operation id is
            # effectively a flow URN.
            flow_urn = rdfvalue.RDFURN(start_result.operation_id)

            # Put something in the output collection.
            collection = flow.GRRFlow.ResultCollectionForFID(flow_urn,
                                                             token=self.token)
            collection.Add(rdf_client.ClientSummary())

            self.Check("GetRobotGetFilesOperationState",
                       args=flow_plugin.ApiGetRobotGetFilesOperationStateArgs(
                           operation_id=start_result.operation_id),
                       replace={flow_urn.Basename(): "F:ABCDEF12"})
Example No. 19
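Counterpart to the overruns test above: with a one-hour frequency and allow_overruns disabled, RunOnce starts no new run when only 59 minutes have passed.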
    def testCronJobRunDoesNothingIfDueTimeHasNotComeYet(self):
        with test_lib.FakeTime(0):
            cron_manager = aff4_cronjobs.GetCronManager()
            cron_args = rdf_cronjobs.CreateCronJobArgs(allow_overruns=False,
                                                       frequency="1h",
                                                       flow_name="FakeCronJob")

            job_id = cron_manager.CreateJob(cron_args=cron_args,
                                            token=self.token)

            cron_manager.RunOnce(token=self.token)

            cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
            self.assertEqual(len(cron_job_runs), 1)

            # Let 59 minutes pass. The frequency is 1 hour, so no new flow
            # is supposed to start.
            time.time = lambda: 59 * 60

            cron_manager.RunOnce(token=self.token)

            cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
            self.assertEqual(len(cron_job_runs), 1)
Example No. 20
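Checks that the VFS timeline CSV handler streams its output one row per chunk, with the column headings prepended to the first row.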
  def testTimelineIsReturnedInChunks(self):
    # Change chunk size to see if the handler behaves correctly.
    self.handler.CHUNK_SIZE = 1

    args = vfs_plugin.ApiGetVfsTimelineAsCsvArgs(
        client_id=self.client_id, file_path=self.folder_path)
    result = self.handler.Handle(args, token=self.token)

    # Check rows returned correctly.
    self.assertTrue(hasattr(result, "GenerateContent"))
    for i in reversed(range(0, 5)):
      with test_lib.FakeTime(i):
        next_chunk = next(result.GenerateContent()).strip()
        timestamp = rdfvalue.RDFDatetime.Now()
        if i == 4:  # The first row includes the column headings.
          self.assertEqual(
              next_chunk, "Timestamp,Datetime,Message,Timestamp_desc\r\n"
              "%d,%s,%s,MODIFICATION" % (timestamp.AsMicroSecondsFromEpoch(),
                                         str(timestamp), self.file_path))
        else:
          self.assertEqual(next_chunk, "%d,%s,%s,MODIFICATION" %
                           (timestamp.AsMicroSecondsFromEpoch(), str(timestamp),
                            self.file_path))
Example No. 21
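Runs DummyFlowWithSingleReply at fake time 42 and checks that the instant output plugin produces the expected start, value, and finish chunks.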
  def testWorksCorrectlyWithTestOutputPluginOnFlowWithSingleResult(self):
    with test_lib.FakeTime(42):
      sid = flow_test_lib.TestFlowHelper(
          compatibility.GetName(flow_test_lib.DummyFlowWithSingleReply),
          client_id=self.client_id,
          token=self.token)

    result = self.handler.Handle(
        flow_plugin.ApiGetExportedFlowResultsArgs(
            client_id=self.client_id,
            flow_id=sid,
            plugin_name=test_plugins.TestInstantOutputPlugin.plugin_name),
        context=self.context)

    chunks = list(result.GenerateContent())

    self.assertListEqual(chunks, [
        "Start: aff4:/%s/flows/%s" %
        (self.client_id, sid), "Values of type: RDFString",
        "First pass: oh (source=aff4:/%s)" % self.client_id,
        "Second pass: oh (source=aff4:/%s)" % self.client_id,
        "Finish: aff4:/%s/flows/%s" % (self.client_id, sid)
    ])
Example No. 22
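Regression test for CreateFlow starting ListProcesses; the freshly assigned flow id is replaced through a test-library helper so the recorded output stays stable.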
  def Run(self):
    client_urn = self.SetupClient(0)
    client_id = client_urn.Basename()

    def ReplaceFlowId():
      flows = data_store.REL_DB.ReadAllFlowObjects(client_id=client_id)
      self.assertNotEmpty(flows)
      flow_id = flows[0].flow_id

      return api_regression_test_lib.GetFlowTestReplaceDict(client_id, flow_id)

    with test_lib.FakeTime(42):
      self.Check(
          "CreateFlow",
          args=flow_plugin.ApiCreateFlowArgs(
              client_id=client_id,
              flow=flow_plugin.ApiFlow(
                  name=processes.ListProcesses.__name__,
                  args=processes.ListProcessesArgs(
                      filename_regex=".", fetch_binaries=True),
                  runner_args=rdf_flow_runner.FlowRunnerArgs(
                      output_plugins=[], notify_to_user=True))),
          replace=ReplaceFlowId)
Example No. 23
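Regression test for GetFlow: checks an Interrogate flow right after it starts and again after it has been marked for termination.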
  def Run(self):
    # Fix the time to avoid regressions.
    with test_lib.FakeTime(42):
      client_urn = self.SetupClient(0)

      # Delete the certificate, as it is regenerated every time the
      # client is created.
      with aff4.FACTORY.Open(
          client_urn, mode="rw", token=self.token) as client_obj:
        client_obj.DeleteAttribute(client_obj.Schema.CERT)

      flow_id = flow.GRRFlow.StartFlow(
          flow_name=discovery.Interrogate.__name__,
          client_id=client_urn,
          token=self.token)

      self.Check(
          "GetFlow",
          args=flow_plugin.ApiGetFlowArgs(
              client_id=client_urn.Basename(), flow_id=flow_id.Basename()),
          replace={
              flow_id.Basename(): "F:ABCDEF12"
          })

      with data_store.DB.GetMutationPool() as pool:
        flow.GRRFlow.MarkForTermination(
            flow_id, reason="Some reason", mutation_pool=pool)

      # Fetch the same flow, which should now be marked as pending
      # termination.
      self.Check(
          "GetFlow",
          args=flow_plugin.ApiGetFlowArgs(
              client_id=client_urn.Basename(), flow_id=flow_id.Basename()),
          replace={
              flow_id.Basename(): "F:ABCDEF13"
          })
Example No. 24
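Regression test for ListClientActionRequests, with and without fetched responses; the task ids of pending requests and responses are replaced with fixed values.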
    def Run(self):
        client_id = self.SetupClient(0)

        replace = {}
        with test_lib.FakeTime(42):
            flow_urn = flow.StartAFF4Flow(
                client_id=client_id,
                flow_name=processes.ListProcesses.__name__,
                token=self.token)
            replace[flow_urn.Basename()] = "F:123456"

            test_process = client_test_lib.MockWindowsProcess(
                name="test_process")
            with utils.Stubber(psutil, "Process", lambda: test_process):
                # Here we emulate a mock client with no actions (None) that
                # should produce an error.
                mock = flow_test_lib.MockClient(client_id,
                                                None,
                                                token=self.token)
                while mock.Next():
                    pass

        manager = queue_manager.QueueManager(token=self.token)
        requests_responses = manager.FetchRequestsAndResponses(flow_urn)
        for request, responses in requests_responses:
            replace[str(request.request.task_id)] = "42"
            for response in responses:
                replace[str(response.task_id)] = "43"

        self.Check("ListClientActionRequests",
                   args=client_plugin.ApiListClientActionRequestsArgs(
                       client_id=client_id.Basename()),
                   replace=replace)
        self.Check("ListClientActionRequests",
                   args=client_plugin.ApiListClientActionRequestsArgs(
                       client_id=client_id.Basename(), fetch_responses=True),
                   replace=replace)
Example No. 25
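Exercises WriteCronJobRun and ReadCronJobRun(s) at a pinned time, including the error cases: unknown cron job, invalid run id, unknown run id, and a job that does not exist.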
  def testCronJobRuns(self):
    with self.assertRaises(db.UnknownCronJobError):
      self.db.WriteCronJobRun(
          rdf_cronjobs.CronJobRun(cron_job_id="job1", run_id="00000000"))

    now = rdfvalue.RDFDatetime.Now()
    with test_lib.FakeTime(now):
      for j in range(1, 3):
        self.db.WriteCronJob(rdf_cronjobs.CronJob(cron_job_id="job%d" % j))
        for r in range(1, 3):
          run = rdf_cronjobs.CronJobRun(
              cron_job_id="job%d" % j, run_id="abcd123%d" % r)
          self.db.WriteCronJobRun(run)

    for j in range(1, 3):
      job_id = "job%d" % j
      jobs = self.db.ReadCronJobRuns(job_id)
      self.assertLen(jobs, 2)
      for job in jobs:
        self.assertEqual(job.cron_job_id, job_id)
        self.assertEqual(job.timestamp, now)

    job = self.db.ReadCronJobRun("job1", "abcd1231")
    self.assertEqual(job.cron_job_id, "job1")
    self.assertEqual(job.run_id, "abcd1231")
    self.assertEqual(job.timestamp, now)

    with self.assertRaises(ValueError):
      self.db.ReadCronJobRun(job_id, "invalid_id")

    with self.assertRaises(db.UnknownCronJobRunError):
      self.db.ReadCronJobRun(job_id, "abcd1234")

    with self.assertRaises(db.UnknownCronJobRunError):
      self.db.ReadCronJobRun("doesntexist", "abcd1231")

    self.assertEqual(self.db.ReadCronJobRuns("doesntexist"), [])
Example No. 26
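Regression test for ListHuntClients with the STARTED, OUTSTANDING, and COMPLETED status filters; only one client actually runs the hunt to keep the reported resource usage deterministic.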
    def Run(self):
        with test_lib.FakeTime(42):
            if data_store.RelationalDBEnabled():
                hunt_id = self.CreateHunt(description="the hunt")
                hunt.StartHunt(hunt_id)
            else:
                hunt_urn = self.StartHunt(description="the hunt")
                hunt_id = hunt_urn.Basename()

            if data_store.RelationalDBEnabled():
                clients = self.SetupTestClientObjects(5)
                client_ids = sorted(clients)
            else:
                client_ids = [urn.Basename() for urn in self.SetupClients(5)]

            self.AssignTasksToClients(client_ids=client_ids[:-1])
            # Only running the hunt on a single client, as the SampleMock
            # implementation is non-deterministic in terms of the resource
            # usage that gets reported back to the hunt.
            client_urns = [rdf_client.ClientURN(client_ids[-1])]
            self.RunHunt(client_ids=client_urns, failrate=0)

        # Create replace dictionary.
        replace = {hunt_id: "H:123456", hunt_id + ":hunt": "H:123456"}

        self.Check("ListHuntClients",
                   args=hunt_plugin.ApiListHuntClientsArgs(
                       hunt_id=hunt_id, client_status="STARTED"),
                   replace=replace)
        self.Check("ListHuntClients",
                   args=hunt_plugin.ApiListHuntClientsArgs(
                       hunt_id=hunt_id, client_status="OUTSTANDING"),
                   replace=replace)
        self.Check("ListHuntClients",
                   args=hunt_plugin.ApiListHuntClientsArgs(
                       hunt_id=hunt_id, client_status="COMPLETED"),
                   replace=replace)
Example No. 27
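Tests the ApiGetCronJob handler: a cron job is written at a pinned time, then updated with a run id, state, and a forced-run flag, and the handler's result is checked field by field.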
    def testHandler(self):
        now = rdfvalue.RDFDatetime.Now()
        with test_lib.FakeTime(now):
            job = rdf_cronjobs.CronJob(cron_job_id="job_id",
                                       enabled=True,
                                       last_run_status="FINISHED",
                                       frequency=rdfvalue.Duration("7d"),
                                       lifetime=rdfvalue.Duration("1h"),
                                       allow_overruns=True)
            data_store.REL_DB.WriteCronJob(job)

        state = rdf_protodict.AttributedDict()
        state["item"] = "key"
        data_store.REL_DB.UpdateCronJob(job.cron_job_id,
                                        current_run_id="ABCD1234",
                                        state=state,
                                        forced_run_requested=True)

        args = cron_plugin.ApiGetCronJobArgs(cron_job_id=job.cron_job_id)
        result = self.handler.Handle(args)

        self.assertEqual(result.cron_job_id, job.cron_job_id)
        # TODO(amoser): The aff4 implementation does not store the create
        # time, so we can't return it yet.
        # self.assertEqual(result.created_at, now)
        self.assertEqual(result.enabled, job.enabled)
        self.assertEqual(result.current_run_id, "ABCD1234")
        self.assertEqual(result.forced_run_requested, True)
        self.assertEqual(result.frequency, job.frequency)
        self.assertEqual(result.is_failing, False)
        self.assertEqual(result.last_run_status, job.last_run_status)
        self.assertEqual(result.lifetime, job.lifetime)
        state_entries = list(result.state.items)
        self.assertEqual(len(state_entries), 1)
        state_entry = state_entries[0]
        self.assertEqual(state_entry.key, "item")
        self.assertEqual(state_entry.value, "key")
Example No. 28
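Tests message handler request leasing: ten requests written at a fixed time are delivered to a registered handler in batches of at most five, each leased to the current process until the five-minute lease expires.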
  def testMessageHandlerRequestLeasing(self):

    requests = [
        rdf_objects.MessageHandlerRequest(
            client_id="C.1000000000000000",
            handler_name="Testhandler",
            request_id=i * 100,
            request=rdfvalue.RDFInteger(i)) for i in range(10)
    ]
    lease_time = rdfvalue.Duration("5m")

    with test_lib.FakeTime(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10000)):
      self.db.WriteMessageHandlerRequests(requests)

    leased = Queue.Queue()
    self.db.RegisterMessageHandler(leased.put, lease_time, limit=5)

    got = []
    while len(got) < 10:
      try:
        l = leased.get(True, timeout=6)
      except Queue.Empty:
        self.fail(
            "Timed out waiting for messages, expected 10, got %d" % len(got))
      self.assertLessEqual(len(l), 5)
      for m in l:
        self.assertEqual(m.leased_by, utils.ProcessIdString())
        self.assertGreater(m.leased_until, rdfvalue.RDFDatetime.Now())
        self.assertLess(m.timestamp, rdfvalue.RDFDatetime.Now())
        m.leased_by = None
        m.leased_until = None
        m.timestamp = None
      got += l
    self.db.DeleteMessageHandlerRequests(got)

    got.sort(key=lambda req: req.request_id)
    self.assertEqual(requests, got)
Example No. 29
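Runs a deliberately flaky cron job at fake times 0 and 60 and verifies that a LAST_RUN_STATUS value is recorded for every run: OK for the first, ERROR for the second.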
  def testLastRunStatusGetsUpdatedOnEveryRun(self):
    cron_manager = cronjobs.CronManager()
    cron_args = cronjobs.CreateCronJobFlowArgs()
    cron_args.flow_runner_args.flow_name = "OccasionallyFailingFakeCronJob"
    cron_args.periodicity = "30s"

    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token)

    for fake_time in [0, 60]:
      with test_lib.FakeTime(fake_time):
        # This call should start a new cron job flow
        cron_manager.RunOnce(token=self.token)
        cron_job = aff4.FACTORY.Open(
            cron_job_urn, aff4_type=cronjobs.CronJob, token=self.token)
        cron_flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
        flow_test_lib.TestFlowHelper(
            cron_flow_urn, check_flow_errors=False, token=self.token)
        # This RunOnce call should determine that the flow has finished
        cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(
        cron_job_urn, age=aff4.ALL_TIMES, token=self.token)
    statuses = list(
        cron_job.GetValuesForAttribute(cron_job.Schema.LAST_RUN_STATUS))

    statuses = sorted(statuses, key=lambda x: x.age)
    self.assertEqual(len(statuses), 2)

    self.assertEqual(statuses[0].age,
                     rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0))
    self.assertEqual(statuses[1].age,
                     rdfvalue.RDFDatetime.FromSecondsSinceEpoch(60))
    self.assertEqual(statuses[0].status,
                     rdf_cronjobs.CronJobRunStatus.Status.OK)
    self.assertEqual(statuses[1].status,
                     rdf_cronjobs.CronJobRunStatus.Status.ERROR)
Example No. 30
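Checks that WriteBuildYaml emits the expected keys and values when the build time is pinned to 1464120265 (2016-05-24 20:04:25) and the build environment signature is mocked.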
    def testWriteBuildYaml(self):
        """Test build.yaml is output correctly."""
        context = [
            "Target:LinuxDeb", "Platform:Linux", "Target:Linux", "Arch:amd64"
        ]
        expected = {
            "Client.build_environment": "cp27-cp27mu-linux_x86_64",
            "Client.build_time": "2016-05-24 20:04:25",
            "Template.build_type": "Release",
            "Template.build_context": ["ClientBuilder Context"] + context,
            "Template.version_major":
                str(config.CONFIG.Get("Source.version_major")),
            "Template.version_minor":
                str(config.CONFIG.Get("Source.version_minor")),
            "Template.version_revision":
                str(config.CONFIG.Get("Source.version_revision")),
            "Template.version_release":
                str(config.CONFIG.Get("Source.version_release")),
            "Template.arch": u"amd64"
        }

        # TODO(hanuszczak): This writes YAML into a BytesIO buffer; consider
        # using StringIO instead.
        fd = io.BytesIO()
        builder = build.ClientBuilder(context=context)

        with mock.patch.object(rdf_client.Uname, "FromCurrentSystem") as fcs:
            fcs.return_value.signature.return_value = "cp27-cp27mu-linux_x86_64"
            with test_lib.FakeTime(1464120265):
                builder.WriteBuildYaml(fd)

        fd.seek(0)
        self.assertEqual(yaml.Parse(fd.getvalue().decode("utf-8")), expected)