Example #1
def ScheduleSystemCronFlows(names=None, token=None):
  """Schedule all the SystemCronFlows found."""

  if data_store.RelationalDBReadEnabled(category="cronjobs"):
    return cronjobs.ScheduleSystemCronJobs(names=names)

  errors = []
  for name in config.CONFIG["Cron.disabled_system_jobs"]:
    try:
      cls = registry.FlowRegistry.FlowClassByName(name)
    except ValueError:
      errors.append("No such flow: %s." % name)
      continue

    if not aff4.issubclass(cls, SystemCronFlow):
      errors.append("Disabled system cron job name doesn't correspond to "
                    "a flow inherited from SystemCronFlow: %s" % name)

  if names is None:
    names = iterkeys(registry.FlowRegistry.FLOW_REGISTRY)

  for name in names:
    cls = registry.FlowRegistry.FlowClassByName(name)

    if not aff4.issubclass(cls, SystemCronFlow):
      continue

    cron_args = rdf_cronjobs.CreateCronJobFlowArgs(
        periodicity=cls.frequency,
        lifetime=cls.lifetime,
        allow_overruns=cls.allow_overruns)
    cron_args.flow_runner_args.flow_name = name

    if cls.enabled:
      enabled = name not in config.CONFIG["Cron.disabled_system_jobs"]
    else:
      enabled = False

    job_urn = CronManager.CRON_JOBS_PATH.Add(name)
    with aff4.FACTORY.Create(
        job_urn,
        aff4_type=CronJob,
        mode="rw",
        token=token,
        force_new_version=False) as cron_job:

      # If the cronjob was already present we don't want to overwrite the
      # original start_time.
      existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)

      if cron_args != existing_cron_args:
        cron_job.Set(cron_job.Schema.CRON_ARGS(cron_args))

      cron_job.Set(cron_job.Schema.DISABLED(not enabled))

  if errors:
    raise ValueError(
        "Error(s) while parsing Cron.disabled_system_jobs: %s" % errors)
Example #2
def ScheduleSystemCronFlows(names=None, token=None):
    """Schedule all the SystemCronFlows found."""

    errors = []
    for name in config.CONFIG["Cron.disabled_system_jobs"]:
        try:
            cls = registry.FlowRegistry.FlowClassByName(name)
        except ValueError:
            errors.append("No such flow: %s." % name)
            continue

        if not aff4.issubclass(cls, SystemCronFlow):
            errors.append(
                "Disabled system cron job name doesn't correspond to "
                "a flow inherited from SystemCronFlow: %s" % name)

    if names is None:
        names = registry.FlowRegistry.FLOW_REGISTRY.keys()

    for name in names:
        cls = registry.FlowRegistry.FlowClassByName(name)

        if aff4.issubclass(cls, SystemCronFlow):
            cron_args = rdf_cronjobs.CreateCronJobFlowArgs(
                periodicity=cls.frequency)
            cron_args.flow_runner_args.flow_name = name
            cron_args.lifetime = cls.lifetime
            cron_args.allow_overruns = cls.allow_overruns
            cron_args.start_time = GetStartTime(cls)

            if cls.disabled:
                disabled = True
            else:
                disabled = name in config.CONFIG["Cron.disabled_system_jobs"]

            manager = GetCronManager()
            if data_store.RelationalDBReadEnabled(category="cronjobs"):
                manager.CreateJob(cron_args=cron_args,
                                  job_id=name,
                                  disabled=disabled)
            else:
                manager.CreateJob(cron_args=cron_args,
                                  job_id=name,
                                  token=token,
                                  disabled=disabled)

    if errors:
        raise ValueError(
            "Error(s) while parsing Cron.disabled_system_jobs: %s" % errors)
Example #3
  def CallFallback(self, artifact_name, request_data):
    classes = iteritems(artifact.ArtifactFallbackCollector.classes)
    for clsname, fallback_class in classes:

      if not aff4.issubclass(fallback_class,
                             artifact.ArtifactFallbackCollector):
        continue

      if artifact_name in fallback_class.artifacts:
        if artifact_name in self.state.called_fallbacks:
          self.Log("Already called fallback class %s for artifact: %s", clsname,
                   artifact_name)
        else:
          self.Log("Calling fallback class %s for artifact: %s", clsname,
                   artifact_name)

          self.CallFlow(
              clsname,
              request_data=request_data.ToDict(),
              artifact_name=artifact_name,
              next_state="ProcessCollected")

          # Make sure we only try this once
          self.state.called_fallbacks.add(artifact_name)
          return True
    return False
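
CallFallback only finds flows that subclass artifact.ArtifactFallbackCollector and list the artifact in their artifacts attribute; GRR's registry metaclass collects such subclasses into the .classes map iterated above. A minimal sketch (the class name, artifact name, and Start body are hypothetical):

class SystemRootFallback(artifact.ArtifactFallbackCollector):
  """Hypothetical fallback used when the primary collector returns nothing."""

  # Artifacts this fallback can recover; the name here is illustrative.
  artifacts = ["WindowsEnvironmentVariableSystemRoot"]

  def Start(self):
    # Recover the value by other means and reply into the caller's
    # "ProcessCollected" state (body elided in this sketch).
    pass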
Example #4
File: flow.py Project: bhyvex/grr
  def GetAllWellKnownFlows(cls, token=None):
    """Get instances of all well known flows."""
    well_known_flows = {}
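    # Note: the loop variable below shadows the classmethod's "cls" argument.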
    for cls in itervalues(registry.FlowRegistry.FLOW_REGISTRY):
      if aff4.issubclass(cls, WellKnownFlow) and cls.well_known_session_id:
        well_known_flow = cls(cls.well_known_session_id, mode="rw", token=token)
        well_known_flows[cls.well_known_session_id.FlowName()] = well_known_flow

    return well_known_flows
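
A sketch of how the returned map might be used to dispatch an incoming message (FlowBase stands in for whatever class defines GetAllWellKnownFlows, and "message" is assumed to come from the server's queue):

# Hypothetical dispatch; the surrounding server context is assumed.
well_known_flows = FlowBase.GetAllWellKnownFlows(token=token)
flow_name = message.session_id.FlowName()
if flow_name in well_known_flows:
  well_known_flows[flow_name].ProcessMessage(message)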
Example #5
  def GetAllWellKnownFlows(cls, token=None):
    """Get instances of all well known flows."""
    well_known_flows = {}
    for cls in itervalues(registry.AFF4FlowRegistry.FLOW_REGISTRY):
      if aff4.issubclass(cls, WellKnownFlow) and cls.well_known_session_id:
        well_known_flow = cls(cls.well_known_session_id, mode="rw", token=token)
        well_known_flows[cls.well_known_session_id.FlowName()] = well_known_flow

    return well_known_flows
Example #6
    def GetRendererForValueOrClass(cls, value, limit_lists=-1):
        """Returns renderer corresponding to a given value and rendering args."""

        if inspect.isclass(value):
            value_cls = value
        else:
            value_cls = value.__class__

        cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
        try:
            renderer_cls = cls._renderers_cache[cache_key]
        except KeyError:
            candidates = []
            for candidate in itervalues(ApiValueRenderer.classes):
                if candidate.value_class:
                    candidate_class = candidate.value_class
                else:
                    continue

                if inspect.isclass(value):
                    if aff4.issubclass(value_cls, candidate_class):
                        candidates.append((candidate, candidate_class))
                else:
                    if isinstance(value, candidate_class):
                        candidates.append((candidate, candidate_class))

            if not candidates:
                raise RuntimeError("No renderer found for value %s." %
                                   value.__class__.__name__)

            candidates = sorted(candidates,
                                key=lambda candidate: len(candidate[1].mro()))
            renderer_cls = candidates[-1][0]
            cls._renderers_cache[cache_key] = renderer_cls

        return renderer_cls(limit_lists=limit_lists)
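
The sort by MRO length is the tie-breaker that makes the most specific renderer win: a renderer registered for a subclass sits deeper in the MRO than one registered for its base, so it lands last in the sorted list. A hypothetical pair of renderers shows the effect:

# Hypothetical renderers; only value_class participates in the lookup.
class ApiAnyValueRenderer(ApiValueRenderer):
    value_class = rdfvalue.RDFValue      # matches every RDFValue

class ApiIntegerRenderer(ApiValueRenderer):
    value_class = rdfvalue.RDFInteger    # deeper MRO, wins for integers

# Resolves to an ApiIntegerRenderer, since len(RDFInteger.mro()) is larger.
renderer = ApiValueRenderer.GetRendererForValueOrClass(rdfvalue.RDFInteger(5))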
Example #7
    def CreateClientObject(self, vfs_fixture):
        """Make a new client object."""

        # First remove the old fixture just in case it's still there.
        aff4.FACTORY.Delete(self.client_id, token=self.token)

        # Create the fixture at a fixed time.
        with test_lib.FakeTime(self.age):
            for path, (aff4_type, attributes) in vfs_fixture:
                path %= self.args

                aff4_object = aff4.FACTORY.Create(self.client_id.Add(path),
                                                  aff4_type,
                                                  mode="rw",
                                                  token=self.token)

                if data_store.RelationalDBWriteEnabled():
                    data_store.REL_DB.WriteClientMetadata(
                        self.client_id.Basename(), fleetspeak_enabled=False)

                    components = [
                        component for component in path.split("/") if component
                    ]
                    if components[0:2] == ["fs", "os"]:
                        path_info = rdf_objects.PathInfo()
                        path_info.path_type = rdf_objects.PathInfo.PathType.OS
                        path_info.components = components[2:]
                        if aff4_type in [
                                aff4_grr.VFSFile, aff4_grr.VFSMemoryFile
                        ]:
                            path_info.directory = False
                        elif aff4_type == aff4_standard.VFSDirectory:
                            path_info.directory = True
                        else:
                            raise ValueError("Incorrect AFF4 type: %s" %
                                             aff4_type)
                        data_store.REL_DB.WritePathInfos(
                            client_id=self.client_id.Basename(),
                            path_infos=[path_info])

                for attribute_name, value in iteritems(attributes):
                    attribute = aff4.Attribute.PREDICATES[attribute_name]
                    if isinstance(value, (str, unicode)):
                        # Interpolate the value
                        value %= self.args

                    # Is this supposed to be an RDFValue array?
                    if aff4.issubclass(attribute.attribute_type,
                                       rdf_protodict.RDFValueArray):
                        rdfvalue_object = attribute()
                        for item in value:
                            new_object = rdfvalue_object.rdf_type.FromTextFormat(
                                utils.SmartStr(item))
                            rdfvalue_object.Append(new_object)

                    # It is a text serialized protobuf.
                    elif aff4.issubclass(attribute.attribute_type,
                                         rdf_structs.RDFProtoStruct):
                        # Use the alternate constructor - we always write protobufs in
                        # textual form:
                        rdfvalue_object = attribute.attribute_type.FromTextFormat(
                            utils.SmartStr(value))

                    elif aff4.issubclass(attribute.attribute_type,
                                         rdfvalue.RDFInteger):
                        rdfvalue_object = attribute(int(value))
                    else:
                        rdfvalue_object = attribute(value)

                    # If we don't already have a pathspec, try and get one from the stat.
                    if aff4_object.Get(aff4_object.Schema.PATHSPEC) is None:
                        # If the attribute was a stat, it has a pathspec nested in it.
                        # We should add that pathspec as an attribute.
                        if attribute.attribute_type == rdf_client_fs.StatEntry:
                            stat_object = attribute.attribute_type.FromTextFormat(
                                utils.SmartStr(value))
                            if stat_object.pathspec:
                                pathspec_attribute = aff4.Attribute(
                                    "aff4:pathspec", rdf_paths.PathSpec,
                                    "The pathspec used to retrieve "
                                    "this object from the client.", "pathspec")
                                aff4_object.AddAttribute(
                                    pathspec_attribute, stat_object.pathspec)

                    if attribute in ["aff4:content", "aff4:content"]:
                        # For AFF4MemoryStreams we need to call Write() instead of
                        # directly setting the contents..
                        aff4_object.Write(rdfvalue_object.AsBytes())
                    else:
                        aff4_object.AddAttribute(attribute, rdfvalue_object)

                    if (isinstance(rdfvalue_object, rdf_client_fs.StatEntry)
                            and rdfvalue_object.pathspec.pathtype != "UNSET"):
                        if data_store.RelationalDBWriteEnabled():
                            client_id = self.client_id.Basename()
                            path_info = rdf_objects.PathInfo.FromStatEntry(
                                rdfvalue_object)
                            data_store.REL_DB.WritePathInfos(
                                client_id, [path_info])

                # Populate the KB from the client attributes.
                if aff4_type == aff4_grr.VFSGRRClient:
                    kb = rdf_client.KnowledgeBase()
                    artifact.SetCoreGRRKnowledgeBaseValues(kb, aff4_object)
                    aff4_object.Set(aff4_object.Schema.KNOWLEDGE_BASE, kb)

                # Make sure we do not actually close the object here - we only want to
                # sync back its attributes, not run any finalization code.
                aff4_object.Flush()
                if aff4_type == aff4_grr.VFSGRRClient:
                    index = client_index.CreateClientIndex(token=self.token)
                    index.AddClient(aff4_object)
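
The vfs_fixture argument is expected to yield (path, (aff4_type, attributes)) tuples, with attribute values in the text-protobuf form that FromTextFormat() parses. An illustrative single-entry fixture (the path, attribute name, and payload are assumptions):

# Illustrative fixture entry; the payload is a text-serialized StatEntry.
VFS_FIXTURE = [
    ("fs/os/c/notes.txt", (aff4_grr.VFSFile, {
        "aff4:stat": ("st_mode: 33261 "
                      "pathspec { pathtype: OS path: '/c/notes.txt' }"),
    })),
]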
Example #8
    def BuildTypeDescriptor(self, value_cls):
        result = ApiRDFValueDescriptor(
            name=value_cls.__name__,
            parents=[klass.__name__ for klass in value_cls.__mro__],
            doc=value_cls.__doc__ or "",
            kind="STRUCT")

        for field_desc in value_cls.type_infos:
            repeated = isinstance(field_desc, rdf_structs.ProtoList)
            if hasattr(field_desc, "delegate"):
                field_desc = field_desc.delegate

            field = ApiRDFValueFieldDescriptor(
                name=field_desc.name,
                index=field_desc.field_number,
                repeated=repeated,
                dynamic=isinstance(field_desc,
                                   rdf_structs.ProtoDynamicEmbedded))

            field_type = field_desc.type
            if field_type is not None:
                field.type = field_type.__name__

                if field_type.context_help_url:
                    # Class attribute context_help_url masks similarly named protobuf
                    # attribute. Using the Set method to set the right attribute.
                    field.Set("context_help_url", field_type.context_help_url)

            if field_type == rdf_structs.EnumNamedValue:
                for enum_label in sorted(field_desc.enum,
                                         key=field_desc.enum.get):
                    enum_value = field_desc.enum[enum_label]
                    labels = [
                        rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
                        for x in enum_value.labels or []
                    ]

                    field.allowed_values.append(
                        ApiRDFAllowedEnumValueDescriptor(
                            name=enum_label,
                            value=int(enum_value),
                            labels=labels,
                            doc=enum_value.description))

            if (field_desc.default is not None
                    and not aff4.issubclass(field_type, rdf_structs.RDFStruct)
                    and hasattr(field_desc, "GetDefault")):
                field.default = field.GetDefaultValueClass()(
                    field_desc.GetDefault())

            if field_desc.description:
                field.doc = field_desc.description

            if field_desc.friendly_name:
                field.friendly_name = field_desc.friendly_name

            if field_desc.labels:
                field.labels = [
                    rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
                    for x in field_desc.labels
                ]

            result.fields.append(field)

        for processor in self.descriptor_processors:
            result.fields = processor(self, result.fields)

        if getattr(value_cls, "union_field", None):
            result.union_field_name = value_cls.union_field

        try:
            result.default = value_cls()
        except Exception as e:  # pylint: disable=broad-except
            # TODO(user): Some RDFStruct classes can't be constructed using
            # default constructor (without arguments). Fix the code so that
            # we can either construct all the RDFStruct classes with default
            # constructors or know exactly which classes can't be constructed
            # with default constructors.
            logging.debug("Can't create default for struct %s: %s",
                          field_type.__name__, e)

        return result
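
A sketch of inspecting a built descriptor (the builder instance and the choice of struct class are assumptions):

# Hypothetical inspection; "builder" is an instance of the class above.
desc = builder.BuildTypeDescriptor(rdf_client.ClientInformation)
print("%s (%s)" % (desc.name, desc.kind))   # e.g. "ClientInformation (STRUCT)"
for field in desc.fields:
    print("%d %s %s" % (field.index, field.name, field.type))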
Example #9
  def CreateClientObject(self, vfs_fixture):
    """Make a new client object."""

    # First remove the old fixture just in case it's still there.
    aff4.FACTORY.Delete(self.client_id, token=self.token)

    # Create the fixture at a fixed time.
    with test_lib.FakeTime(self.age):
      for path, (aff4_type, attributes) in vfs_fixture:
        path %= self.args

        aff4_object = aff4.FACTORY.Create(
            self.client_id.Add(path), aff4_type, mode="rw", token=self.token)

        if data_store.RelationalDBWriteEnabled():
          data_store.REL_DB.WriteClientMetadata(
              self.client_id.Basename(), fleetspeak_enabled=False)

          components = [component for component in path.split("/") if component]
          if components[0:2] == ["fs", "os"]:
            path_info = rdf_objects.PathInfo()
            path_info.path_type = rdf_objects.PathInfo.PathType.OS
            path_info.components = components[2:]
            if aff4_type in [aff4_grr.VFSFile, aff4_grr.VFSMemoryFile]:
              path_info.directory = False
            elif aff4_type == aff4_standard.VFSDirectory:
              path_info.directory = True
            else:
              raise ValueError("Incorrect AFF4 type: %s" % aff4_type)
            data_store.REL_DB.WritePathInfos(
                client_id=self.client_id.Basename(), path_infos=[path_info])

        for attribute_name, value in iteritems(attributes):
          attribute = aff4.Attribute.PREDICATES[attribute_name]
          if isinstance(value, (str, unicode)):
            # Interpolate the value
            value %= self.args

          # Is this supposed to be an RDFValue array?
          if aff4.issubclass(attribute.attribute_type,
                             rdf_protodict.RDFValueArray):
            rdfvalue_object = attribute()
            for item in value:
              new_object = rdfvalue_object.rdf_type.FromTextFormat(
                  utils.SmartStr(item))
              rdfvalue_object.Append(new_object)

          # It is a text serialized protobuf.
          elif aff4.issubclass(attribute.attribute_type,
                               rdf_structs.RDFProtoStruct):
            # Use the alternate constructor - we always write protobufs in
            # textual form:
            rdfvalue_object = attribute.attribute_type.FromTextFormat(
                utils.SmartStr(value))

          elif aff4.issubclass(attribute.attribute_type, rdfvalue.RDFInteger):
            rdfvalue_object = attribute(int(value))
          else:
            rdfvalue_object = attribute(value)

          # If we don't already have a pathspec, try and get one from the stat.
          if aff4_object.Get(aff4_object.Schema.PATHSPEC) is None:
            # If the attribute was a stat, it has a pathspec nested in it.
            # We should add that pathspec as an attribute.
            if attribute.attribute_type == rdf_client.StatEntry:
              stat_object = attribute.attribute_type.FromTextFormat(
                  utils.SmartStr(value))
              if stat_object.pathspec:
                pathspec_attribute = aff4.Attribute(
                    "aff4:pathspec", rdf_paths.PathSpec,
                    "The pathspec used to retrieve "
                    "this object from the client.", "pathspec")
                aff4_object.AddAttribute(pathspec_attribute,
                                         stat_object.pathspec)

          if attribute in ["aff4:content", "aff4:content"]:
            # For AFF4MemoryStreams we need to call Write() instead of
            # directly setting the contents..
            aff4_object.Write(rdfvalue_object.AsBytes())
          else:
            aff4_object.AddAttribute(attribute, rdfvalue_object)

          if (isinstance(rdfvalue_object, rdf_client.StatEntry) and
              rdfvalue_object.pathspec.pathtype != "UNSET"):
            if data_store.RelationalDBWriteEnabled():
              client_id = self.client_id.Basename()
              path_info = rdf_objects.PathInfo.FromStatEntry(rdfvalue_object)
              data_store.REL_DB.WritePathInfos(client_id, [path_info])

        # Populate the KB from the client attributes.
        if aff4_type == aff4_grr.VFSGRRClient:
          kb = rdf_client.KnowledgeBase()
          artifact.SetCoreGRRKnowledgeBaseValues(kb, aff4_object)
          aff4_object.Set(aff4_object.Schema.KNOWLEDGE_BASE, kb)

        # Make sure we do not actually close the object here - we only want to
        # sync back its attributes, not run any finalization code.
        aff4_object.Flush()
        if aff4_type == aff4_grr.VFSGRRClient:
          index = client_index.CreateClientIndex(token=self.token)
          index.AddClient(aff4_object)
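
Examples #7 and #9 are two vintages of the same fixture loader: the newer one reads StatEntry from rdf_client_fs after the module split, the older one from rdf_client. A guarded import keeps downstream code working against either tree (the module paths are assumptions about the two GRR source layouts):

# Guarded import across the rdf_client -> rdf_client_fs split.
try:
  from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
  StatEntry = rdf_client_fs.StatEntry
except ImportError:
  from grr.lib.rdfvalues import client as rdf_client
  StatEntry = rdf_client.StatEntry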