def ScheduleSystemCronFlows(token=None):
  """Schedule all the SystemCronFlows found."""
  # Validate up front: every configured job name must map to a registered
  # SystemCronFlow subclass.
  for job_name in config_lib.CONFIG["Cron.enabled_system_jobs"]:
    try:
      job_cls = flow.GRRFlow.classes[job_name]
    except KeyError:
      raise KeyError("No such flow: %s." % job_name)

    if not aff4.issubclass(job_cls, SystemCronFlow):
      raise ValueError("Enabled system cron job name doesn't correspond to "
                       "a flow inherited from SystemCronFlow: %s" % job_name)

  # Register every SystemCronFlow. Jobs absent from the enabled list are
  # still scheduled, but in a disabled state.
  for job_name, job_cls in flow.GRRFlow.classes.items():
    if not aff4.issubclass(job_cls, SystemCronFlow):
      continue

    cron_args = CreateCronJobFlowArgs(periodicity=job_cls.frequency)
    cron_args.flow_runner_args.flow_name = job_name
    cron_args.lifetime = job_cls.lifetime
    cron_args.start_time = GetStartTime(job_cls)

    is_disabled = job_name not in config_lib.CONFIG[
        "Cron.enabled_system_jobs"]
    CRON_MANAGER.ScheduleFlow(cron_args=cron_args,
                              job_name=job_name,
                              token=token,
                              disabled=is_disabled)
def FindRendererForObject(rdf_obj):
  """Find the appropriate renderer for an RDFValue object."""
  # Lazily populate both renderer caches on first use.
  if not semantic_renderer_cache:
    for renderer_cls in RDFValueRenderer.classes.values():
      if aff4.issubclass(renderer_cls, RDFValueArrayRenderer):
        repeated_renderer_cache[renderer_cls.classname] = renderer_cls
      elif aff4.issubclass(renderer_cls, RDFValueRenderer):
        semantic_renderer_cache[renderer_cls.classname] = renderer_cls

  classname = rdf_obj.__class__.__name__

  # Repeated types get array renderers. This allows renderers to be
  # specified for repeated fields.
  if isinstance(rdf_obj, rdf_protodict.RDFValueArray):
    renderer = repeated_renderer_cache.get(classname, RDFValueArrayRenderer)
    return renderer(rdf_obj)

  if isinstance(rdf_obj, rdf_structs.RepeatedFieldHelper):
    # Repeated field helpers are keyed by their element type, not their own
    # class name.
    classname = rdf_obj.type_descriptor.type.__name__
    renderer = repeated_renderer_cache.get(classname, RDFValueArrayRenderer)
    return renderer(rdf_obj)

  # Semantic protos default to RDFProtoRenderer.
  if isinstance(rdf_obj, rdf_structs.RDFProtoStruct):
    return semantic_renderer_cache.get(classname, RDFProtoRenderer)(rdf_obj)

  # Semantic values default to RDFValueRenderer.
  if isinstance(rdf_obj, rdfvalue.RDFValue):
    return semantic_renderer_cache.get(classname, RDFValueRenderer)(rdf_obj)

  if isinstance(rdf_obj, dict):
    return DictRenderer(rdf_obj)

  # Default renderer.
  return RDFValueRenderer(rdf_obj)
def ScheduleSystemCronFlows(token=None):
  """Schedule all the SystemCronFlows found."""
  # Fail fast when the config names an unknown flow or one that isn't a
  # SystemCronFlow.
  enabled_jobs = config_lib.CONFIG["Cron.enabled_system_jobs"]
  for flow_name in enabled_jobs:
    try:
      flow_cls = flow.GRRFlow.classes[flow_name]
    except KeyError:
      raise KeyError("No such flow: %s." % flow_name)

    if not aff4.issubclass(flow_cls, SystemCronFlow):
      raise ValueError("Enabled system cron job name doesn't correspond to "
                       "a flow inherited from SystemCronFlow: %s" % flow_name)

  # Schedule every SystemCronFlow; ones not in the enabled list are created
  # disabled.
  for flow_name, flow_cls in flow.GRRFlow.classes.items():
    if aff4.issubclass(flow_cls, SystemCronFlow):
      job_args = CreateCronJobFlowArgs(periodicity=flow_cls.frequency)
      job_args.flow_runner_args.flow_name = flow_name
      job_args.lifetime = flow_cls.lifetime
      job_args.start_time = GetStartTime(flow_cls)

      CRON_MANAGER.ScheduleFlow(
          cron_args=job_args,
          job_name=flow_name,
          token=token,
          disabled=flow_name not in enabled_jobs)
def ScheduleSystemCronFlows(names=None, token=None):
  """Schedule all the SystemCronFlows found.

  Args:
    names: Optional list of flow names to schedule. When None, every
      registered SystemCronFlow subclass is scheduled.
    token: Security token used to schedule the cron jobs.

  Raises:
    RuntimeError: if both Cron.enabled_system_jobs and
      Cron.disabled_system_jobs are set in the config.
    KeyError: if a configured job name doesn't match any registered flow.
    ValueError: if a configured job name doesn't name a SystemCronFlow.
  """
  if (config_lib.CONFIG["Cron.enabled_system_jobs"] and
      config_lib.CONFIG["Cron.disabled_system_jobs"]):
    raise RuntimeError("Can't have both Cron.enabled_system_jobs and "
                       "Cron.disabled_system_jobs specified in the config.")

  def ValidateConfiguredJobs(config_key, kind):
    """Ensure every name in config_key maps to a SystemCronFlow subclass."""
    for job_name in config_lib.CONFIG[config_key]:
      try:
        job_cls = flow.GRRFlow.classes[job_name]
      except KeyError:
        raise KeyError("No such flow: %s." % job_name)

      if not aff4.issubclass(job_cls, SystemCronFlow):
        raise ValueError(
            "%s system cron job name doesn't correspond to "
            "a flow inherited from SystemCronFlow: %s" % (kind, job_name))

  # TODO(user): remove references to Cron.enabled_system_jobs by the end
  # of Q1 2016.
  ValidateConfiguredJobs("Cron.enabled_system_jobs", "Enabled")
  ValidateConfiguredJobs("Cron.disabled_system_jobs", "Disabled")

  if names is None:
    names = flow.GRRFlow.classes.keys()

  for name in names:
    cls = flow.GRRFlow.classes[name]
    if not aff4.issubclass(cls, SystemCronFlow):
      continue

    cron_args = CreateCronJobFlowArgs(periodicity=cls.frequency)
    cron_args.flow_runner_args.flow_name = name
    cron_args.lifetime = cls.lifetime
    cron_args.allow_overruns = cls.allow_overruns
    cron_args.start_time = GetStartTime(cls)

    # A job is disabled when its class says so, or when the enabled /
    # disabled config lists exclude it.
    if cls.disabled:
      disabled = True
    elif config_lib.CONFIG["Cron.enabled_system_jobs"]:
      disabled = name not in config_lib.CONFIG["Cron.enabled_system_jobs"]
    else:
      disabled = name in config_lib.CONFIG["Cron.disabled_system_jobs"]

    CRON_MANAGER.ScheduleFlow(cron_args=cron_args,
                              job_name=name,
                              token=token,
                              disabled=disabled)
def ScheduleSystemCronFlows(names=None, token=None):
  """Schedule all the SystemCronFlows found."""
  enabled_jobs = config_lib.CONFIG["Cron.enabled_system_jobs"]
  disabled_jobs = config_lib.CONFIG["Cron.disabled_system_jobs"]

  # The two config options are mutually exclusive.
  if enabled_jobs and disabled_jobs:
    raise RuntimeError("Can't have both Cron.enabled_system_jobs and "
                       "Cron.disabled_system_jobs specified in the config.")

  # TODO(user): remove references to Cron.enabled_system_jobs by the end
  # of Q1 2016.
  for job_name in enabled_jobs:
    try:
      job_cls = flow.GRRFlow.classes[job_name]
    except KeyError:
      raise KeyError("No such flow: %s." % job_name)
    if not aff4.issubclass(job_cls, SystemCronFlow):
      raise ValueError("Enabled system cron job name doesn't correspond to "
                       "a flow inherited from SystemCronFlow: %s" % job_name)

  for job_name in disabled_jobs:
    try:
      job_cls = flow.GRRFlow.classes[job_name]
    except KeyError:
      raise KeyError("No such flow: %s." % job_name)
    if not aff4.issubclass(job_cls, SystemCronFlow):
      raise ValueError("Disabled system cron job name doesn't correspond to "
                       "a flow inherited from SystemCronFlow: %s" % job_name)

  if names is None:
    names = flow.GRRFlow.classes.keys()

  for job_name in names:
    job_cls = flow.GRRFlow.classes[job_name]
    if not aff4.issubclass(job_cls, SystemCronFlow):
      continue

    cron_args = CreateCronJobFlowArgs(periodicity=job_cls.frequency)
    cron_args.flow_runner_args.flow_name = job_name
    cron_args.lifetime = job_cls.lifetime
    cron_args.allow_overruns = job_cls.allow_overruns
    cron_args.start_time = GetStartTime(job_cls)

    # Class-level disabling wins; otherwise whichever config list is in
    # effect decides.
    if job_cls.disabled:
      is_disabled = True
    elif enabled_jobs:
      is_disabled = job_name not in enabled_jobs
    else:
      is_disabled = job_name in disabled_jobs

    CRON_MANAGER.ScheduleFlow(cron_args=cron_args,
                              job_name=job_name,
                              token=token,
                              disabled=is_disabled)
def StartHunt(cls, args=None, runner_args=None, **kwargs):
  """This class method creates new hunts."""
  # Build the runner args from the keywords.
  if runner_args is None:
    runner_args = HuntRunnerArgs()

  cls.FilterArgsFromSemanticProtobuf(runner_args, kwargs)

  # Is the required flow a known flow?
  hunt_name = runner_args.hunt_name
  if (hunt_name not in cls.classes or
      not aff4.issubclass(cls.classes[hunt_name], GRRHunt)):
    raise RuntimeError("Unable to locate hunt %s" % hunt_name)

  # Make a new hunt object and initialize its runner.
  hunt_obj = aff4.FACTORY.Create(None,
                                 cls.classes[hunt_name],
                                 mode="w",
                                 token=runner_args.token)

  # Hunt is called using keyword args. We construct an args proto from the
  # kwargs..
  if hunt_obj.args_type and args is None:
    args = hunt_obj.args_type()
    cls.FilterArgsFromSemanticProtobuf(args, kwargs)

  if hunt_obj.args_type and not isinstance(args, hunt_obj.args_type):
    raise RuntimeError("Hunt args must be instance of %s" %
                       hunt_obj.args_type)

  if kwargs:
    raise type_info.UnknownArg("Unknown parameters to StartHunt: %s" % kwargs)

  # Store the hunt args in the state.
  hunt_obj.state.Register("args", args)

  # Hunts are always created in the paused state. The runner method Start
  # should be called to start them.
  hunt_obj.Set(hunt_obj.Schema.STATE("PAUSED"))

  runner = hunt_obj.CreateRunner(runner_args=runner_args)
  # Allow the hunt to do its own initialization.
  runner.RunStateMethod("Start")

  hunt_obj.Flush()

  # Not every args proto carries a flow name; fall back to empty.
  try:
    flow_name = args.flow_runner_args.flow_name
  except AttributeError:
    flow_name = ""

  event = events_lib.AuditEvent(user=runner_args.token.username,
                                action="HUNT_CREATED",
                                urn=hunt_obj.urn,
                                flow_name=flow_name,
                                description=runner_args.description)
  events_lib.Events.PublishEvent("Audit", event, token=runner_args.token)

  return hunt_obj
def StartHunt(cls, args=None, runner_args=None, **kwargs):
  """This class method creates new hunts."""
  # Build the runner args from the keywords.
  if runner_args is None:
    runner_args = HuntRunnerArgs()

  cls.FilterArgsFromSemanticProtobuf(runner_args, kwargs)

  # Is the required flow a known flow?
  if (runner_args.hunt_name not in cls.classes or
      not aff4.issubclass(cls.classes[runner_args.hunt_name], GRRHunt)):
    raise RuntimeError("Unable to locate hunt %s" % runner_args.hunt_name)

  # Make a new hunt object and initialize its runner.
  hunt_obj = aff4.FACTORY.Create(None,
                                 runner_args.hunt_name,
                                 mode="w",
                                 token=runner_args.token)

  # Hunt is called using keyword args. We construct an args proto from the
  # kwargs..
  if hunt_obj.args_type and args is None:
    args = hunt_obj.args_type()
    cls.FilterArgsFromSemanticProtobuf(args, kwargs)

  if hunt_obj.args_type and not isinstance(args, hunt_obj.args_type):
    raise RuntimeError("Hunt args must be instance of %s" %
                       hunt_obj.args_type)

  if kwargs:
    raise type_info.UnknownArg("Unknown parameters to StartHunt: %s" % kwargs)

  # Store the hunt args in the state.
  hunt_obj.state.Register("args", args)

  # Hunts are always created in the paused state. The runner method Start
  # should be called to start them.
  hunt_obj.Set(hunt_obj.Schema.STATE("PAUSED"))

  runner = hunt_obj.CreateRunner(runner_args=runner_args)
  # Allow the hunt to do its own initialization.
  runner.RunStateMethod("Start")

  hunt_obj.Flush()

  # The flow name is best-effort: some args protos don't carry one.
  try:
    flow_name = args.flow_runner_args.flow_name
  except AttributeError:
    flow_name = ""

  event = flow.AuditEvent(user=runner_args.token.username,
                          action="HUNT_CREATED",
                          urn=hunt_obj.urn,
                          flow_name=flow_name,
                          description=runner_args.description)
  flow.Events.PublishEvent("Audit", event, token=runner_args.token)

  return hunt_obj
def CheckFlowCanBeStartedAsGlobal(flow_name):
  """Checks if flow can be started without a client id.

  Two kinds of flows can be started on clients by unprivileged users:

  1) ACL_ENFORCED=False flows, because they're expected to do their own ACL
     checking and are often used by AdminUI to execute code with elevated
     privileges.

  2) Flows inherited from GRRGlobalFlow, with a category. Having a category
     means that the flow will be accessible from the UI.

  Args:
    flow_name: Name of the flow to check access for.
  Returns:
    True if flow is externally accessible.
  Raises:
    access_control.UnauthorizedAccess: if flow is not externally accessible.
  """
  flow_cls = flow.GRRFlow.GetPlugin(flow_name)

  # The original expression relied on "and" binding tighter than "or"; the
  # intermediate below makes that grouping explicit.
  is_global_with_category = (aff4.issubclass(flow_cls, flow.GRRGlobalFlow)
                             and flow_cls.category)
  if not flow_cls.ACL_ENFORCED or is_global_with_category:
    return True

  raise access_control.UnauthorizedAccess(
      "Flow %s can't be started globally by non-suid users" % flow_name)
def CallFallback(self, artifact_name, request_data):
  """Call the fallback collector flow registered for artifact_name, if any."""
  registry = artifact.ArtifactFallbackCollector.classes.items()
  for clsname, fallback_cls in registry:
    if not aff4.issubclass(fallback_cls,
                           artifact.ArtifactFallbackCollector):
      continue

    if artifact_name not in fallback_cls.artifacts:
      continue

    if artifact_name in self.state.called_fallbacks:
      self.Log("Already called fallback class %s for artifact: %s",
               clsname, artifact_name)
    else:
      self.Log("Calling fallback class %s for artifact: %s",
               clsname, artifact_name)

      self.CallFlow(clsname,
                    request_data=request_data.ToDict(),
                    artifact_name=artifact_name,
                    next_state="ProcessCollected")

      # Make sure we only try this once
      self.state.called_fallbacks.add(artifact_name)
    # A matching fallback class was found (whether or not we just called
    # it), so report success.
    return True
  return False
def CallFallback(self, artifact_name, request_data):
  """Invoke the fallback collector handling artifact_name, if registered."""
  for name, candidate in artifact.ArtifactFallbackCollector.classes.items():
    if not aff4.issubclass(candidate, artifact.ArtifactFallbackCollector):
      continue

    if artifact_name in candidate.artifacts:
      if artifact_name in self.state.called_fallbacks:
        self.Log("Already called fallback class %s for artifact: %s",
                 name, artifact_name)
      else:
        self.Log("Calling fallback class %s for artifact: %s",
                 name, artifact_name)

        self.CallFlow(name,
                      request_data=request_data.ToDict(),
                      artifact_name=artifact_name,
                      next_state="ProcessCollected")

        # Make sure we only try this once
        self.state.called_fallbacks.add(artifact_name)
      return True
  return False
def RenderAFF4Object(request):
  """Handler for the /api/aff4 requests."""
  aff4_path = request.path.split("/", 3)[-1].strip("/")
  request.REQ = request.REQUEST
  token = BuildToken(request, 60)

  aff4_object = aff4.FACTORY.Open(aff4_path, token=token)
  object_classname = aff4_object.__class__.__name__

  try:
    renderer_cls = AFF4_RENDERERS_CACHE[object_classname]
  except KeyError:
    # Collect every renderer whose declared aff4_type matches this object,
    # then pick the most specific one (longest MRO).
    matching = []
    for candidate in api_renderers.ApiRenderer.classes.values():
      if candidate.aff4_type and aff4.issubclass(
          aff4_object.__class__,
          aff4.AFF4Object.classes[candidate.aff4_type]):
        matching.append(candidate)

    if not matching:
      raise RuntimeError("No renderer found for object %s." %
                         object_classname)

    matching = sorted(matching, key=lambda cls: len(cls.mro()))
    renderer_cls = matching[-1]
    AFF4_RENDERERS_CACHE[object_classname] = renderer_cls

  api_renderer = renderer_cls()
  rendered_data = api_renderer.RenderObject(aff4_object, request.REQ)

  response = http.HttpResponse(content_type="application/json")
  response.write(json.dumps(rendered_data))
  return response
def Homepage(request):
  """Basic handler to render the index page."""
  # We build a list of all js files to include by looking at the list
  # of renderers modules. JS files are always named in accordance with
  # renderers modules names. I.e. if there's a renderers package called
  # grr.gui.plugins.acl_manager, we expect a js files called acl_manager.js.
  renderers_js_files = set()
  for renderer_cls in renderers.Renderer.classes.values():
    if not (aff4.issubclass(renderer_cls, renderers.Renderer) and
            renderer_cls.__module__):
      continue
    parts = renderer_cls.__module__.split(".")
    # Only include files corresponding to renderers in "plugins" package.
    if parts[-2] == "plugins":
      renderers_js_files.add(parts[-1] + ".js")

  create_time = psutil.Process(os.getpid()).create_time()
  context = {
      "page_title": config_lib.CONFIG["AdminUI.page_title"],
      "heading": config_lib.CONFIG["AdminUI.heading"],
      "report_url": config_lib.CONFIG["AdminUI.report_url"],
      "help_url": config_lib.CONFIG["AdminUI.help_url"],
      "use_precompiled_js": config_lib.CONFIG["AdminUI.use_precompiled_js"],
      "renderers_js": renderers_js_files,
      "timestamp": create_time,
  }
  return shortcuts.render_to_response(
      "base.html", context,
      context_instance=template.RequestContext(request))
def Homepage(request):
  """Basic handler to render the index page."""
  # We build a list of all js files to include by looking at the list
  # of renderers modules. JS files are always named in accordance with
  # renderers modules names. I.e. if there's a renderers package called
  # grr.gui.plugins.acl_manager, we expect a js files called acl_manager.js.
  js_files = set()
  for renderer_cls in renderers.Renderer.classes.values():
    if aff4.issubclass(renderer_cls, renderers.Renderer) and (
        renderer_cls.__module__):
      components = renderer_cls.__module__.split(".")
      # Only include files corresponding to renderers in "plugins" package.
      if components[-2] == "plugins":
        js_files.add(components[-1] + ".js")

  context = {
      "page_title": config_lib.CONFIG["AdminUI.page_title"],
      "heading": config_lib.CONFIG["AdminUI.heading"],
      "report_url": config_lib.CONFIG["AdminUI.report_url"],
      "help_url": config_lib.CONFIG["AdminUI.help_url"],
      "use_precompiled_js": config_lib.CONFIG["AdminUI.use_precompiled_js"],
      "renderers_js": js_files,
      "timestamp": psutil.Process(os.getpid()).create_time(),
  }
  return shortcuts.render_to_response(
      "base.html", context,
      context_instance=template.RequestContext(request))
def _AddTest(self, cls, system, client_version):
  """Add cls to the test set if it is runnable on this client."""
  # Guard clauses: only concrete AutomatedTest subclasses matching the
  # client's platform and minimum version are scheduled.
  if not aff4.issubclass(cls, base.AutomatedTest):
    return
  if system not in cls.platforms:
    return
  if cls.client_min_version and client_version < cls.client_min_version:
    return
  # "Abstract*" classes are base classes and are never scheduled directly.
  if not cls.__name__.startswith("Abstract"):
    self.state.test_set.add(cls)
def GetStatsPaths(self, request):
  """Return the sorted stats paths of all categorized Report classes."""
  labels = aff4_grr.GetAllClientLabels(request.token, include_catchall=True)

  paths = []
  for report_cls in self.classes.values():
    if aff4.issubclass(report_cls, Report) and report_cls.category:
      paths.extend(InterpolatePaths(report_cls.category, labels).keys())

  return sorted(paths)
def GetStatsClasses(self):
  """Return the sorted categories of all Report subclasses."""
  categories = [
      report_cls.category for report_cls in self.classes.values()
      if aff4.issubclass(report_cls, Report) and report_cls.category
  ]
  categories.sort()
  return categories
def GetAllWellKnownFlows(cls, token=None):
  """Get instances of all well known flows.

  Args:
    cls: The class this classmethod is bound to.
    token: Security token used to instantiate the flows.

  Returns:
    A dict mapping each well known flow's name to a "rw"-mode instance.
  """
  well_known_flows = {}
  # FIX: the original loop variable was also named `cls`, shadowing the
  # classmethod parameter; use a distinct name so `cls` stays intact.
  for flow_cls in GRRFlow.classes.values():
    if (aff4.issubclass(flow_cls, WellKnownFlow) and
        flow_cls.well_known_session_id):
      session_id = flow_cls.well_known_session_id
      well_known_flows[session_id.FlowName()] = flow_cls(
          session_id, mode="rw", token=token)
  return well_known_flows
def _AddTest(self, test_name, system, client_version):
  """Add test_name to the test set if it applies to this client."""
  # We need to exclude classes that aren't in automatedtest, but .classes is
  # shared between all classes in the inheritance structure by design.
  test_cls = base.AutomatedTest.classes[test_name]
  if not aff4.issubclass(test_cls, base.AutomatedTest):
    return
  if system not in test_cls.platforms:
    return
  if (test_cls.client_min_version and
      client_version < test_cls.client_min_version):
    return
  # "Abstract*" classes are never scheduled directly.
  if not test_cls.__name__.startswith("Abstract"):
    self.state.test_set.add(test_name)
def FindRendererForObject(rdf_obj):
  """Find the appropriate renderer for an RDFValue object."""
  # Rebuild the cache if needed.
  if not semantic_renderer_cache:
    for candidate in RDFValueRenderer.classes.values():
      if aff4.issubclass(candidate, RDFValueArrayRenderer):
        repeated_renderer_cache[candidate.classname] = candidate
      elif aff4.issubclass(candidate, RDFValueRenderer):
        semantic_renderer_cache[candidate.classname] = candidate

  name = rdf_obj.__class__.__name__

  # Repeated values are handled by array renderers. This allows renderers
  # to be specified for repeated fields.
  if isinstance(rdf_obj, rdfvalue.RDFValueArray):
    return repeated_renderer_cache.get(name, RDFValueArrayRenderer)(rdf_obj)

  if isinstance(rdf_obj, structs.RepeatedFieldHelper):
    # Repeated field helpers are keyed by their element type.
    name = rdf_obj.type_descriptor.type.__name__
    return repeated_renderer_cache.get(name, RDFValueArrayRenderer)(rdf_obj)

  # Semantic protos are rendered by RDFProtoRenderer by default.
  if isinstance(rdf_obj, structs.RDFProtoStruct):
    return semantic_renderer_cache.get(name, RDFProtoRenderer)(rdf_obj)

  # Semantic values are rendered by RDFValueRenderer by default.
  if isinstance(rdf_obj, rdfvalue.RDFValue):
    return semantic_renderer_cache.get(name, RDFValueRenderer)(rdf_obj)

  if isinstance(rdf_obj, dict):
    return DictRenderer(rdf_obj)

  # Fall back to the generic renderer.
  return RDFValueRenderer(rdf_obj)
def Render(self, args, token=None):
  """Render one RDFValue type, or all known types when untyped."""
  _ = token
  if self.args_type:
    return self.RenderType(rdfvalue.RDFValue.classes[args.type])

  results = {}
  for value_cls in rdfvalue.RDFValue.classes.values():
    if aff4.issubclass(value_cls, rdfvalue.RDFValue):
      results[value_cls.__name__] = self.RenderType(value_cls)
  return results
def RunTests(client_id=None, platform=None, testname=None, token=None, local_worker=False): runner = unittest.TextTestRunner() for cls in ClientTestBase.classes.values(): if testname is not None and testname != cls.__name__: continue if not aff4.issubclass(cls, ClientTestBase): continue if platform in cls.platforms: print "Running %s." % cls.__name__ runner.run(cls(client_id=client_id, platform=platform, token=token, local_worker=local_worker))
def StartHunt(cls, args=None, runner_args=None, **kwargs):
  """This class method creates new hunts.

  Args:
    args: Optional hunt args proto; built from kwargs when omitted.
    runner_args: Optional HuntRunnerArgs; built from kwargs when omitted.
    **kwargs: Keyword args folded into runner_args and args protos.

  Returns:
    The newly created (paused) hunt object.

  Raises:
    RuntimeError: if the hunt name is unknown or args have the wrong type.
    type_info.UnknownArg: if unknown keyword parameters remain.
  """
  # Build the runner args from the keywords.
  if runner_args is None:
    runner_args = HuntRunnerArgs()

  cls._FilterArgsFromSemanticProtobuf(runner_args, kwargs)

  # Is the required flow a known flow?
  # BUG FIX: the original condition used "not in cls.classes and not
  # aff4.issubclass(GRRHunt, cls.classes[...])", which (a) subscripted
  # cls.classes exactly when the name was missing, raising KeyError instead
  # of the intended RuntimeError, and (b) reversed the issubclass argument
  # order. A valid name must be registered AND name a GRRHunt subclass.
  if (runner_args.hunt_name not in cls.classes or
      not aff4.issubclass(cls.classes[runner_args.hunt_name], GRRHunt)):
    raise RuntimeError("Unable to locate hunt %s" % runner_args.hunt_name)

  # Make a new hunt object and initialize its runner.
  hunt_obj = aff4.FACTORY.Create(None,
                                 runner_args.hunt_name,
                                 mode="w",
                                 token=runner_args.token)

  # Hunt is called using keyword args. We construct an args proto from the
  # kwargs..
  if hunt_obj.args_type and args is None:
    args = hunt_obj.args_type()
    cls._FilterArgsFromSemanticProtobuf(args, kwargs)

  if hunt_obj.args_type and not isinstance(args, hunt_obj.args_type):
    raise RuntimeError("Hunt args must be instance of %s" %
                       hunt_obj.args_type)

  if kwargs:
    raise type_info.UnknownArg("Unknown parameters to StartHunt: %s" % kwargs)

  # Store the hunt args in the state.
  hunt_obj.state.Register("args", args)

  # Hunts are always created in the paused state. The runner method Start
  # should be called to start them.
  hunt_obj.Set(hunt_obj.Schema.STATE("PAUSED"))

  with hunt_obj.CreateRunner(runner_args=runner_args) as runner:
    # Allow the hunt to do its own initialization.
    runner.RunStateMethod("Start")

  hunt_obj.Flush()

  return hunt_obj
def Layout(self, request, response):
  """Render the form for creating the flow args."""
  name = os.path.basename(request.REQ.get("flow_path", ""))
  self.flow_name = name
  self.flow_cls = flow.GRRFlow.classes.get(name)

  if aff4.issubclass(self.flow_cls, flow.GRRFlow):
    self.flow_found = True

    # One form for the flow's own args, one for the runner args.
    default_args = self.flow_cls.GetDefaultArgs(token=request.token)
    self.form = forms.SemanticProtoFormRenderer(
        default_args, prefix="args").RawHTML(request)

    runner_args = flow.FlowRunnerArgs(flow_name=name)
    self.runner_form = forms.SemanticProtoFormRenderer(
        runner_args, prefix="runner").RawHTML(request)

  return super(SemanticProtoFlowForm, self).Layout(request, response)
def RunTests(client_id=None, platform=None, testname=None, token=None, local_worker=False): runner = unittest.TextTestRunner() for cls in ClientTestBase.classes.values(): if testname is not None and testname != cls.__name__: continue if not aff4.issubclass(cls, ClientTestBase): continue if platform in cls.platforms: print "Running %s." % cls.__name__ try: runner.run(cls(client_id=client_id, platform=platform, token=token, local_worker=local_worker)) except Exception: # pylint: disable=broad-except logging.exception("Failed to run test %s", cls)
def GetTypeDescriptorRenderer(type_descriptor):
  """Return a TypeDescriptorFormRenderer responsible for the type_descriptor."""
  # Cache a mapping between type descriptors and their renderers for speed.
  if not semantic_renderer_cache:
    # Rebuild the cache on first access.
    for renderer_cls in TypeDescriptorFormRenderer.classes.values():
      # A renderer can specify that it works on a type. This is used for
      # nested protobuf.
      handled = getattr(renderer_cls, "type", None)
      # Or a generic type descriptor (i.e. all items of this type).
      if handled is None:
        handled = getattr(renderer_cls, "type_descriptor", None)
      if handled is None:
        continue
      # Repeated form renderers go in their own cache.
      if aff4.issubclass(renderer_cls, RepeatedFieldFormRenderer):
        repeated_renderer_cache[handled] = renderer_cls
      else:
        semantic_renderer_cache[handled] = renderer_cls

  # Try to find a renderer for this type descriptor's type:
  if isinstance(type_descriptor, type_info.ProtoList):
    # Special handling for repeated fields - must read from
    # repeated_renderer_cache.
    delegate_type = getattr(type_descriptor.delegate, "type", None)
    cache = repeated_renderer_cache
    default = RepeatedFieldFormRenderer
  else:
    delegate_type = getattr(type_descriptor, "type", None)
    cache = semantic_renderer_cache
    default = StringTypeFormRenderer

  # Prefer the exact type, then a handler registered for the whole
  # descriptor class, then the default renderer.
  result = cache.get(delegate_type)
  if result is None:
    result = cache.get(type_descriptor.__class__)
  if result is None:
    result = default
  return result
def RenderObject(obj, request=None):
  """Handler for the /api/aff4 requests."""
  if request is None:
    request = {}

  # Work out the cache key and which registry to match against.
  if isinstance(obj, aff4.AFF4Object):
    is_aff4 = True
    key = "aff4." + obj.__class__.__name__
  elif isinstance(obj, rdfvalue.RDFValue):
    is_aff4 = False
    key = "rdfvalue." + obj.__class__.__name__
  else:
    raise ValueError("Can't render object that's neither AFF4Object nor "
                     "RDFValue: %s." % utils.SmartStr(obj))

  try:
    renderer_cls = RENDERERS_CACHE[key]
  except KeyError:
    matches = []
    for candidate in ApiObjectRenderer.classes.values():
      if is_aff4 and candidate.aff4_type:
        handled_cls = aff4.AFF4Object.classes[candidate.aff4_type]
      elif candidate.rdfvalue_type:
        handled_cls = rdfvalue.RDFValue.classes[candidate.rdfvalue_type]
      else:
        continue

      if aff4.issubclass(obj.__class__, handled_cls):
        matches.append((candidate, handled_cls))

    if not matches:
      raise RuntimeError("No renderer found for object %s." %
                         obj.__class__.__name__)

    # The most specific renderer (deepest MRO of the handled class) wins;
    # sorted() is stable so ties resolve deterministically.
    matches = sorted(matches, key=lambda match: len(match[1].mro()))
    renderer_cls = matches[-1][0]
    RENDERERS_CACHE[key] = renderer_cls

  api_renderer = renderer_cls()
  return api_renderer.RenderObject(obj, request)
def Homepage(request):
  """Basic handler to render the index page."""
  # We build a list of all js files to include by looking at the list
  # of renderers modules. JS files are always named in accordance with
  # renderers modules names. I.e. if there's a renderers package called
  # grr.gui.plugins.acl_manager, we expect a js files called acl_manager.js.
  js_files = set()
  for renderer_cls in renderers.Renderer.classes.values():
    if aff4.issubclass(renderer_cls, renderers.Renderer) and (
        renderer_cls.__module__):
      module_name = renderer_cls.__module__.split(".")[-1]
      js_files.add(module_name + ".js")

  context = {
      "page_title": config_lib.CONFIG["AdminUI.page_title"],
      "heading": config_lib.CONFIG["AdminUI.heading"],
      "report_url": config_lib.CONFIG["AdminUI.report_url"],
      "help_url": config_lib.CONFIG["AdminUI.help_url"],
      "renderers_js": js_files,
  }
  return shortcuts.render_to_response(
      "base.html", context,
      context_instance=template.RequestContext(request))
def Layout(self, request, response):
  """Render the form for creating the flow args."""
  name = self._GetFlowName(request)
  self.flow_name = name
  self.flow_cls = flow.GRRFlow.classes.get(name)

  if aff4.issubclass(self.flow_cls, flow.GRRFlow):
    self.flow_found = True

    # One form for the flow's own args, one for the runner args.
    default_args = self.flow_cls.GetDefaultArgs(token=request.token)
    self.form = forms.SemanticProtoFormRenderer(
        default_args, prefix="args").RawHTML(request)

    runner_args = flow_runner.FlowRunnerArgs(flow_name=name)
    self.runner_form = forms.SemanticProtoFormRenderer(
        runner_args, prefix="runner").RawHTML(request)

  response = super(SemanticProtoFlowForm, self).Layout(request, response)
  return self.CallJavascript(response, "SemanticProtoFlowForm.Layout",
                             renderer=self.__class__.__name__)
def StartHunt(cls, args=None, runner_args=None, **kwargs):
  """This class method creates new hunts.

  Args:
    args: Optional hunt args proto; built from kwargs when omitted.
    runner_args: Optional HuntRunnerArgs; built from kwargs when omitted.
    **kwargs: Keyword args folded into runner_args and args protos.

  Returns:
    The newly created (paused) hunt object.

  Raises:
    RuntimeError: if the hunt name is unknown or args have the wrong type.
    type_info.UnknownArg: if unknown keyword parameters remain.
  """
  # Build the runner args from the keywords.
  if runner_args is None:
    runner_args = HuntRunnerArgs()

  cls._FilterArgsFromSemanticProtobuf(runner_args, kwargs)

  # Is the required flow a known flow?
  # BUG FIX: the original condition combined "not in cls.classes" with
  # "and not aff4.issubclass(GRRHunt, cls.classes[...])" — it subscripted
  # cls.classes precisely when the name was missing (KeyError instead of
  # the intended RuntimeError) and passed the issubclass arguments in the
  # wrong order. A valid name must be registered AND be a GRRHunt subclass.
  if (runner_args.hunt_name not in cls.classes or
      not aff4.issubclass(cls.classes[runner_args.hunt_name], GRRHunt)):
    raise RuntimeError("Unable to locate hunt %s" % runner_args.hunt_name)

  # Make a new hunt object and initialize its runner.
  hunt_obj = aff4.FACTORY.Create(None,
                                 runner_args.hunt_name,
                                 mode="w",
                                 token=runner_args.token)

  # Hunt is called using keyword args. We construct an args proto from the
  # kwargs..
  if hunt_obj.args_type and args is None:
    args = hunt_obj.args_type()
    cls._FilterArgsFromSemanticProtobuf(args, kwargs)

  if hunt_obj.args_type and not isinstance(args, hunt_obj.args_type):
    raise RuntimeError("Hunt args must be instance of %s" %
                       hunt_obj.args_type)

  if kwargs:
    raise type_info.UnknownArg("Unknown parameters to StartHunt: %s" % kwargs)

  # Store the hunt args in the state.
  hunt_obj.state.Register("args", args)

  # Hunts are always created in the paused state. The runner method Start
  # should be called to start them.
  hunt_obj.Set(hunt_obj.Schema.STATE("PAUSED"))

  with hunt_obj.CreateRunner(runner_args=runner_args) as runner:
    # Allow the hunt to do its own initialization.
    runner.RunStateMethod("Start")

  hunt_obj.Flush()

  return hunt_obj
def RenderAFF4Object(obj, args=None):
  """Renders given AFF4 object into JSON-friendly data structure."""
  args = args or []

  cache_key = obj.__class__.__name__
  try:
    candidates = RENDERERS_CACHE[cache_key]
  except KeyError:
    # Find every renderer whose declared aff4_type matches this object.
    candidates = []
    for candidate in ApiAFF4ObjectRendererBase.classes.values():
      if not candidate.aff4_type:
        continue
      handled_cls = aff4.AFF4Object.classes[candidate.aff4_type]
      if aff4.issubclass(obj.__class__, handled_cls):
        candidates.append(candidate)

    if not candidates:
      raise RuntimeError("No renderer found for object %s." %
                         obj.__class__.__name__)

    # Ensure that the renderers order is stable.
    candidates = sorted(candidates, key=lambda cls: cls.__name__)
    RENDERERS_CACHE[cache_key] = candidates

  result = {}
  for candidate in candidates:
    # Pick the supplied args instance matching this renderer, if any;
    # otherwise construct a default one.
    api_renderer_args = None
    for arg in args:
      if candidate.args_type and isinstance(arg, candidate.args_type):
        api_renderer_args = arg

    if api_renderer_args is None and candidate.args_type is not None:
      api_renderer_args = candidate.args_type()

    renderer_output = candidate().RenderObject(obj, api_renderer_args)
    result.update(renderer_output)

  return result
def RenderObject(obj, request=None):
  """Handler for the /api/aff4 requests."""
  if request is None:
    request = {}

  if isinstance(obj, aff4.AFF4Object):
    is_aff4 = True
    key = "aff4." + obj.__class__.__name__
  elif isinstance(obj, rdfvalue.RDFValue):
    is_aff4 = False
    key = "rdfvalue." + obj.__class__.__name__
  else:
    raise ValueError("Can't render object that's neither AFF4Object nor "
                     "RDFValue: %s." % utils.SmartStr(obj))

  try:
    renderer_cls = RENDERERS_CACHE[key]
  except KeyError:
    # Score every matching renderer by the specificity (MRO depth) of the
    # class it handles.
    scored = []
    for candidate in ApiObjectRenderer.classes.values():
      if is_aff4 and candidate.aff4_type:
        target = aff4.AFF4Object.classes[candidate.aff4_type]
      elif candidate.rdfvalue_type:
        target = rdfvalue.RDFValue.classes[candidate.rdfvalue_type]
      else:
        continue

      if aff4.issubclass(obj.__class__, target):
        scored.append((candidate, target))

    if not scored:
      raise RuntimeError("No renderer found for object %s." %
                         obj.__class__.__name__)

    scored = sorted(scored, key=lambda pair: len(pair[1].mro()))
    renderer_cls = scored[-1][0]
    RENDERERS_CACHE[key] = renderer_cls

  api_renderer = renderer_cls()
  rendered_data = api_renderer.RenderObject(obj, request)
  return rendered_data
def Layout(self, request, response):
  """Render the form for creating the flow args."""
  flow_name = self._GetFlowName(request)
  self.flow_name = flow_name
  flow_cls = flow.GRRFlow.classes.get(flow_name)
  self.flow_cls = flow_cls

  if aff4.issubclass(flow_cls, flow.GRRFlow):
    self.flow_found = True

    self.form = forms.SemanticProtoFormRenderer(
        flow_cls.GetDefaultArgs(token=request.token),
        prefix="args").RawHTML(request)
    self.runner_form = forms.SemanticProtoFormRenderer(
        flow_runner.FlowRunnerArgs(flow_name=flow_name),
        prefix="runner").RawHTML(request)

  response = super(SemanticProtoFlowForm, self).Layout(request, response)
  return self.CallJavascript(response, "SemanticProtoFlowForm.Layout",
                             renderer=self.__class__.__name__)
def CheckFlowCanBeStartedAsGlobal(flow_name):
  """Checks if flow can be started without a client id.

  Single kind of flows can be started on clients by unprivileged users:
  Flows inherited from GRRGlobalFlow, with a category. Having a category
  means that the flow will be accessible from the UI.

  Args:
    flow_name: Name of the flow to check access for.
  Returns:
    True if flow is externally accessible.
  Raises:
    access_control.UnauthorizedAccess: if flow is not externally accessible.
  """
  flow_cls = flow.GRRFlow.GetPlugin(flow_name)

  if aff4.issubclass(flow_cls, flow.GRRGlobalFlow) and flow_cls.category:
    return True

  raise access_control.UnauthorizedAccess(
      "Flow %s can't be started globally by non-suid users" % flow_name)
def Layout(self, request, response):
  """Render state, argument and docstring information for a flow."""
  self.flow_name = request.REQ.get("flow_path", "").split("/")[-1]

  try:
    flow_class = flow.GRRFlow.classes[self.flow_name]
    if not aff4.issubclass(flow_class, flow.GRRFlow):
      return response
  except KeyError:
    return response

  self.states = []

  # Collect (name, first doc line, next states) for each state method.
  for method in flow_class.__dict__.values():
    try:
      next_states = method.next_states

      # Only show the first line of the doc string.
      try:
        summary = method.func_doc.split("\n")[0].strip()
      except AttributeError:
        summary = ""

      self.states.append((method.func_name, summary, ", ".join(next_states)))
    except AttributeError:
      # Not a state method - skip it.
      pass

  # Build a readable call prototype from the flow's visible arguments.
  visible_args = ["%s" % descriptor.name
                  for descriptor in flow_class.args_type.type_infos
                  if not descriptor.hidden]
  self.prototype = "%s(%s)" % (flow_class.__name__, ", ".join(visible_args))

  self.flow_doc = flow_class.__doc__

  return super(FlowInformation, self).Layout(request, response)
def Layout(self, request, response):
  """Fill in state, prototype and docstring details for the named flow."""
  self.flow_name = request.REQ.get("flow_path", "").split("/")[-1]

  # Unknown names and non-flow classes both fall back to the raw response.
  flow_class = flow.GRRFlow.classes.get(self.flow_name)
  if not aff4.issubclass(flow_class, flow.GRRFlow):
    return response

  self.states = []

  # Gather information about each state method of the flow.
  for attribute in flow_class.__dict__.values():
    try:
      next_states = attribute.next_states

      # Only the first docstring line is shown.
      doc = getattr(attribute, "func_doc", None)
      try:
        summary = doc.split("\n")[0].strip()
      except AttributeError:
        summary = ""

      self.states.append((attribute.func_name, summary,
                          ", ".join(next_states)))
    except AttributeError:
      pass

  # Now fill in information about each visible arg to this flow.
  arg_names = []
  for descriptor in flow_class.args_type.type_infos:
    if not descriptor.hidden:
      arg_names.append("%s" % descriptor.name)
  self.prototype = "%s(%s)" % (flow_class.__name__, ", ".join(arg_names))

  self.flow_doc = flow_class.__doc__

  return super(FlowInformation, self).Layout(request, response)
def RunTests(client_id=None, platform=None, testname=None, token=None,
             local_worker=False):
  """Runs registered client tests matching the given platform/test name."""
  runner = unittest.TextTestRunner()

  for cls in ClientTestBase.classes.values():
    # Skip anything that doesn't match the requested test name.
    if testname is not None and testname != cls.__name__:
      continue

    if not aff4.issubclass(cls, ClientTestBase):
      continue

    if platform not in cls.platforms:
      continue

    print("Running %s." % cls.__name__)
    try:
      test_case = cls(client_id=client_id, platform=platform, token=token,
                      local_worker=local_worker)
      runner.run(test_case)
    except Exception:  # pylint: disable=broad-except
      logging.exception("Failed to run test %s", cls)
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
  """Returns renderer corresponding to a given value and rendering args."""
  value_is_class = inspect.isclass(value)
  value_cls = value if value_is_class else value.__class__

  cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
  try:
    renderer_cls = cls._renderers_cache[cache_key]
  except KeyError:
    matches = []
    for renderer in ApiValueRenderer.classes.values():
      rendered_class = renderer.value_class
      if not rendered_class:
        continue

      if value_is_class:
        if aff4.issubclass(value_cls, rendered_class):
          matches.append((renderer, rendered_class))
      elif isinstance(value, rendered_class):
        matches.append((renderer, rendered_class))

    if not matches:
      raise RuntimeError("No renderer found for value %s." %
                         value.__class__.__name__)

    # Most derived value class wins; ties resolved by registration order
    # thanks to the stable sort.
    matches.sort(key=lambda match: len(match[1].mro()))
    renderer_cls = matches[-1][0]
    cls._renderers_cache[cache_key] = renderer_cls

  return renderer_cls(limit_lists=limit_lists)
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
  """Returns renderer corresponding to a given value and rendering args."""
  if inspect.isclass(value):
    target_cls = value
  else:
    target_cls = value.__class__

  cache_key = "%s_%d" % (target_cls.__name__, limit_lists)
  renderer_cls = cls._renderers_cache.get(cache_key)
  if renderer_cls is None:
    found = []
    for candidate in ApiValueRenderer.classes.values():
      handled_cls = candidate.value_class
      if not handled_cls:
        continue

      if inspect.isclass(value):
        is_match = aff4.issubclass(target_cls, handled_cls)
      else:
        is_match = isinstance(value, handled_cls)

      if is_match:
        found.append((candidate, handled_cls))

    if not found:
      raise RuntimeError(
          "No renderer found for value %s." % value.__class__.__name__)

    # Pick the candidate handling the most derived class; stable sort keeps
    # the last registered one among equals.
    found.sort(key=lambda pair: len(pair[1].mro()))
    renderer_cls = found[-1][0]
    cls._renderers_cache[cache_key] = renderer_cls

  return renderer_cls(limit_lists=limit_lists)
def RunEndToEndTests():
  """Runs all registered end-to-end client tests on the configured clients."""
  runner = unittest.TextTestRunner()

  # We are running a test so let the config system know that.
  config.CONFIG.AddContext(contexts.TEST_CONTEXT,
                           "Context applied when we run tests.")
  server_startup.Init()

  token = access_control.ACLToken(
      username="GRREndToEndTest", reason="Running end to end client tests.")

  # We need this for the launchbinary test
  with aff4.FACTORY.Create(
      "aff4:/users/GRREndToEndTest", aff4_users.GRRUser, mode="rw",
      token=token) as test_user:
    test_user.AddLabels("admin")

  client_id_set = base.GetClientTestTargets(
      client_ids=flags.FLAGS.client_ids,
      hostnames=flags.FLAGS.hostnames,
      checkin_duration_threshold="1h",
      token=token)

  # Sanity-check every registered test before touching any client.
  for cls in base.ClientTestBase.classes.values():
    for p in cls.platforms:
      if p not in set(["Linux", "Darwin", "Windows"]):
        raise ValueError("Unsupported platform: %s in class %s" %
                         (p, cls.__name__))

  if not client_id_set:
    print("No clients to test on. Define Test.end_to_end_client* config "
          "options, or pass them as parameters.")

  # Maps client urn -> {test class name -> unittest result}.
  results_by_client = {}
  for client in aff4.FACTORY.MultiOpen(client_id_set, token=token):
    client_summary = client.GetSummary()

    # The platform comes from the interrogated summary; without it we can't
    # decide which tests apply.
    if hasattr(client_summary, "system_info"):
      sysinfo = client_summary.system_info
    else:
      raise RuntimeError("Unknown system type, likely waiting on interrogate"
                         " to complete.")

    results = {}
    results_by_client[client.urn] = results
    for cls in base.ClientTestBase.classes.values():
      if flags.FLAGS.testnames and (cls.__name__ not in flags.FLAGS.testnames):
        continue

      if not aff4.issubclass(cls, base.ClientTestBase):
        continue

      # Abstract-prefixed classes are base classes, not runnable tests.
      if cls.__name__.startswith("Abstract"):
        continue

      if sysinfo.system in cls.platforms:
        print "Running %s on %s (%s: %s, %s, %s)" % (cls.__name__,
                                                     client_summary.client_id,
                                                     sysinfo.fqdn,
                                                     sysinfo.system,
                                                     sysinfo.version,
                                                     sysinfo.machine)
        try:
          # Mixin the unittest framework so we can use the test runner to run
          # the test and get nice output.  We don't want to depend on unitttest
          # code in the tests themselves.
          testcase = cls(
              client_id=client_summary.client_id,
              platform=sysinfo.system,
              token=token,
              local_client=flags.FLAGS.local_client,
              local_worker=flags.FLAGS.local_worker)
          results[cls.__name__] = runner.run(testcase)
        except Exception:  # pylint: disable=broad-except
          logging.exception("Failed to run test %s", cls)

  # Print a little summary.
  for client, results in results_by_client.iteritems():
    print "Results for %s:" % client
    for testcase, result in sorted(results.items()):
      res = "[ OK ]"
      if result.errors or result.failures:
        res = "[ FAIL ]"
      print "%45s: %s" % (testcase, res)
def RenderType(self, cls):
  """Dispatches rendering of the class: structs vs. primitive RDF values."""
  if aff4.issubclass(cls, rdfvalue.RDFStruct):
    return self.RenderRDFStruct(cls)
  return self.RenderPrimitiveRDFValue(cls)
#!/usr/bin/env python # Copyright 2010 Google Inc. All Rights Reserved. """Hunts and hunt implementations.""" # pylint: disable=unused-import # These imports populate the GRRHunt registry from grr.lib import aff4 from grr.lib.hunts import implementation from grr.lib.hunts import output_plugins from grr.lib.hunts import results from grr.lib.hunts import standard # Add shortcuts to hunts into this module. for name, cls in implementation.GRRHunt.classes.items(): if aff4.issubclass(cls, implementation.GRRHunt): globals()[name] = cls
def RunEndToEndTests():
  """Runs all registered end-to-end client tests on the configured clients."""
  runner = unittest.TextTestRunner()

  # We are running a test so let the config system know that.
  config_lib.CONFIG.AddContext("Test Context",
                               "Context applied when we run tests.")
  startup.Init()

  token = access_control.ACLToken(username="GRREndToEndTest",
                                  reason="Running end to end client tests.")

  client_id_set = base.GetClientTestTargets(
      client_ids=flags.FLAGS.client_ids,
      hostnames=flags.FLAGS.hostnames,
      checkin_duration_threshold="1h")

  # Validate platform declarations on every registered test up front.
  for cls in base.ClientTestBase.classes.values():
    for p in cls.platforms:
      if p not in set(["Linux", "Darwin", "Windows"]):
        raise ValueError("Unsupported platform: %s in class %s" %
                         (p, cls.__name__))

  if not client_id_set:
    print("No clients to test on. Define Test.end_to_end_client* config "
          "options, or pass them as parameters.")

  for client in aff4.FACTORY.MultiOpen(client_id_set, token=token):
    # NOTE: the client object is replaced by its stored summary here; the
    # platform decides which tests apply.
    client = client.Get(client.SchemaCls.SUMMARY)

    if hasattr(client, "system_info"):
      sysinfo = client.system_info
    else:
      raise RuntimeError("Unknown system type, likely waiting on interrogate"
                         " to complete.")

    for cls in base.ClientTestBase.classes.values():
      if flags.FLAGS.testnames and (cls.__name__ not in flags.FLAGS.testnames):
        continue

      if not aff4.issubclass(cls, base.ClientTestBase):
        continue

      # Fix the call method so we can use the test runner. See doco in
      # base.ClientTestBase
      def _RealCall(testcase, *args, **kwds):
        return testcase.run(*args, **kwds)

      cls.__call__ = _RealCall

      if sysinfo.system in cls.platforms:
        print "Running %s on %s (%s: %s, %s, %s)" % (
            cls.__name__, client.client_id, sysinfo.fqdn, sysinfo.system,
            sysinfo.version, sysinfo.machine)
        try:
          # Mixin the unittest framework so we can use the test runner to run
          # the test and get nice output.  We don't want to depend on unitttest
          # code in the tests themselves.
          testcase = cls(client_id=client.client_id,
                         platform=sysinfo.system,
                         token=token,
                         local_client=flags.FLAGS.local_client,
                         local_worker=flags.FLAGS.local_worker)
          runner.run(testcase)
        except Exception:  # pylint: disable=broad-except
          logging.exception("Failed to run test %s", cls)
def Layout(self, request, response):
  """Manage content pane depending on passed in query parameter."""
  self.reason = request.REQ.get("reason", "")
  # Reasons that look like paths are normalized into http URLs.
  if "/" in self.reason and not self.reason.startswith("http"):
    self.reason = "http://%s" % self.reason

  self.host_advanced_headings = []
  self.host_headings = []
  self.general_headings = datastructures.SortedDict([
      ("General", ("Management", [], [])),
      ("Configuration", ("Configuration", [], []))
  ])

  # Introspect all the categories: every accessible Renderer subclass is
  # sorted into general/advanced and host/host-advanced headings based on
  # its declared behaviours.
  for cls in self.classes.values():
    try:
      if not aff4.issubclass(cls, renderers.Renderer):
        continue

      cls.CheckAccess(request)
    except access_control.UnauthorizedAccess:
      continue

    for behaviour in self.general_headings:
      if behaviour in cls.behaviours:
        self.general_headings[behaviour][1].append((cls, cls.__name__))
      if behaviour + "Advanced" in cls.behaviours:
        self.general_headings[behaviour][2].append((cls, cls.__name__))

    if "Host" in cls.behaviours:
      self.host_headings.append((cls, cls.__name__))
    if "HostAdvanced" in cls.behaviours:
      self.host_advanced_headings.append((cls, cls.__name__))

  # Sort the output so they are in order (by "order", then "description").
  for heading in self.general_headings:
    # pylint: disable=g-long-lambda
    lkey = lambda x: (getattr(x[0], "order", 10),
                      getattr(x[0], "description", ""))
    self.general_headings[heading][1].sort(key=lkey)
  self.host_headings.sort(key=lambda x: getattr(x[0], "order", 10))

  self.hosts = []
  self.unauthorized = False
  self.client_id = request.REQ.get("client_id")
  if self.client_id:
    client = aff4.FACTORY.Open(self.client_id, token=request.token)
    self.hosts.append((self.client_id, client.Get(client.Schema.HOSTNAME)))

    try:
      # Also check for proper access.
      aff4.FACTORY.Open(client.urn.Add("acl_check"), token=request.token)
    except access_control.UnauthorizedAccess as e:
      self.unauthorized = True
      self.unauthorized_exception = e

  super(Navigator, self).Layout(request, response)
  if self.unauthorized:
    # "e" is guaranteed bound here: unauthorized is only set in the except
    # clause above.
    renderers.Renderer.GetPlugin("UnauthorizedRenderer")().Layout(
        request, response, exception=e)

  return self.CallJavascript(response, "Navigator.Layout",
                             renderer=self.__class__.__name__,
                             client_id=self.client_id,
                             poll_time=self.poll_time)
def BuildTypeDescriptor(self, value_cls):
  """Builds an ApiRDFValueDescriptor for the given RDFStruct class.

  Args:
    value_cls: An RDFStruct subclass to describe.

  Returns:
    ApiRDFValueDescriptor carrying the class name, mro, docstring and a
    per-field descriptor (type, enum values, defaults, labels) for every
    entry in value_cls.type_infos.
  """
  result = ApiRDFValueDescriptor(
      name=value_cls.__name__,
      parents=[klass.__name__ for klass in value_cls.__mro__],
      doc=value_cls.__doc__ or "",
      kind="STRUCT")

  for field_desc in value_cls.type_infos:
    repeated = isinstance(field_desc, rdf_structs.ProtoList)
    if hasattr(field_desc, "delegate"):
      # ProtoList wraps the real field descriptor in "delegate".
      field_desc = field_desc.delegate

    field = ApiRDFValueFieldDescriptor(
        name=field_desc.name,
        index=field_desc.field_number,
        repeated=repeated,
        dynamic=isinstance(field_desc, rdf_structs.ProtoDynamicEmbedded))

    field_type = field_desc.type
    if field_type is not None:
      field.type = field_type.__name__

      if field_type.context_help_url:
        field.context_help_url = field_type.context_help_url

    if field_type == rdf_structs.EnumNamedValue:
      # Enum values are listed in numeric order.
      for enum_label in sorted(field_desc.enum, key=field_desc.enum.get):
        enum_value = field_desc.enum[enum_label]
        labels = [
            rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
            for x in enum_value.labels or []
        ]

        field.allowed_values.append(
            ApiRDFAllowedEnumValueDescriptor(
                name=enum_label,
                value=int(enum_value),
                labels=labels,
                doc=enum_value.description))

    if (field_desc.default is not None and
        not aff4.issubclass(field_type, rdf_structs.RDFStruct) and
        hasattr(field_desc, "GetDefault")):
      field.default = field.GetDefaultValueClass()(field_desc.GetDefault())

    if field_desc.description:
      field.doc = field_desc.description

    if field_desc.friendly_name:
      field.friendly_name = field_desc.friendly_name

    if field_desc.labels:
      field.labels = [
          rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
          for x in field_desc.labels
      ]

    result.fields.append(field)

  for processor in self.descriptor_processors:
    result.fields = processor(self, result.fields)

  if getattr(value_cls, "union_field", None):
    result.union_field_name = value_cls.union_field

  try:
    result.default = value_cls()
  except Exception as e:  # pylint: disable=broad-except
    # TODO(user): Some RDFStruct classes can't be constructed using
    # default constructor (without arguments). Fix the code so that
    # we can either construct all the RDFStruct classes with default
    # constructors or know exactly which classes can't be constructed
    # with default constructors.
    # BUGFIX: log the struct being constructed (value_cls), not the last
    # visited field's type. "field_type" could be None (fields may have no
    # type) or even unbound for a field-less struct, which would make this
    # handler itself raise.
    logging.debug("Can't create default for struct %s: %s",
                  value_cls.__name__, e)

  return result
def RendererForRDFValue(cls, rdfvalue_cls_name):
  """Returns the class of the RDFValueRenderer which renders rdfvalue_cls."""
  matching = (candidate for candidate in cls.classes.values()
              if aff4.issubclass(candidate, RDFValueRenderer)
              and candidate.classname == rdfvalue_cls_name)
  # None when no registered renderer matches, mirroring a loop that falls
  # through without returning.
  return next(matching, None)
#!/usr/bin/env python """Hunts and hunt implementations.""" # pylint: disable=unused-import # These imports populate the GRRHunt registry from grr.lib import aff4 from grr.lib.hunts import implementation from grr.lib.hunts import output_plugins from grr.lib.hunts import process_results from grr.lib.hunts import results from grr.lib.hunts import standard # Add shortcuts to hunts into this module. for name, cls in implementation.GRRHunt.classes.items(): if aff4.issubclass(cls, implementation.GRRHunt): globals()[name] = cls
def Layout(self, request, response):
  """Manage content pane depending on passed in query parameter."""
  self.reason = request.REQ.get("reason", "")
  # Reasons that look like paths are normalized into http URLs.
  if "/" in self.reason and not self.reason.startswith("http"):
    self.reason = "http://%s" % self.reason

  self.host_advanced_headings = []
  self.host_headings = []
  self.general_headings = datastructures.SortedDict([
      ("General", ("Management", [], [])),
      ("Configuration", ("Configuration", [], []))
  ])

  # Introspect all the categories: every accessible Renderer subclass is
  # sorted into general/advanced and host/host-advanced headings based on
  # its declared behaviours.
  for cls in self.classes.values():
    try:
      if not aff4.issubclass(cls, renderers.Renderer):
        continue

      cls.CheckAccess(request)
    except access_control.UnauthorizedAccess:
      continue

    for behaviour in self.general_headings:
      if behaviour in cls.behaviours:
        self.general_headings[behaviour][1].append(
            (cls, cls.__name__))
      if behaviour + "Advanced" in cls.behaviours:
        self.general_headings[behaviour][2].append(
            (cls, cls.__name__))

    if "Host" in cls.behaviours:
      self.host_headings.append((cls, cls.__name__))
    if "HostAdvanced" in cls.behaviours:
      self.host_advanced_headings.append((cls, cls.__name__))

  # Sort the output so they are in order (by "order", then "description").
  for heading in self.general_headings:
    # pylint: disable=g-long-lambda
    lkey = lambda x: (getattr(x[0], "order", 10),
                      getattr(x[0], "description", ""))
    self.general_headings[heading][1].sort(key=lkey)
  self.host_headings.sort(key=lambda x: getattr(x[0], "order", 10))

  self.hosts = []
  self.unauthorized = False
  self.client_id = request.REQ.get("client_id")
  if self.client_id:
    client = aff4.FACTORY.Open(self.client_id, token=request.token)
    self.hosts.append(
        (self.client_id, client.Get(client.Schema.HOSTNAME)))

    try:
      # Also check for proper access.
      aff4.FACTORY.Open(client.urn.Add("acl_check"), token=request.token)
    except access_control.UnauthorizedAccess as e:
      self.unauthorized = True
      self.unauthorized_exception = e

  super(Navigator, self).Layout(request, response)
  if self.unauthorized:
    # "e" is guaranteed bound here: unauthorized is only set in the except
    # clause above.
    renderers.Renderer.GetPlugin("UnauthorizedRenderer")().Layout(
        request, response, exception=e)

  return self.CallJavascript(response, "Navigator.Layout",
                             renderer=self.__class__.__name__,
                             client_id=self.client_id,
                             poll_time=self.poll_time)
def RenderMetadata(self, value_cls):
  """Renders a metadata dict describing the given RDFStruct class.

  Args:
    value_cls: An RDFStruct subclass to describe.

  Returns:
    A dict with the class name, mro, docstring and per-field metadata
    (type, enum values, defaults, labels), suitable for serialization.
  """
  fields = []
  for field_desc in value_cls.type_infos:
    repeated = isinstance(field_desc, type_info.ProtoList)
    if hasattr(field_desc, "delegate"):
      # ProtoList wraps the real field descriptor in "delegate".
      field_desc = field_desc.delegate

    field = {
        "name": field_desc.name,
        "index": field_desc.field_number,
        "repeated": repeated,
        "dynamic": isinstance(field_desc, type_info.ProtoDynamicEmbedded)
    }

    field_type = field_desc.type
    if field_type is not None:
      field["type"] = field_type.__name__

    if field_type == rdf_structs.EnumNamedValue:
      # Enum values are listed in numeric order.
      allowed_values = []
      for enum_label in sorted(field_desc.enum, key=field_desc.enum.get):
        enum_value = field_desc.enum[enum_label]
        allowed_values.append(dict(name=enum_label,
                                   value=int(enum_value),
                                   doc=enum_value.description))
      field["allowed_values"] = allowed_values

    field_default = None
    if (field_desc.default is not None and
        not aff4.issubclass(field_type, rdf_structs.RDFStruct) and
        hasattr(field_desc, "GetDefault")):
      field_default = field_desc.GetDefault()
    field["default"] = RenderValue(field_default)

    if field_desc.description:
      field["doc"] = field_desc.description

    if field_desc.friendly_name:
      field["friendly_name"] = field_desc.friendly_name

    if field_desc.labels:
      field["labels"] = [
          rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
          for x in field_desc.labels
      ]

    fields.append(field)

  for processor in self.metadata_processors:
    fields = processor(self, fields)

  result = dict(name=value_cls.__name__,
                mro=[klass.__name__ for klass in value_cls.__mro__],
                doc=value_cls.__doc__ or "",
                fields=fields,
                kind="struct")

  if getattr(value_cls, "union_field", None):
    result["union_field"] = value_cls.union_field

  struct_default = None
  try:
    struct_default = value_cls()
  except Exception as e:  # pylint: disable=broad-except
    # TODO(user): Some RDFStruct classes can't be constructed using
    # default constructor (without arguments). Fix the code so that
    # we can either construct all the RDFStruct classes with default
    # constructors or know exactly which classes can't be constructed
    # with default constructors.
    # BUGFIX: log the struct being constructed (value_cls), not the last
    # visited field's type. "field_type" could be None (fields may have no
    # type) or even unbound for a field-less struct, which would make this
    # handler itself raise.
    logging.debug("Can't create default for struct %s: %s",
                  value_cls.__name__, e)

  if struct_default is not None:
    result["default"] = RenderValue(struct_default)

  return result