def handle_POST(self):
    algorithm_name = self.get_param("algorithm")
    environment_name = self.get_param("environment")
    if not algorithm_name:
        raise Exception("missing algorithm")
    if not environment_name:
        raise Exception("missing environment")
    a = algorithm.get(self.splunk, algorithm_name)
    e = environment.get(self.splunk, environment_name)
    enable_schedule = self.get_param("enable_schedule")
    if enable_schedule:
        enable_schedule = is_truthy(enable_schedule)
    else:
        enable_schedule = None
    deployment_params = {}
    for name in a.runtime.deployment_param_names:
        value = self.get_param(name)
        if value is not None:
            deployment_params[name] = value
    create(
        self.splunk,
        a.name,
        e.name,
        enable_schedule=enable_schedule,
        params=deployment_params,
    )

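# Hypothetical invocation of the POST handler above over Splunk's management
# port. The endpoint path, credentials, and parameter values are assumptions
# for illustration; the real binding is defined in restmap.conf:
#
#   curl -k -u admin:changeme \
#       https://localhost:8089/services/dltk/deployments \
#       -d algorithm=my_algo -d environment=my_env -d enable_schedule=1
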
def handle_GET(self):
    deployed_only = is_truthy(self.get_param("deployed_only", "0"))
    results = []
    for algorithm in algorithm_api.get_all(self.splunk):
        info = {
            "name": algorithm.name,
            "description": algorithm.description,
            "runtime": algorithm.runtime_name,
            "category": algorithm.category,
        }
        status = []
        status_message = []
        editor = []
        for d in deployment.get_all_for_algorithm(self.splunk, algorithm.name):
            status.append(d.status)
            status_message.append(d.status_message)
            editor.append(d.editor_url)
        info["status"] = status
        info["status_message"] = status_message
        info["editor"] = editor
        info["can_be_deleted"] = True
        if deployed_only and not status:
            continue
        results.append(info)
    self.send_entries(results)

def handle_GET(self):
    algorithm_name = self.get_param("algorithm")
    runtime_name = self.get_param("runtime")
    include_deployments = is_truthy(self.get_param("include_deployments"))
    if include_deployments and not algorithm_name:
        raise Exception("include_deployments only works with algorithm_name")
    if algorithm_name:
        a = algorithm_api.get(self.splunk, algorithm_name)
        r = a.runtime
        get_default = r.get_param

        def get_value(name):
            return a.get_param(name, inherit=False)
    elif runtime_name:
        r = runtime.get(self.splunk, runtime_name)
        get_default = r.get_param

        def get_value(_):
            return None
    else:
        raise Exception("requires algorithm or runtime")
    params = []
    params.extend([
        {
            "name": name,
            "label": get_label_for_name(name),
            "default": get_default(name),
            "value": get_value(name),
            "type": "text",  # one of "picker", "text"
            "mandatory": False,
            "important": True,
        }
        for name in r.algorithm_param_names
    ])
    if include_deployments:
        for d in deployment.get_all_for_algorithm(self.splunk, algorithm_name):
            e = d.environment
            params.extend([
                {
                    "environment": e.name,
                    "name": name,
                    "label": get_label_for_name(name),
                    "default": deployment.get_default_param(name, e, algorithm=a),
                    "value": d.get_param(name, inherit=False),
                    "type": "text",  # one of "picker", "text"
                    "mandatory": False,
                    "important": True,
                }
                for name in r.deployment_param_names
            ])
    self.send_entries(params)

def handle_PUT(self):
    enable_schedule = self.get_param("enable_schedule")
    if enable_schedule is not None:
        enable_schedule = is_truthy(enable_schedule)
    algorithm_name = self.get_param("algorithm")
    if not algorithm_name:
        raise Exception("algorithm missing")
    environment_name = self.get_param("environment")
    if not environment_name:
        raise Exception("environment missing")
    d = deployment.get(self.splunk, algorithm_name, environment_name)
    restart_required = self.get_param("restart_required")
    if restart_required is not None:
        d.restart_required = is_truthy(restart_required)
    editable = self.get_param("editable")
    if editable is not None:
        d.editable = is_truthy(editable)
    is_disabled = self.get_param("disabled")
    if is_disabled is not None:
        d.is_disabled = is_truthy(is_disabled)
    d.trigger_deploying(enable_schedule=enable_schedule)

def handle_DELETE(self):
    query = self.request['query']
    algorithm_name = query.get("algorithm", "")
    if not algorithm_name:
        raise Exception("missing algorithm")
    environment_name = query.get("environment", "")
    if not environment_name:
        raise Exception("missing environment")
    d = get(self.splunk, algorithm_name, environment_name)
    if not d:
        self.response.setStatus(404)
        return
    enable_schedule = query.get("enable_schedule", "")
    if enable_schedule:
        enable_schedule = is_truthy(enable_schedule)
    else:
        enable_schedule = None
    delete(self.splunk, d, enable_schedule=enable_schedule)

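# All of the handlers and accessors in this excerpt funnel user-supplied
# values through is_truthy. Its implementation is not shown here; the
# following is a minimal sketch under the assumption that it normalizes the
# usual Splunk-style boolean tokens (the exact accepted set is an assumption):
def is_truthy(value):
    # Treat real booleans as-is; coerce everything else through its string
    # form so conf values like "1", "true", "t", "yes", "y" map to True.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("1", "true", "t", "yes", "y")
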
def store_models_in_volume(self):
    return is_truthy(self.get_param("store_models_in_volume"))

def has_sfx_smart_agent(self):
    if "sfx_smart_agent" in self._stanza:
        return is_truthy(self._stanza["sfx_smart_agent"])
    else:
        return None

def is_disabled(self, value):
    value = is_truthy(value)
    if self.is_disabled != value:
        self._stanza.submit({"disabled": value})
        self._stanza.refresh()

def setup(self):
    logging.info("setting up")
    self.command = self.getinfo['searchinfo']["command"]
    search_id = self.getinfo['searchinfo']["sid"]
    self.dispatch_dir = self.getinfo['searchinfo']["dispatch_dir"]
    logging.info("command name: %s" % self.command)
    is_searchpeer = "searchpeers" in __file__
    command_options = options_parser.parse_options(
        self.getinfo['searchinfo']["args"],
        [
            "model",
            "method",
            "algorithm",
            "environment",
            "type",
            "max_buffer_size",
            "is_preop",
            "prevent_preop",
            "fields",
            "opentracing_endpoint",
            "opentracing_user",
            "opentracing_password",
            "trace_context",
            "search_id",
            "trace_level",
        ],
        ignore_unknown=True,
    )
    # raise Exception("command_options: %s" % command_options)
    if "search_id" in command_options:
        search_id = command_options["search_id"]
    if "trace_level" in command_options:
        trace_level = command_options["trace_level"]
    else:
        trace_level = ""
    is_temp_search = (search_id.startswith("searchparsetmp")
                      or search_id.endswith("_tmp"))
    logging.info("is_temp_search: %s" % is_temp_search)
    if len(search_id) > 20:
        search_id = search_id[-20:]
    app_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "..", "..", ".."))
    if "fields" not in command_options:
        raise Exception("missing 'fields' parameter")
    self.fields = command_options["fields"].split(",")
    if "is_preop" in command_options:
        is_preop = is_truthy(command_options["is_preop"])
    else:
        is_preop = False
    splunk = ConfBasedService(app_path)
    logging.info("is_preop: %s" % is_preop)
    if "prevent_preop" in command_options:
        prevent_preop = is_truthy(command_options["prevent_preop"])
    else:
        prevent_preop = False
    logging.info("prevent_preop: %s" % prevent_preop)
    if "model" in command_options:
        self.model = model.get(splunk, command_options["model"])
        self.deployment = self.model.deployment
    else:
        if "algorithm" not in command_options:
            raise Exception("missing 'algorithm' parameter")
        algorithm_name = command_options["algorithm"]
        if "environment" not in command_options:
            raise Exception("missing 'environment' parameter")
        environment_name = command_options["environment"]
        self.deployment = deployment.get(
            splunk, algorithm_name, environment_name)
        if not self.deployment:
            raise Exception(
                "Algorithm \"%s\" is not deployed to environment \"%s\"" %
                (algorithm_name, environment_name))
        self.model = None
    if "method" not in command_options:
        default_method_name = self.deployment.algorithm.default_method
        if default_method_name:
            method_name = default_method_name
        else:
            raise Exception("missing 'method' parameter. E.g. method=fit")
    else:
        method_name = command_options["method"]
    method = self.deployment.algorithm.get_method(method_name)
    if not method:
        raise Exception("method '%s' does not exist" % method_name)
    if "max_buffer_size" in command_options:
        max_buffer_size = command_options["max_buffer_size"]
    else:
        max_buffer_size = method.max_buffer_size
    if not max_buffer_size or max_buffer_size == "auto":
        # keep initial class value
        pass
    elif max_buffer_size == "all":
        self.max_buffer_size = 0
    else:
        self.max_buffer_size = int(max_buffer_size)
    self.reset_buffer()
    if "type" not in command_options:
        command_type = method.command_type
    else:
        command_type = command_options["type"]
    opentracing_endpoint = self.deployment.environment.opentracing_endpoint
    opentracing_user = self.deployment.environment.opentracing_user
    opentracing_password = self.deployment.environment.opentracing_password
    if "opentracing_endpoint" in command_options:
        opentracing_endpoint = command_options["opentracing_endpoint"]
    if "opentracing_user" in command_options:
        opentracing_user = command_options["opentracing_user"]
    if "opentracing_password" in command_options:
        opentracing_password = command_options["opentracing_password"]
    if "trace_context" in command_options:
        self.trace_context_string = command_options["trace_context"]
    if opentracing_endpoint and opentracing_user and opentracing_password:
        trace_config = jaeger_client.Config(
            config={
                'sampler': {
                    'type': 'const',
                    'param': 1,
                },
                'logging': False,
                'jaeger_endpoint': opentracing_endpoint,
                'jaeger_user': opentracing_user,
                'jaeger_password': opentracing_password,
                "propagation": "b3",
            },
            service_name="indexer" if is_searchpeer else "search_head",
            validate=True,
        )
        self.tracer = trace_config.initialize_tracer()
    else:
        self.tracer = opentracing.tracer
    if self.trace_context_string:
        trace_context_bytes = self.trace_context_string.encode()
        trace_context_bytes = base64.b64decode(trace_context_bytes)
        trace_context_dict = json.loads(trace_context_bytes)
        trace_context = opentracing.tracer.extract(
            format=opentracing.propagation.Format.TEXT_MAP,
            carrier=trace_context_dict)
    else:
        trace_context = None
    self.trace_scope = self.tracer.start_span(
        "compute-command",
        child_of=trace_context,
        # ignore_active_span
        tags={
            'dltk-method': method_name,
            'dltk-search_id': search_id,
            'dltk-algorithm': self.deployment.algorithm.name,
            'dltk-runtime': self.deployment.algorithm.runtime.name,
            'dltk-environment': self.deployment.environment.name,
            'dltk-preop': is_preop,
            'dltk-command_type': command_type,
        })
    if not self.trace_context_string:
        trace_context_dict = {}
        opentracing.tracer.inject(
            span_context=self.trace_scope,
            format=opentracing.propagation.Format.TEXT_MAP,
            carrier=trace_context_dict)
        trace_context_json = json.dumps(trace_context_dict)
        self.trace_context_string = base64.b64encode(
            trace_context_json.encode()).decode()
    if not trace_context:
        trace_context_dict = {}
        opentracing.tracer.inject(
            span_context=self.trace_scope,
            format=opentracing.propagation.Format.HTTP_HEADERS,
            carrier=trace_context_dict)
        if "X-B3-TraceId" in trace_context_dict:
            self.trace_id = trace_context_dict["X-B3-TraceId"]
    if self.deployment.is_disabled:
        raise Exception("Algorithm '%s' disabled for environment '%s'" % (
            self.deployment.algorithm_name,
            self.deployment.environment_name,
        ))
    if not self.deployment.is_deployed:
        raise Exception(
            "Algorithm '%s' not yet successfully deployed to environment "
            "'%s'. (status: %s message: %s)" % (
                self.deployment.algorithm_name,
                self.deployment.environment_name,
                self.deployment.status,
                self.deployment.status_message,
            ))
    self.context = ExecutionContext(
        is_preop=is_preop,
        is_searchpeer=is_searchpeer,
        search_id=search_id,
        model=self.model,
        root_trace_context_string=self.trace_context_string,
        method=method,
        message_logger=self.messages_logger,
        fields=self.fields,
        params=command_options)
    if not is_temp_search:
        with self.tracer.start_active_span(
                "initialize",
                child_of=self.trace_scope,
        ):
            self.execution = self.deployment.create_execution(self.context)
            try:
                self.execution.logger.warning(
                    "starting execution setup (search=\"%s\")" %
                    self.getinfo['searchinfo']["search"])
                self.execution.setup()
                self.execution.logger.warning("execution setup stopped")
            except Exception as e:
                self.execution.logger.warning(traceback.format_exc())
                self.die(
                    "Unexpected error starting deployment execution: %s" %
                    (', '.join(traceback.format_exception_only(type(e), e))))
    # https://community.splunk.com/t5/Splunk-Search/Metadata-fields-of-custom-search-command/m-p/300360
    # mltk fit: EVENTS
    # mltk apply: streaming or stateful
    info = {
        "type": command_type,
        "required_fields": self.fields,
        # generating
        # maxinputs
    }
    # streaming_preop
    support_preop = method.support_preop and not prevent_preop
    if not is_preop and command_type == "reporting" and support_preop:
        info.update({
            "streaming_preop": "%s is_preop=1 type=streaming %s %s %s %s %s %s %s %s" % (
                self.command,
                "search_id=\"%s\"" % search_id,
                "method=\"%s\"" % method_name,
                ("model=\"%s\"" % command_options["model"])
                if "model" in command_options else "",
                ("algorithm=\"%s\"" % command_options["algorithm"])
                if "algorithm" in command_options else "",
                ("environment=\"%s\"" % command_options["environment"])
                if "environment" in command_options else "",
                ("max_buffer_size=%s" % command_options["max_buffer_size"])
                if "max_buffer_size" in command_options else "",
                ("fields=\"%s\"" % command_options["fields"])
                if "fields" in command_options else "",
                "trace_context=\"%s\"" % self.trace_context_string,
            ),
        })
    return info

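# Example SPL invocation of the command that setup() above configures. The
# command name "compute" and the argument values are assumptions for
# illustration; the actual name comes from commands.conf and is read from
# getinfo at runtime:
#
#   | compute algorithm="my_algorithm" environment="my_environment"
#       method="fit" fields="feature_a,feature_b" max_buffer_size=auto
#
# For a "reporting" method that supports preops, setup() also returns a
# "streaming_preop" search string so search peers run a streaming pre-phase
# of the same command with is_preop=1.
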
def restart_required(self, value):
    value = is_truthy(value)
    if self.restart_required != value:
        self._stanza.submit({"restart_required": value})
        self._stanza.refresh()

def is_disabled(self):
    value = self._stanza["disabled"]
    if value is None:
        value = False
    return is_truthy(value)

def restart_required(self):
    value = self._stanza["restart_required"]
    if value is None:
        return False
    return is_truthy(value)

def editable(self, value):
    value = is_truthy(value)
    if self.editable != value:
        self._stanza.submit({"editable": value})
        self._stanza.refresh()

def editable(self):
    value = self._stanza["editable"]
    if value is None:
        return False
    return is_truthy(value)

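# The paired accessors above (is_disabled, restart_required, editable) each
# read and write the same stanza key, which suggests property getter/setter
# pairs in the full class. A minimal sketch of the presumed wiring (the class
# name and dict-backed stanza are assumptions made for illustration):
class _StanzaFlagSketch:
    def __init__(self, stanza):
        self._stanza = stanza  # dict-like stand-in for a conf stanza

    @property
    def editable(self):
        value = self._stanza.get("editable")
        if value is None:
            return False
        return is_truthy(value)

    @editable.setter
    def editable(self, value):
        value = is_truthy(value)
        if self.editable != value:
            # The real class submits to the Splunk conf system and refreshes;
            # here we just update the backing dict.
            self._stanza["editable"] = value
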
def is_schedule_enabled(splunk):
    return is_truthy(splunk.confs[conf_name]["general"]["enableSched"])

def can_be_deleted(self):
    return is_truthy(self._stanza.access["removable"])
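
# Note on can_be_deleted: _stanza.access surfaces the entity's ACL metadata
# (eai:acl) from the Splunk REST API; its "removable" flag reports whether
# the stanza may be deleted over REST, which is what this method forwards.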