def post(self, request):
    """Create missing home directories for all active users with an export entry."""
    # users that still need a home directory on an active export device
    pending_users = user.objects.exclude(
        Q(export=None)
    ).filter(
        Q(home_dir_created=False) & Q(active=True) & Q(group__active=True)
    ).select_related("export__device")
    logger.info("user homes to create: {:d}".format(len(pending_users)))
    for pending_user in pending_users:
        logger.info(
            "trying to create user_home for '{}' on server {}".format(
                str(pending_user),
                pending_user.export.device.full_name,
            )
        )
        srv_com = server_command.srv_command(command="create_user_home")
        srv_com["server_key:username"] = pending_user.login
        _result = contact_server(
            request,
            icswServiceEnum.cluster_server,
            srv_com,
            timeout=30,
            target_server_id=pending_user.export.device_id,
        )
    # force sync_users
    request.user.save()
    # if a monitor server is configured, resync its HTTP users as well
    if config_tools.icswServerCheck(service_type_enum=icswServiceEnum.monitor_server).effective_device:
        srv_com = server_command.srv_command(command="sync_http_users")
        _result = contact_server(request, icswServiceEnum.monitor_server, srv_com)
def run(self, cur_bg):
    """Build one 'refresh' command per bootserver for the devices referenced
    in the background job.

    Returns a list of (background_job_run, srv_command, service-enum)
    tuples, one per bootserver that serves at least one of the devices.
    """
    to_run = []
    _src_com = server_command.srv_command(source=cur_bg.command_xml)
    devs = device.objects.filter(
        Q(pk__in=[int(_pk) for _pk in _src_com.xpath(".//ns:object/@pk")])
    )
    # split for bootservers; devices without a bootserver are skipped
    _boot_dict = {}
    for _dev in devs:
        if _dev.bootserver_id:
            _boot_dict.setdefault(_dev.bootserver_id, []).append(_dev)
    # fix: iteritems() and unicode() are Python-2-only; use items() / str()
    # as the rest of the file already does
    for srv_id, dev_list in _boot_dict.items():
        # target command for this bootserver
        srv_com = server_command.srv_command(command="refresh")
        srv_com["devices"] = srv_com.builder(
            "devices",
            *[
                srv_com.builder("device", name=dev.name, pk="{:d}".format(dev.pk)) for dev in dev_list
            ]
        )
        to_run.append(
            (
                background_job_run(
                    background_job=cur_bg,
                    server=dev_list[0].bootserver,
                    command_xml=str(srv_com),
                    start=cluster_timezone.localize(datetime.datetime.now()),
                ),
                srv_com,
                icswServiceEnum.mother_server,
            )
        )
    return to_run
def main():
    """Send a passive check result to the monitoring server and exit with its status."""
    parser = _get_parser()
    opts = parser.parse_args()
    zmq_ctx = zmq.Context()
    sender = process_tools.get_socket(
        zmq_ctx,
        "DEALER",
        identity="spcc_{:d}".format(os.getpid()),
    )
    conn_str = "tcp://{}:{:d}".format(opts.host, opts.port)
    check_com = server_command.srv_command(
        command="passive_check_result",
        device=opts.device,
        check=opts.check,
        state=opts.state,
        output=opts.output,
    )
    sender.connect(conn_str)
    sender.send_unicode(str(check_com))
    # opts.timeout is in seconds, poll() expects milliseconds
    if sender.poll(opts.timeout * 1000):
        reply = server_command.srv_command(source=sender.recv())
        log_str, exit_code = reply.get_log_tuple()
        print(log_str)
    else:
        print(
            "error timeout in receive() from {} after {}".format(
                conn_str,
                logging_tools.get_plural("second", opts.timeout)
            )
        )
        exit_code = 1
    sender.close()
    zmq_ctx.term()
    sys.exit(exit_code)
def do_chpasswd(cur_opts, log_com):
    """Interactively change a user's password via the local cluster server.

    NOTE(review): the original source was corrupted (several string literals
    were redacted to '******', fusing adjacent statements). The password
    prompts and the ZMQConnection identity below are a best-effort
    reconstruction from the surrounding code -- confirm against VCS history.
    """
    srv_com = server_command.srv_command(command="modify_password")
    srv_com["server_key:user_name"] = cur_opts.username
    print("changing password for user '{}'".format(cur_opts.username))
    # passwords travel bz2-compressed and base64-encoded
    # assumes get_pass returns bytes suitable for bz2.compress -- TODO confirm
    srv_com["server_key:old_password"] = base64.b64encode(
        bz2.compress(
            get_pass("please enter current password: ")
        )
    )
    srv_com["server_key:new_password_1"] = base64.b64encode(
        bz2.compress(
            get_pass("please enter the new password: ")
        )
    )
    srv_com["server_key:new_password_2"] = base64.b64encode(
        bz2.compress(
            get_pass("please reenter the new password: ")
        )
    )
    _conn = net_tools.ZMQConnection(
        "pwd_change_request",
        timeout=cur_opts.timeout,
    )
    _conn.add_connection("tcp://localhost:8004", srv_com, immediate=True)
    _result = _conn.loop()[0]
    _res_str, _res_state = _result.get_log_tuple()
    print(
        "change gave [{}]: {}".format(
            logging_tools.get_log_level_str(_res_state),
            _res_str,
        )
    )
    if _res_state == logging_tools.LOG_LEVEL_OK:
        # password change succeeded, trigger an LDAP resync
        _conn = net_tools.ZMQConnection(
            "ldap_update_request",
            timeout=cur_opts.timeout,
        )
        upd_com = server_command.srv_command(command="sync_ldap_config")
        _conn.add_connection("tcp://localhost:8004", upd_com, immediate=True)
        _res_str, _res_state = _conn.loop()[0].get_log_tuple()
        print(
            "syncing the LDAP tree returned ({}) {}".format(
                logging_tools.get_log_level_str(_res_state),
                _res_str,
            )
        )
    return 0
def post(self, request):
    """Send a hard-control command to the controlling devices of the
    selected cd_connections."""
    from initat.cluster.backbone.server_enums import icswServiceEnum
    _post = request.POST
    con_pks = json.loads(_post["cd_pk_list"])
    con_list = cd_connection.objects.select_related(
        "child", "parent"
    ).filter(Q(pk__in=con_pks))
    command = _post["command"]
    logger.info(
        "got hc command '{}' for {}:".format(
            command,
            logging_tools.get_plural("device", len(con_pks))
        )
    )
    for _con in con_list:
        logger.info(
            " device {} (controlling device: {})".format(
                str(_con.child),
                str(_con.parent)
            )
        )
    hc_com = server_command.srv_command(command="hard_control")
    hc_com["devices"] = hc_com.builder(
        "devices",
        *[
            hc_com.builder(
                "device",
                command=command,
                pk="{:d}".format(_con.parent_id),
                cd_con="{:d}".format(_con.pk),
                bootserver_hint="{:d}".format(_con.child.bootserver_id),
            ) for _con in con_list
        ]
    )
    contact_server(request, icswServiceEnum.mother_server, hc_com, timeout=10)
def post(self, request):
    """Delete the selected report-history objects on the report server and
    return the number of deleted entries as JSON."""
    import json
    from django.http import HttpResponse
    from initat.cluster.backbone.server_enums import icswServiceEnum
    from initat.cluster.frontend.helper_functions import contact_server
    from initat.tools import server_command
    raw_list = request.POST.getlist("idx_list[]", [])
    idx_list = [int(entry) for entry in raw_list]
    del_com = server_command.srv_command(command="delete_report_history_objects")
    del_com["idx_list"] = json.dumps(idx_list)
    result, _ = contact_server(
        request,
        icswServiceEnum.report_server,
        del_com,
    )
    # report server replies with the number of removed entries
    num_deleted = int(result["deleted"].text) if result else 0
    return HttpResponse(json.dumps({'deleted': num_deleted}))
def post(self, request):
    """Trigger generation of an xlsx report for the selected devices and
    return the report id as JSON."""
    import json
    from django.http import HttpResponse
    from initat.cluster.backbone.server_enums import icswServiceEnum
    from initat.cluster.frontend.helper_functions import contact_server
    from initat.tools import server_command
    pk_settings, _devices = _init_report_settings(request)
    report_com = server_command.srv_command(command="generate_report")
    report_com['format'] = 'xlsx'
    report_com['pk_settings'] = str(pk_settings)
    report_com['devices'] = str([d.idx for d in _devices])
    result, _ = contact_server(
        request,
        icswServiceEnum.report_server,
        report_com,
    )
    # fall back to 0 when the report server could not be reached
    report_id = result.get("report_id") if result is not None else 0
    return HttpResponse(json.dumps({'report_id': report_id}))
def _handle_return(self, recv_id, recv_str):
    """Parse the reply XML, print result / nodestatus information and set
    self.ret_state accordingly.

    Returns the parsed srv_command, or None when the reply could not be
    interpreted (ret_state is then set to 1).
    """
    try:
        srv_reply = server_command.srv_command(source=recv_str)
    except Exception:
        # fix: was a bare 'except:' which also swallowed
        # KeyboardInterrupt / SystemExit
        print("cannot interpret reply: {}".format(
            process_tools.get_except_info()))
        print("reply was: {}".format(recv_str))
        srv_reply = None
        self.ret_state = 1
    else:
        self.verbose("\nXML response (id: '{}'):\n{}\n".format(
            recv_id, srv_reply.pretty_print()))
        if "result" in srv_reply:
            # fix: reuse the looked-up result node instead of re-indexing
            _result = srv_reply["result"]
            if not self.args.quiet:
                print(_result.attrib.get(
                    "reply", "no reply attribute in result node"))
            self.ret_state = int(_result.attrib.get(
                "state", server_command.SRV_REPLY_STATE_UNSET))
        elif len(srv_reply.xpath(".//nodestatus", smart_strings=False)):
            print(
                srv_reply.xpath(".//nodestatus", smart_strings=False)[0].text)
            self.ret_state = 0
        else:
            print("no result node found in reply")
            self.ret_state = 2
    return srv_reply
def post(self, request):
    """Forward a server_control request (e.g. start/stop of a service
    instance) to the target cluster server."""
    control_cmd = json.loads(request.POST["cmd"])
    logger.info(
        "got server_control '{0}' for instance {1} (server_id {2:d})".format(
            control_cmd["type"],
            control_cmd["instance"],
            int(control_cmd["server_id"]),
        )
    )
    ctrl_com = server_command.srv_command(
        command="server_control",
        control=control_cmd["type"],
        services=control_cmd["instance"]
    )
    request.xml_response["result"] = contact_server(
        request,
        icswServiceEnum.cluster_server,
        ctrl_com,
        timeout=10,
        connection_id="server_control",
        target_server_id=control_cmd["server_id"],
    )
def post(self, request):
    # collect the server_status of every resolved cluster-server instance
    # and store the (namespace-stripped) XML trees in the XML response
    cur_routing = routing.SrvTypeRouting(force=True)
    _server_list = []
    for _server in cur_routing.resolv_dict.get(icswServiceEnum.cluster_server.name, []):
        # _server is a tuple: [0] is the server name, [2] the target
        # server id (presumably the device pk -- verify against
        # SrvTypeRouting.resolv_dict)
        srv_com = server_command.srv_command(command="server_status")
        _res = contact_server(
            request,
            icswServiceEnum.cluster_server,
            srv_com,
            timeout=10,
            connection_id="server_status",
            target_server_id=_server[2],
        )
        if _res is not None and _res.tree is not None:
            # dirty stuff: annotate the reply tree with server name / id
            _res["command"].attrib["server_name"] = _server[0]
            _res["command"].attrib["server_id"] = "{:d}".format(_server[2])
            _tree = _res.tree
        else:
            # no reply received; annotate the request command instead so
            # the client still gets an entry for this server
            srv_com["command"].attrib["server_name"] = _server[0]
            srv_com["command"].attrib["server_id"] = "{:d}".format(_server[2])
            _tree = srv_com.tree
        # strip XML namespaces in place ('{uri}tag' -> 'tag') so the
        # frontend can address the nodes without namespace prefixes
        for _node in _tree.iter():
            if str(_node.tag).startswith("{"):
                _node.tag = _node.tag.split("}", 1)[1]
        _server_list.append(_tree)
    request.xml_response["result"] = _server_list
def post(self, request):
    """Fetch the monitoring host configuration for the given device pks."""
    _post = request.POST
    # pk list either arrives JSON-encoded or as a form list
    if "pk_list" in _post:
        pk_list = json.loads(_post["pk_list"])
    else:
        pk_list = request.POST.getlist("pks[]")
    cfg_com = server_command.srv_command(command="get_host_config")
    cfg_com["mode"] = _post["mode"]
    cfg_com["device_list"] = E.device_list(
        *[
            E.device(
                pk="{:d}".format(int(cur_pk)),
                only_build="1",
            ) for cur_pk in pk_list
        ]
    )
    result = contact_server(request, icswServiceEnum.monitor_server, cfg_com, timeout=30)
    if not result:
        request.xml_response.error("no result", logger=logger)
    elif _post["mode"] != "fetch":
        node_results = result.xpath(".//result", smart_strings=False)
        if len(node_results):
            request.xml_response["result"] = node_results[0]
        else:
            request.xml_response.error("no config", logger=logger)
def send_passive_results_as_chunk_to_master(self, ascii_chunk):
    """Forward one chunk of passive check results to the master via the syncer."""
    self.log("sending passive chunk (size {:d}) to master".format(len(ascii_chunk)))
    chunk_com = server_command.srv_command(
        command="passive_check_results_as_chunk",
        ascii_chunk=ascii_chunk,
    )
    self.send_to_syncer(chunk_com)
def reload_searches(request):
    """Ask the package server to reload its package searches."""
    reload_com = server_command.srv_command(command="reload_searches")
    return contact_server(
        request,
        icswServiceEnum.package_server,
        reload_com,
        timeout=5,
        log_result=False,
    )
def hostmonitor_full_update_handler_callback(self, callback_dict, result):
    """Handle the status reply of a host-monitor full update.

    Updates callback_dict with command / result / progress information,
    re-polls the device while the update is below 100% and finally
    propagates a device-free copy of the dict over the hm_status stream.
    """
    callback_dict["command"] = "full_update_status"
    callback_dict["result"] = -1
    callback_dict["error_string"] = None
    if result:
        if "update_status" in result:
            callback_dict["result"] = 1
            progress = float(result["update_status"].text)
            if progress < 100.00:
                # update still running, schedule the next status request
                _device = callback_dict["device"]
                conn_str = "tcp://{}:{:d}".format(_device.target_ip, self.__hm_port)
                new_srv_com = server_command.srv_command(
                    command="full_update")
                hm_command = HostMonitoringCommand(
                    self.hostmonitor_full_update_handler_callback,
                    callback_dict,
                    timeout=5)
                self.discovery_process.send_pool_message(
                    "send_host_monitor_command",
                    hm_command.run_index,
                    conn_str,
                    str(new_srv_com))
            callback_dict["progress"] = progress
        else:
            callback_dict["error_string"], _ = result.get_result()
    # the device object is not serializable for the channel layer, strip it
    # (idiom fix: dict comprehension instead of a manual copy loop)
    callback_dict_copy = {
        key: value for key, value in callback_dict.items() if key != "device"
    }
    propagate_channel_object(WSStreamEnum.hm_status, callback_dict_copy)
def hostmonitor_full_update_handler(self, schedule_item):
    """Kick off a full host-monitor update on every scheduled device."""
    import pickle
    import binascii
    # NOTE(review): pickle.loads on stored scheduler data -- only safe as
    # long as schedule_handler_data is written exclusively by this service
    data = pickle.loads(
        binascii.a2b_base64(schedule_item.schedule_handler_data))
    target_devices = device.objects.filter(idx__in=data["device_ids"])
    update_file_data = binascii.b2a_base64(
        data["update_file_data"]).decode()
    self.discovery_process.get_route_to_devices(global_config, target_devices)
    for _device in target_devices:
        conn_str = "tcp://{}:{:d}".format(_device.target_ip, self.__hm_port)
        update_com = server_command.srv_command(command="full_update")
        update_com["update_file_data"] = update_file_data
        callback_dict = {
            "device": _device,
            "device_pk": _device.idx,
            "update_file_version": data["update_file_version"],
            "update_file_checksum": data["update_file_checksum"],
            "update_file_platform_bits": data["update_file_platform_bits"],
            "update_file_platform": PlatformSystemTypeEnum.WINDOWS.name
        }
        hm_command = HostMonitoringCommand(
            self.hostmonitor_full_update_handler_callback,
            callback_dict,
            timeout=30)
        self.discovery_process.send_pool_message(
            "send_host_monitor_command",
            hm_command.run_index,
            conn_str,
            str(update_com))
def post(self, request):
    """Activate the named image (use_image)."""
    img_name = request.POST["img_name"]
    logger.info("use_image called, image_name {}".format(img_name))
    list_com = server_command.srv_command(command="get_image_list")
    srv_result = contact_server(
        request,
        icswServiceEnum.cluster_server,
        list_com,
        timeout=10,
        log_result=False,
    )
    image.take_image(request.xml_response, srv_result, img_name, logger=logger)
def set_zmq_parameters(self, conn_str):
    """Store the 0MQ connection string and build the matching command object."""
    self.zmq_conn_str = conn_str
    self.zmq_command = server_command.srv_command(command=self.ext_com)
    log_line = u"connection_str is {} ({})".format(
        self.zmq_conn_str,
        self.ext_com,
    )
    self.log(log_line)
def _get_kpi_source_data(self, srv_com_src, **kwargs):
    """Calculate KPI source data for the requested (device, monitoring
    category) tuples and time range, then send the serialized result back."""
    srv_com = server_command.srv_command(source=srv_com_src)
    dev_mon_cat_tuples = json.loads(srv_com['dev_mon_cat_tuples'].text)
    KpiGlobals.set_context()
    start, end = Kpi.objects.parse_kpi_time_range(
        json.loads(srv_com['time_range'].text),
        json.loads(srv_com['time_range_parameter'].text),
    )
    self.log(
        "Calculating KPI source data for: {}; start: {}; end: {}".format(
            dev_mon_cat_tuples, start, end
        )
    )
    kpi_data = KpiData(
        self.log,
        global_config,
        dev_mon_cat_tuples=dev_mon_cat_tuples,
    )
    kpi_set = kpi_data.get_kpi_set_for_dev_mon_cat_tuples(start, end)
    srv_com.set_result("ok")
    srv_com['kpi_set'] = json.dumps(kpi_set.serialize())
    self.send_pool_message("remote_call_async_result", str(srv_com))
    self.log("Finished KPI source data")
def post(self, request):
    """Fetch configuration variables for one or more devices from the
    config server and merge them into the XML response."""
    from initat.cluster.backbone.server_enums import icswServiceEnum
    _post = request.POST
    # either a JSON list of keys or a single key
    if "keys" in _post:
        pk_list = json.loads(_post["keys"])
    else:
        pk_list = [_post["key"]]
    var_com = server_command.srv_command(command="get_config_vars")
    var_com["devices"] = var_com.builder(
        "devices",
        *[
            var_com.builder("device", pk="{:d}".format(int(cur_pk)))
            for cur_pk in pk_list
        ]
    )
    result = contact_server(
        request,
        icswServiceEnum.config_server,
        var_com,
        timeout=30,
        log_result=False,
    )
    if result:
        request.xml_response["result"] = E.devices()
        for dev_node in result.xpath(".//ns:device", smart_strings=False):
            res_node = E.device(dev_node.text, **dev_node.attrib)
            for sub_el in dev_node:
                res_node.append(sub_el)
            request.xml_response["result"].append(res_node)
            request.xml_response.log(
                int(dev_node.attrib["state_level"]),
                dev_node.attrib["info_str"],
                logger=logger,
            )
def _reload_searches(self, *args, **kwargs):
    """Reload package searches, acknowledging the triggering command if one
    was passed in args[0]."""
    self.log("reloading searches")
    if args:
        ack_com = server_command.srv_command(source=args[0])
        ack_com.set_result("ok reloading searches")
        self.send_pool_message("remote_call_async_result", str(ack_com))
    self.check_for_searches()
def post(self, request):
    """Queue the given model objects for deletion and notify the cluster
    server to process the queue."""
    obj_pks = json.loads(request.POST.get("obj_pks"))
    model_name = request.POST.get("model")
    model = getattr(initat.cluster.backbone.models, model_name)
    for obj_pk in obj_pks:
        obj = model.objects.get(pk=obj_pk)
        # disable immediately so the object stops being used while queued
        if hasattr(obj, "enabled"):
            obj.enabled = False
            obj.save()
        already_queued = DeleteRequest.objects.filter(
            obj_pk=obj_pk, model=model_name
        ).exists()
        if already_queued:
            request.xml_response.error(
                "This object is already in the deletion queue.")
        else:
            del_req = DeleteRequest(
                obj_pk=obj_pk,
                model=model_name,
                delete_strategies=request.POST.get("delete_strategies", None),
            )
            with transaction.atomic():
                # save right away, not after request finishes, since cluster server is notified now
                del_req.save()
    notify_com = server_command.srv_command(command="handle_delete_requests")
    contact_server(request, icswServiceEnum.cluster_server, notify_com, log_result=False)
def handle_mon_command(self, srv_com):
    """Translate a monitoring command into external-command lines and hand
    them to the syncer process.

    Currently only the 'ack' action on 'hosts' is implemented.
    """
    action = srv_com["*action"]
    t_type = srv_com["*type"]
    key_list = srv_com["*key_list"]
    self.log("got mon_command action '{}' ({}, {})".format(
        action,
        t_type,
        logging_tools.get_plural("entry", len(key_list)),
    ))
    _ext_lines = []
    if action == "ack":
        # acknowleded command
        if t_type == "hosts":
            _ext_lines = [
                "[{:d}] ACKNOWLEDGE_HOST_PROBLEM;{};1;1;1;ich;done it".
                format(int(time.time()), _dev.full_name)
                for _dev in device.objects.filter(Q(pk__in=key_list))
            ]
        else:
            # fix: previously logged an empty string, hiding the fact that
            # this type is not handled
            self.log("unhandled type '{}' for action '{}'".format(t_type, action))
    if _ext_lines:
        self.log("created {}".format(
            logging_tools.get_plural("line", len(_ext_lines))))
        ext_com = server_command.srv_command(
            command="ext_command",
            lines=_ext_lines,
        )
        # fix: unicode() is Python-2-only; the rest of the file uses str()
        self.send_to_process("syncer", "ext_command", str(ext_com))
def set_variable(opts):
    """Set a job variable on the server via the set_job_variable command."""
    if not opts.name or not opts.value:
        print("Need variable name and value")
        sys.exit(1)
    conn_defaults = net_tools.SendCommandDefaults()
    conn_defaults.port = opts.server_port
    conn_defaults.host = opts.server_address
    my_com = net_tools.SendCommand(conn_defaults)
    my_com.init_connection()
    var_com = server_command.srv_command(
        command="set_job_variable",
        jobid=opts.full_job_id,
        varname=opts.name,
        varvalue=opts.value,
        varunit=opts.unit,
    )
    if my_com.connect():
        reply = my_com.send_and_receive(var_com)
        if reply is None:
            print("Nothing returned from server")
        else:
            ret_str, ret_state = reply.get_log_tuple()
            if ret_state == logging_tools.LOG_LEVEL_OK:
                print(ret_str)
            else:
                print("a problem occured: [{}]: {}".format(
                    logging_tools.get_log_level_str(ret_state),
                    ret_str,
                ))
    else:
        print("unable to connect")
    my_com.close()
def _process_partinfo(self, partinfo_tree): info_keys = ['dev_dict', 'partitions', 'lvm_dict', 'disk_usage'] # parse result XML result = srv_command(source=partinfo_tree) self._partinfo_tree = {} for info_key in info_keys: try: res_tree = result[info_key] except KeyError: pass else: if not isinstance(res_tree, dict): res_tree = result._interpret_el(res_tree) self._partinfo_tree[info_key] = res_tree # add disk usage information to logical disks disk_free = self._partinfo_tree.get('disk_usage') if disk_free: for usage in disk_free: mountpoint = usage['mountpoint'] try: logical = self._mount_point_logical_disks[mountpoint] except KeyError: pass else: logical.size = usage['total'] logical.free_space = usage['free'] self.logical_disks.append(logical)
def _get_node_rrd(request, dev_pks):
    """Fetch merged RRD node data for the given device pks and wrap it in a
    JSON HttpResponse (either the data or an 'error' object)."""
    rrd_com = server_command.srv_command(command="get_node_rrd")
    rrd_com["device_list"] = E.device_list(
        *[E.device(pk="{:d}".format(int(dev_pk))) for dev_pk in dev_pks],
        merge_results="1")
    result, _log_lines = contact_server(
        request, icswServiceEnum.grapher_server, rrd_com, timeout=30)
    # guard clauses instead of nested if/else
    if result is None:
        error = ", ".join(_line for _level, _line in _log_lines)
        return HttpResponse(
            json.dumps({"error": error}),
            content_type="application/json")
    _log_str, _log_level = result.get_log_tuple()
    if _log_level > logging_tools.LOG_LEVEL_WARN:
        return HttpResponse(
            json.dumps({"error": _log_str}),
            content_type="application/json")
    node_results = result.xpath(".//ns:result", smart_strings=False)
    if len(node_results) and node_results[0].text:
        return HttpResponse(node_results[0].text, content_type="application/json")
    return HttpResponse(
        json.dumps({"error": "no node results"}),
        content_type="application/json")
def _queue_control(self, *args, **kwargs):
    """Enable / disable / clear-error an SGE queue via qmod and send the
    result back asynchronously."""
    srv_com = server_command.srv_command(source=args[0])
    queue_action = srv_com["action"].text
    queue_spec = srv_com.xpath(".//ns:queue_list/ns:queue/@queue_spec", smart_strings=False)[0]
    self.log("queue action '{}' for job '{}'".format(
        queue_action, queue_spec))
    # map supported actions to the matching qmod switch
    flag_map = {
        "enable": "-e",
        "disable": "-d",
        "clear_error": "-c",
    }
    if queue_action in flag_map:
        cur_stat, cur_out = call_command(
            "{} {} {}".format(
                self._get_sge_bin("qmod"),
                flag_map[queue_action],
                queue_spec,
            ),
            log_com=self.log,
        )
        srv_com.set_result(
            "{} gave: {}".format(queue_action, cur_out),
            server_command.SRV_REPLY_STATE_ERROR if cur_stat else server_command.SRV_REPLY_STATE_OK)
    else:
        srv_com.set_result(
            "unknown job_action {}".format(queue_action),
            server_command.SRV_REPLY_STATE_ERROR,
        )
    self.send_pool_message("remote_call_async_result", str(srv_com))
def distribution_info(self, dist_info):
    """Process distribution info from the syncer: build the master / slave
    MainConfigContainer instances and flush any queued commands."""
    # dist gets called as soon as the syncer process is up and running
    for entry in dist_info:
        # add device entries
        entry["device"] = device.objects.get(Q(pk=entry["pk"]))
    slave_names = [
        entry["device"].full_name for entry in dist_info if not entry["master"]
    ]
    self.log(
        "got distribution info, found {}: {}".format(
            logging_tools.get_plural("slave server", len(dist_info) - 1),
            ", ".join(slave_names),
        )
    )
    # create MonConfigContainer instances
    for entry in dist_info:
        cur_dev = entry["device"]
        if entry["master"]:
            self.__master_config = MainConfigContainer(
                self,
                cur_dev,
            )
            self.__slave_configs = {}
        else:
            self.__slave_configs[cur_dev.pk] = MainConfigContainer(
                self,
                cur_dev,
                slave_name=cur_dev.full_name,
            )
    self.__ready = True
    if not self.__initial_reload_checked:
        self.__initial_reload_checked = True
        if global_config["BUILD_CONFIG_ON_STARTUP"] or global_config["INITIAL_CONFIG_RUN"]:
            self.handle_command(
                server_command.srv_command(command="build_host_config")
            )
    if self.__pending_commands:
        self.log(
            "processing {}".format(
                logging_tools.get_plural("pending command", len(self.__pending_commands))
            )
        )
        while self.__pending_commands:
            self.handle_command(
                server_command.srv_command(source=self.__pending_commands.pop(0))
            )
def post(self, request):
    """Trigger a full host-config rebuild on the monitor server."""
    build_com = server_command.srv_command(command="build_host_config", )
    result = contact_server(
        request,
        icswServiceEnum.monitor_server,
        build_com,
        connection_id="wf_mdrc",
    )
    if result:
        request.xml_response["result"] = E.devices()
def _get_config_srv_command(self, action, **kwargs):
    """Build a slave_info command for the local md-config-server."""
    # server command to local md-config-server from distribution master
    master_flag = "1" if self.master else "0"
    return server_command.srv_command(
        command="slave_info",
        action=action,
        slave_uuid=self.slave_uuid,
        master=master_flag,
        **kwargs
    )
def create_config(self, queue_id, s_req):
    """Create and dispatch a build_config request for the device of s_req."""
    # create a build_config request
    build_com = server_command.srv_command(command="build_config")
    dev_node = build_com.builder("device", pk="{:d}".format(s_req.cc.device.pk))
    build_com["devices"] = build_com.builder("devices", dev_node)
    # tag the origin so the receiver can tell where the request came from
    build_com["command"].attrib["source"] = "config_control"
    self._handle_wfe_command(None, str(queue_id), build_com)