def post(self, request):
    # Resolve the IP address under which the POSTed device is reachable from
    # the virtual-desktop-client server and return it as JSON: {"ip": ...}.
    _post = request.POST
    to_dev_pk = int(_post["device"])
    to_dev = device.objects.prefetch_related(
        "netdevice_set__net_ip_set__network__network_type"
    ).get(
        Q(pk=to_dev_pk)
    )
    # from-device is where virtual desktop client config is set
    server_by_type = config_tools.server_check(server_type="virtual_desktop_client")
    from_dev = server_by_type.effective_device
    if from_dev is None:
        # fall back to local device
        cur_routing = routing.SrvTypeRouting(force=True)
        from_dev = cur_routing.local_device
    from_server_check = config_tools.server_check(device=from_dev, config=None, server_type="node")
    to_server_check = config_tools.server_check(device=to_dev, config=None, server_type="node")
    # calc route to it and use target ip
    _router = config_tools.RouterObject(logger)
    route = from_server_check.get_route_to_other_device(
        _router, to_server_check,
        allow_route_to_other_networks=True, prefer_production_net=True)
    if route:
        # best route entry, target side, first IP
        # (presumably [penalty, ..., src_info, dst_info] layout -- TODO confirm against config_tools)
        ip = route[0][3][1][0]
    else:
        ip = "127.0.0.1"  # try fallback (it might not work, but it will not make things more broken)
    return HttpResponse(json.dumps({"ip": ip}), content_type="application/json")
def _get_ip_to_host(self, to_dev):
    """Return an IP address under which *to_dev* is reachable from here.

    Prefers a routed address (production net preferred); if no route is
    found, falls back to any IP stored for the device in the database.
    Raises RuntimeError when no address can be determined at all.
    """
    local_check = config_tools.server_check(
        device=SrvTypeRouting().local_device, config=None, server_type="node")
    remote_check = config_tools.server_check(
        device=to_dev, config=None, server_type="node")
    found_routes = local_check.get_route_to_other_device(
        self._get_router_obj(), remote_check,
        allow_route_to_other_networks=True, prefer_production_net=True)
    if found_routes:
        # last route entry, target side, first IP
        return found_routes[-1][3][1][0]
    # no route -- fall back to any IP recorded for the device
    fallback = net_ip.objects.filter(netdevice__device=to_dev).first()
    if fallback is None:
        raise RuntimeError(
            "Failed to find IP address of {}".format(to_dev))
    return fallback.ip
def check_config(self, loc_config):
    """Check whether this server satisfies the prerequisites in self.Meta.

    Walks Meta.needed_configs and records the first matching effective
    device in self.server_idx / self.act_config_name / self.server_device_name,
    then verifies that every key in Meta.needed_config_keys is present in
    *loc_config*.

    Returns a tuple (doit, srv_origin, err_str) where doit signals whether
    the service should run on this host.
    """
    self.server_idx, self.act_config_name = (0, "")
    doit, srv_origin, err_str = (False, "---", "OK")
    if self.Meta.needed_configs:
        for act_c in self.Meta.needed_configs:
            # removed unused local "_a = icswServiceEnum" and stale
            # commented-out server_type argument
            sql_info = config_tools.server_check(service_type_enum=act_c)
            if sql_info.effective_device:
                doit, srv_origin = (True, sql_info.server_origin)
                if not self.server_idx:
                    # remember the first matching config / device
                    self.server_device_name = sql_info.effective_device.name
                    self.server_idx, self.act_config_name = (
                        sql_info.effective_device.pk,
                        sql_info.effective_device.name
                    )
        if doit:
            self.Meta.actual_configs = self.Meta.needed_configs
        else:
            err_str = "Server {} has no {} attribute".format(
                loc_config["SERVER_SHORT_NAME"],
                " or ".join([_enum.name for _enum in self.Meta.needed_configs])
            )
    else:
        # no configs required -> always run
        doit = True
    if doit and self.Meta.needed_config_keys:
        for key in self.Meta.needed_config_keys:
            if key not in loc_config:
                self.log("key '{}' not defined in config".format(key), logging_tools.LOG_LEVEL_ERROR)
                doit = False
    if doit and srv_origin == "---":
        srv_origin = "yes"
    return (doit, srv_origin, err_str)
def _init_capabilities(self):
    """Discover capability classes in this package and enable the configured ones.

    Scans all python modules next to this file for subclasses of
    base.BackgroundBase, ensures a system config catalog / config entry
    exists for each capability and instantiates those whose config
    resolves to an effective device.  Skipped entirely while a database
    backup is running.
    """
    self.__cap_list = []
    if global_config["BACKUP_DATABASE"]:
        self.log("doing database backup, ignoring capabilities", logging_tools.LOG_LEVEL_WARN)
    else:
        # read caps
        _dir = os.path.dirname(__file__)
        self.log("init server capabilities from directory {}".format(_dir))
        SRV_CAPS = []
        for entry in os.listdir(_dir):
            if entry.endswith(".py") and entry not in ["__init__.py"]:
                _imp_name = "initat.cluster_server.capabilities.{}".format(
                    entry.split(".")[0])
                _mod = importlib.import_module(_imp_name)
                for _key in dir(_mod):
                    _value = getattr(_mod, _key)
                    if inspect.isclass(_value) and issubclass(
                        _value, base.BackgroundBase
                    ) and _value != base.BackgroundBase:
                        SRV_CAPS.append(_value)
        self.log("checking {}".format(
            logging_tools.get_plural("capability", len(SRV_CAPS))))
        self.__server_cap_dict = {}
        self.__cap_list = []
        try:
            sys_cc = config_catalog.objects.get(Q(system_catalog=True))
        except config_catalog.DoesNotExist:
            sys_cc = factories.ConfigCatalog(name="local", system_catalog=True)
        for _srv_cap in SRV_CAPS:
            cap_name = _srv_cap.Meta.name
            try:
                cap_descr = _srv_cap.Meta.description
            except AttributeError:
                # was a bare "except:"; only a missing Meta.description
                # should be tolerated here, anything else must propagate
                self.log(
                    "capability {} has no description set, ignoring...".format(cap_name),
                    logging_tools.LOG_LEVEL_ERROR)
            else:
                _new_c = factories.Config(
                    name=cap_name,
                    description=cap_descr,
                    config_catalog=sys_cc,
                    server_config=True,
                )
                _sql_info = config_tools.server_check(server_type=cap_name)
                if _sql_info.effective_device:
                    self.__cap_list.append(cap_name)
                    self.__server_cap_dict[cap_name] = _srv_cap(self, _sql_info)
                    self.log("capability {} is enabled on {}".format(
                        cap_name,
                        unicode(_sql_info.effective_device),
                    ))
                else:
                    self.log("capability {} is disabled".format(cap_name))
def process_init(self):
    """Per-process initialisation: set up zmq-aware logging, drop the
    inherited database connection and register the kernel-rescan handler."""
    global_config.close()
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        zmq=True,
        context=self.zmq_context,
        init_logger=True)
    # close database connection (must not be shared with the parent process)
    db_tools.close_connection()
    self.register_func("rescan_kernels", self._rescan_kernels)
    # server_check result for the kernel-server role of this node
    self.kernel_dev = config_tools.server_check(
        service_type_enum=icswServiceEnum.kernel_server)
def __init__(self):
    """Set up the image-build process pool.

    Aborts (via _int_error) when this host is not an image server or
    xmllint is missing; handles the CLEAR_LOCK / SET_LOCK maintenance
    modes; otherwise spawns BUILDERS worker processes and starts the build.
    """
    self.__start_time = time.time()
    self.__verbose = global_config["VERBOSE"]
    self.__log_cache, self.__log_template = ([], None)
    threading_tools.process_pool.__init__(
        self,
        "main",
        zmq=True,
    )
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_func("compress_done", self._compress_done)
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        zmq=True,
        context=self.zmq_context
    )
    # log config
    self._log_config()
    # effective device of the image_server config, None when not set here
    self.device = config_tools.server_check(service_type_enum=icswServiceEnum.image_server).effective_device
    if not self.device:
        self.log("not an image server", logging_tools.LOG_LEVEL_ERROR)
        self._int_error("not an image server")
    elif not process_tools.find_file("xmllint"):
        # xmllint is required by the build chain
        self.log("xmllint not found", logging_tools.LOG_LEVEL_ERROR)
        self._int_error("xmllint not found")
    elif global_config["CLEAR_LOCK"] or global_config["SET_LOCK"]:
        # maintenance mode: only toggle the build lock, then exit
        cur_img = self._get_image()
        if global_config["CLEAR_LOCK"]:
            _info_str = "lock cleared"
            cur_img.build_lock = False
        else:
            _info_str = "lock set"
            cur_img.build_lock = True
        cur_img.save()
        self._int_error("{} on image {}".format(_info_str, unicode(cur_img)))
    else:
        self.log("image server is '{}'".format(unicode(self.device) if self.device else "---"))
        # spawn one BuildProcess per configured builder slot
        self.__builder_names = []
        for cur_num in xrange(global_config["BUILDERS"]):
            builder_name = "builder_{:d}".format(cur_num)
            self.__builder_names.append(builder_name)
            self.add_process(BuildProcess(builder_name), start=True)
        db_tools.close_connection()
        self.__build_lock = False
        if not self["exit_requested"]:
            self.init_build()
def _find_best_server(self, conf_list):
    """Select from *conf_list* the config whose server has the lowest-penalty
    route to our client device; return None (and log) when none is reachable."""
    client_check = config_tools.server_check(
        short_host_name=self.cc.device.name,
        server_type="node",
        fetch_network_info=True,
    )
    reachable = []
    for candidate in conf_list:
        route_info = candidate.get_route_to_other_device(
            self.cc.router_obj,
            client_check,
            filter_ip=self.src_ip,
            allow_route_to_other_networks=False,
        )
        if route_info:
            # first element of the best route entry is the penalty
            reachable.append((route_info[0][0], candidate))
    if not reachable:
        self.log("no result in find_best_server ({})".format(
            logging_tools.get_plural("entry", len(conf_list))))
        return None
    return sorted(reachable)[0][1]
def run(self, cur_bg):
    """Schedule a sync_sensor_threshold command on the collectd server.

    Returns a list with a single (background_job_run, srv_command,
    service enum) tuple, or an empty list when no rrd-collector is
    configured.
    """
    # parse the source command (kept for its validation side effect)
    _src_com = server_command.srv_command(source=cur_bg.command_xml)
    # target command
    srv_com = server_command.srv_command(command="sync_sensor_threshold")
    _sc = config_tools.server_check(
        service_type_enum=icswServiceEnum.collectd_server)
    if not _sc.effective_device:
        self.log("no valid rrd-collector found", logging_tools.LOG_LEVEL_ERROR)
        return []
    job_entry = background_job_run(
        background_job=cur_bg,
        server=_sc.effective_device,
        command_xml=unicode(srv_com),
        start=cluster_timezone.localize(datetime.datetime.now()),
    )
    return [(job_entry, srv_com, icswServiceEnum.collectd_server)]
def reconnect_to_clients(self):
    """Re-establish connections to all known package clients.

    Only acts when this host is the configured package_server; for each
    client a route from the package server is computed and, when found,
    the client is reconnected via its routed IP.
    """
    router_obj = config_tools.RouterObject(self.log)
    self.log("reconnecting to {}".format(
        logging_tools.get_plural("client", len(Client.name_set))))
    all_servers = config_tools.device_with_config("package_server")
    if "package_server" not in all_servers:
        self.log("no package_server defined, strange...", logging_tools.LOG_LEVEL_ERROR)
    else:
        _pserver = all_servers["package_server"][0]
        if _pserver.effective_device.pk != global_config["SERVER_IDX"]:
            # we are not the package server the config points at -> do nothing
            self.log(
                "effective_device pk differs from SERVER_IDX: {:d} != {:d}".format(
                    _pserver.effective_device.pk, global_config["SERVER_IDX"]),
                logging_tools.LOG_LEVEL_ERROR)
        else:
            for target_name in Client.name_set:
                cur_c = Client.get(target_name)
                dev_sc = config_tools.server_check(
                    device=cur_c.device, config="", server_type="node",
                    fetch_network_info=True)
                act_routing_info = _pserver.get_route_to_other_device(
                    router_obj,
                    dev_sc,
                    allow_route_to_other_networks=True,
                    prefer_production_net=True,
                )
                if act_routing_info:
                    # best route entry, target side, first IP
                    _ip = act_routing_info[0][3][1][0]
                    self.log("found routing_info for {}, IP is {}".format(
                        unicode(cur_c.device), _ip))
                    self.connect_client(cur_c.device, _ip)
                    # self.send_reply(cur_c.uid, server_command.srv_command(command="hello"))
                else:
                    self.log("no routing_info found for {}".format(
                        unicode(cur_c.device)))
def process_init(self):
    """Per-process initialisation of the mother command process: logging,
    routing / SNMP helpers, handler registration and the command check timer."""
    global_config.close()
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        zmq=True,
        context=self.zmq_context,
        init_logger=True)
    # close database connection
    db_tools.close_connection()
    simple_command.setup(self)
    self.router_obj = config_tools.RouterObject(self.log)
    self.snmp_sink = SNMPSink(self.log)
    # server_check result for the mother-server role of this node
    self.sc = config_tools.server_check(
        service_type_enum=icswServiceEnum.mother_server)
    self.register_func("delay_command", self._delay_command)
    self.register_func("hard_control", self._hard_control)
    self.register_func("snmp_finished", self._snmp_finished)
    # poll pending commands every 10 seconds
    self.register_timer(self._check_commands, 10)
    HardControlCommand.setup(self)
    self.send_pool_message("register_return", "command", target="snmp_process")
def post(self, request):
    """Trigger creation of pending user home directories.

    Finds all active users (in active groups) with an export entry whose
    home dir is not yet created, asks the cluster-server on the export
    device to create each home, then forces a user sync and (when a
    monitor server exists) a sync of http users.
    """
    # create homedirs
    create_user_list = user.objects.exclude(
        Q(export=None)
    ).filter(
        Q(home_dir_created=False) & Q(active=True) & Q(group__active=True)
    ).select_related("export__device")
    logger.info("user homes to create: {:d}".format(len(create_user_list)))
    for create_user in create_user_list:
        logger.info(
            "trying to create user_home for '{}' on server {}".format(
                unicode(create_user),
                create_user.export.device.full_name,
            )
        )
        srv_com = server_command.srv_command(command="create_user_home")
        srv_com["server_key:username"] = create_user.login
        # fire-and-forget; result not evaluated here
        _result = contact_server(
            request, icswServiceEnum.cluster_server, srv_com,
            timeout=30, target_server_id=create_user.export.device_id)
    # force sync_users
    request.user.save()
    if config_tools.server_check(service_type_enum=icswServiceEnum.monitor_server).effective_device:
        srv_com = server_command.srv_command(command="sync_http_users")
        _result = contact_server(request, icswServiceEnum.monitor_server, srv_com)
def _build_resolv_dict(self):
    """Build (and cache for 15 minutes) the service routing resolv dict.

    Maps each server-service enum name to a penalty-sorted list of
    (device full_name, IP, device pk, penalty, [config names]) entries,
    plus meta keys: _local_device, _server_info_str, _alias_dict,
    _node_split_list and _unroutable_configs.
    """
    # local device
    _myself = server_check(server_type="", fetch_network_info=True)
    _router = RouterObject(self.logger)
    enum_names = set()
    # build reverse lut: enum name -> list of instance names
    _rv_lut = {}
    _INSTANCES_WITH_NAMES = set()
    # list of configs with node-splitting enabled
    node_split_list = []
    for _inst in _INSTANCE.get_all_instances():
        _inst_name = _inst.attrib["name"]
        if _INSTANCE.do_node_split(_inst):
            node_split_list.append(_inst_name)
        for _enum_name in _INSTANCE.get_config_enums(_inst):
            _srv_enum = getattr(icswServiceEnum, _enum_name)
            if _srv_enum.value.server_service:
                enum_names.add(_enum_name)
                _INSTANCES_WITH_NAMES.add(_enum_name)
                _rv_lut.setdefault(_enum_name, []).append(_inst_name)  # [_conf_name] = _inst_name
    # for key, value in _SRV_NAME_TYPE_MAPPING.iteritems():
    #     _rv_lut.update({_name: key for _name in value})
    # resolve dict
    _resolv_dict = {}
    # list of all already used srv_type / config_name / device_idx tuples
    _used_tuples = set()
    # dict resolving all srv_type / device_idx to set configs
    _dev_srv_type_lut = {}
    # simple routing cache
    routing_cache = {}
    # unroutable configs
    _unroutable_configs = {}
    # get all configs
    for _enum_name in enum_names:
        _srv_type_list = _rv_lut[_enum_name]
        _sc = device_with_config(
            service_type_enum=getattr(icswServiceEnum, _enum_name))
        if _enum_name in _sc:
            for _dev in _sc[_enum_name]:
                if _dev.effective_device is None:
                    # may be the case when the local system is too old (database-wise)
                    continue
                # routing info
                if _dev.effective_device.is_meta_device:
                    # server-like config is set for an md-device, not good
                    self.log(
                        "device '{}' (srv_type_list {}) is a meta-device".format(
                            _dev.effective_device.full_name,
                            self._srv_type_to_string(_srv_type_list),
                        ),
                        logging_tools.LOG_LEVEL_ERROR)
                else:
                    if _myself.device and _dev.effective_device.pk == _myself.device.pk:
                        # target is ourselves -> loopback, minimal penalty
                        _first_ip = "127.0.0.1"
                        _penalty = 1
                    else:
                        # print _myself, dir(_myself)
                        _ri = _dev.get_route_to_other_device(
                            _router,
                            _myself,
                            allow_route_to_other_networks=True,
                            prefer_production_net=True,
                            cache=routing_cache,
                        )
                        if _ri:
                            # best route entry: penalty at [0], source-side IP at [2][1][0]
                            _first_ri = _ri[0]
                            _first_ip = _first_ri[2][1][0]
                            _penalty = _first_ri[0]
                        else:
                            _first_ip = None
                    if _first_ip:
                        _srv_type = _enum_name
                        # print "*", _srv_type_list
                        # for _srv_type in _srv_type_list:
                        if True:
                            _add_t = (_srv_type, _enum_name, _dev.effective_device.pk)
                            if _add_t not in _used_tuples:
                                _used_tuples.add(_add_t)
                                # lookup for simply adding new config names
                                _dst_key = (_srv_type, _dev.effective_device.pk)
                                if _dst_key in _dev_srv_type_lut:
                                    # device already present for this srv_type:
                                    # just extend its config-name list
                                    _ce = [
                                        _entry for _entry in _resolv_dict[_srv_type]
                                        if _entry[2] == _dev.effective_device.pk
                                    ][0]
                                    _ce[4].append(_enum_name)
                                else:
                                    _resolv_dict.setdefault(_srv_type, []).append((
                                        _dev.effective_device.full_name,
                                        _first_ip,
                                        _dev.effective_device.pk,
                                        _penalty,
                                        [_enum_name],
                                    ))
                                    _dev_srv_type_lut.setdefault(_dst_key, []).append(_enum_name)
                                self.log(
                                    "adding device '{}' (IP {}, EffPK={:d}) to srv_type {} (config {})".format(
                                        _dev.effective_device.full_name,
                                        _first_ip,
                                        _dev.effective_device.pk,
                                        _srv_type,
                                        _enum_name,
                                    ))
                    else:
                        if not self.ignore_errors:
                            self.log(
                                "no route to device '{}' found (srv_type_list {}, config {})".format(
                                    _dev.effective_device.full_name,
                                    self._srv_type_to_string(_srv_type_list),
                                    _enum_name,
                                ),
                                logging_tools.LOG_LEVEL_ERROR,
                            )
                        _unroutable_configs.setdefault(
                            _enum_name, []).append(_dev.effective_device.full_name)
    # missing routes
    _missing_srv = _INSTANCES_WITH_NAMES - set(_resolv_dict.keys())
    if _missing_srv:
        for _srv_type in sorted(_missing_srv):
            self.log("no device for srv_type '{}' found".format(_srv_type),
                     logging_tools.LOG_LEVEL_WARN)
    # sort entry
    for key, value in _resolv_dict.iteritems():
        # format: device name, device IP, device_pk, penalty
        _resolv_dict[key] = [
            _v2[1] for _v2 in sorted([(_v[3], _v) for _v in value])
        ]
    # set local device
    if _myself.device is not None:
        _resolv_dict["_local_device"] = (_myself.device.pk, )
    _resolv_dict["_server_info_str"] = _myself.server_info_str
    _resolv_dict["_alias_dict"] = _INSTANCE.get_alias_dict()
    _resolv_dict["_node_split_list"] = node_split_list
    _resolv_dict["_unroutable_configs"] = _unroutable_configs
    # import pprint
    # pprint.pprint(_resolv_dict)
    # valid for 15 minutes
    cache.set(self.ROUTING_KEY, json.dumps(_resolv_dict), 60 * 15)
    return _resolv_dict
def __init__(self, proc, monitor_server, **kwargs):
    """
    holds information about remote monitoring satellites

    When a slave_name is given, routing between the master monitor server
    and the slave is resolved and master/slave UUIDs and IPs are recorded;
    otherwise this instance represents the master itself.
    """
    self.__process = proc
    self.__slave_name = kwargs.get("slave_name", None)
    self.__main_dir = global_config["MD_BASEDIR"]
    self.distributed = kwargs.get("distributed", False)
    self.master = True if not self.__slave_name else False
    if self.__slave_name:
        self.__dir_offset = os.path.join("slaves", self.__slave_name)
        master_cfg = config_tools.device_with_config(
            service_type_enum=icswServiceEnum.monitor_server)
        self.master_uuid = routing.get_server_uuid(
            icswServiceEnum.monitor_slave,
            master_cfg[icswServiceEnum.monitor_server][0].effective_device.uuid,
        )
        slave_cfg = config_tools.server_check(
            host_name=monitor_server.full_name,
            service_type_enum=icswServiceEnum.monitor_slave,
            fetch_network_info=True)
        self.slave_uuid = routing.get_server_uuid(
            icswServiceEnum.monitor_slave,
            monitor_server.uuid,
        )
        route = master_cfg[
            icswServiceEnum.monitor_server][0].get_route_to_other_device(
                self.__process.router_obj,
                slave_cfg,
                allow_route_to_other_networks=True,
                global_sort_results=True,
            )
        if not route:
            self.slave_ip = None
            self.master_ip = None
            self.log(
                "no route to slave {} found".format(
                    unicode(monitor_server)),
                logging_tools.LOG_LEVEL_ERROR)
        else:
            # best route entry: slave-side IP at [3][1][0], master-side at [2][1][0]
            self.slave_ip = route[0][3][1][0]
            self.master_ip = route[0][2][1][0]
            self.log("IP-address of slave {} is {} (master ip: {})".format(
                unicode(monitor_server), self.slave_ip, self.master_ip))
        # target config version directory for distribute
        self.__tcv_dict = {}
    else:
        # hm, for send_* commands
        self.slave_uuid = ""
        self.__dir_offset = "master"
    self.monitor_server = monitor_server
    self.__dict = {}
    self._create_directories()
    # flags
    # config state, one of
    # u .... unknown
    # b .... building
    # d .... done
    self.config_state = "u"
    # version of config build
    self.config_version_build = 0
    # version of config in send state
    self.config_version_send = 0
    # version of config installed
    self.config_version_installed = 0
    # start of send
    self.send_time = 0
    # lut: send_time -> config_version_send
    self.send_time_lut = {}
    # lut: config_version_send -> number transmitted
    self.num_send = {}
    # distribution state
    self.dist_ok = True
    # flag for reload after sync
    self.reload_after_sync_flag = False
    # relayer info (== icsw software version)
    # clear md_struct
    self.__md_struct = None
    # raw info
    self.__raw_info = {
        "version": {
            "relayer_version": "?.?-0",
            "mon_version": "?.?-0",
            "livestatus_version": "?.?",
        },
        # system flags
        "sysinfo": {},
        "name": self.__slave_name,
        "master": self.master or "",
        "latest_contact": 0,
    }
    # try to get relayer / mon_version from latest build
    if self.master:
        _latest_build = mon_dist_master.objects.filter(
            Q(device=self.monitor_server)).order_by("-pk")
    else:
        _latest_build = mon_dist_slave.objects.filter(
            Q(device=self.monitor_server)).order_by("-pk")
    if len(_latest_build):
        _latest_build = _latest_build[0]
        for _attr in [
            "mon_version", "relayer_version", "livestatus_version"
        ]:
            self.__raw_info["version"][_attr] = getattr(
                _latest_build, _attr)
        self.log("recovered {} from DB".format(self.vers_info))
def _generate_config(self, attr_dict, **kwargs):
    """Build the node configuration for the client described by *attr_dict*.

    Resolves the device by (possibly fully-qualified) name, validates the
    production link, boot / production networks and the boot netdevice,
    then delegates the actual build to _generate_config_step2.  All error
    paths log with state="done"; the result is pushed back to the pool via
    a "client_update" message.
    """
    if global_config["DEBUG"]:
        # remember query count to report SQL issued during the build
        cur_query_count = len(connection.queries)
    # get client
    cur_c = build_client.get_client(**attr_dict)
    cur_c.log("starting config build")
    s_time = time.time()
    dev_sc = None
    # get device by name
    try:
        if cur_c.name.count("."):
            # fully qualified: split into short name + domain tree node
            b_dev = device.objects.select_related(
                "device_group").prefetch_related(
                    "netdevice_set", "netdevice_set__net_ip_set").get(
                        Q(name=cur_c.name.split(".")[0]) &
                        Q(domain_tree_node__full_name=cur_c.name.split(".", 1)[1]))
        else:
            b_dev = device.objects.select_related(
                "device_group").prefetch_related(
                    "netdevice_set",
                    "netdevice_set__net_ip_set").get(Q(name=cur_c.name))
    except device.DoesNotExist:
        cur_c.log("device not found by name",
                  logging_tools.LOG_LEVEL_ERROR, state="done")
    except device.MultipleObjectsReturned:
        cur_c.log("more than one device with name '{}' found".format(
            cur_c.name), logging_tools.LOG_LEVEL_ERROR, state="done")
    else:
        dev_sc = config_tools.server_check(host_name=cur_c.name,
                                           server_type="node",
                                           fetch_network_info=True)
        cur_c.log("server_check report(): {}".format(dev_sc.report()))
        cur_net_tree = network_tree()
        # sanity checks
        if not cur_c.create_config_dir():
            cur_c.log("creating config_dir",
                      logging_tools.LOG_LEVEL_ERROR, state="done")
        elif (b_dev.prod_link_id == 0 or not b_dev.prod_link):
            cur_c.log("no valid production_link set",
                      logging_tools.LOG_LEVEL_ERROR, state="done")
        # elif len(cur_net_tree.get("b", {})) > 1:
        #     cur_c.log("more than one boot network found", logging_tools.LOG_LEVEL_ERROR, state="done")
        elif not len(cur_net_tree.get("b", {})):
            cur_c.log("no boot network found",
                      logging_tools.LOG_LEVEL_ERROR, state="done")
        elif not len(cur_net_tree.get("p", {})):
            cur_c.log("no production networks found",
                      logging_tools.LOG_LEVEL_ERROR, state="done")
        else:
            cur_c.log("found {}: {}".format(
                logging_tools.get_plural("production network", len(cur_net_tree["p"])),
                ", ".join([
                    unicode(cur_net) for cur_net in cur_net_tree["p"].itervalues()
                ]),
            ))
            # locate the production network the device's prod_link points at
            act_prod_net = None
            for prod_net in cur_net_tree["p"].itervalues():
                cur_c.clean_directory(prod_net.identifier)
                cur_c.log("{} {}".format(
                    "active" if prod_net.pk == b_dev.prod_link_id else "inactive",
                    prod_net.get_info(),
                ))
                if prod_net.pk == b_dev.prod_link.pk:
                    act_prod_net = prod_net
            if not act_prod_net:
                cur_c.log("invalid production link",
                          logging_tools.LOG_LEVEL_ERROR, state="done")
            else:
                ips_in_prod = [
                    cur_ip.ip
                    for cur_ip in dev_sc.identifier_ip_lut.get("p", [])
                ]
                if ips_in_prod:
                    netdevices_in_net = [
                        dev_sc.ip_netdevice_lut[ip] for ip in ips_in_prod
                    ]
                    if b_dev.bootnetdevice_id and b_dev.bootnetdevice:
                        # split netdevices into on-bootnetdevice (ok) and others (warn)
                        net_devs_ok = [
                            net_dev for net_dev in netdevices_in_net
                            if net_dev.pk == b_dev.bootnetdevice.pk
                        ]
                        net_devs_warn = [
                            net_dev for net_dev in netdevices_in_net
                            if net_dev.pk != b_dev.bootnetdevice.pk
                        ]
                    else:
                        net_devs_ok, net_devs_warn = ([], netdevices_in_net)
                    if len(net_devs_ok) == 1:
                        boot_netdev = net_devs_ok[0]
                        # finaly, we have the device, the boot netdevice, actual production net
                        self._generate_config_step2(
                            cur_c, b_dev, act_prod_net, boot_netdev, dev_sc)
                    elif len(net_devs_ok) > 1:
                        cur_c.log(
                            "too many netdevices ({:d}) with IP in production network found".format(
                                len(net_devs_ok)),
                            logging_tools.LOG_LEVEL_ERROR, state="done")
                    elif len(net_devs_warn) == 1:
                        cur_c.log(
                            " one netdevice with IP in production network found but not on bootnetdevice",
                            logging_tools.LOG_LEVEL_ERROR, state="done")
                    else:
                        cur_c.log(
                            "too many netdevices (%d) with IP in production network found (not on bootnetdevice!)" % (
                                len(net_devs_warn)),
                            logging_tools.LOG_LEVEL_ERROR, state="done")
                else:
                    cur_c.log("no IP-address in production network",
                              logging_tools.LOG_LEVEL_ERROR, state="done")
    cur_c.log_kwargs("after build", only_new=False)
    # done (yeah ?)
    # send result
    e_time = time.time()
    if dev_sc:
        dev_sc.device.add_log_entry(
            source=self.config_src,
            level=log_level_lookup(int(cur_c.state_level)),
            text="built config in {}".format(
                logging_tools.get_diff_time_str(e_time - s_time)))
    cur_c.log("built took {}".format(
        logging_tools.get_diff_time_str(e_time - s_time)))
    if global_config["DEBUG"]:
        tot_query_count = len(connection.queries) - cur_query_count
        cur_c.log("queries issued: {:d}".format(tot_query_count))
        for q_idx, act_sql in enumerate(
                connection.queries[cur_query_count:], 1):
            cur_c.log(" {:4d} {}".format(q_idx, act_sql["sql"][:120]))
    # pprint.pprint(cur_c.get_send_dict())
    self.send_pool_message("client_update", cur_c.get_send_dict())
def _get_valid_server_struct(self, s_list):
    """Find a usable server for one of the config names in *s_list*.

    For boot-related configs only the device's own bootserver counts as a
    match; other configs use _find_best_server.  kernel/image servers are
    remapped to the mother-server.  On success self.server_ip is set to
    the routed IP; returns the server_check struct or None when no server
    with a valid route was found.
    """
    # list of boot-related config names
    bsl_servers = set(["kernel_server", "image_server", "mother_server"])
    # list of server_types which has to be mapped to the mother-server
    map_to_mother = set(["kernel_server", "image_server"])
    for type_name in s_list:
        conf_list = config_tools.device_with_config(type_name).get(
            type_name, [])
        if conf_list:
            if type_name in bsl_servers:
                # config name (from s_list) is in bsl_servers
                valid_server_struct = None
                for srv_found in conf_list:
                    # iterate over servers
                    if srv_found.device and srv_found.device.pk == self.cc.device.bootserver_id:
                        # found bootserver, match
                        valid_server_struct = srv_found
                        break
            else:
                valid_server_struct = self._find_best_server(conf_list)
        else:
            # no config found
            valid_server_struct = None
        if valid_server_struct:
            # exit if srv_struct found
            break
    if valid_server_struct and type_name in map_to_mother:
        # remap to mother_server
        valid_server_struct = config_tools.server_check(
            server_type="mother_server",
            short_host_name=valid_server_struct.short_host_name,
            fetch_network_info=True)
    if valid_server_struct:
        dev_sc = config_tools.server_check(
            short_host_name=self.cc.device.name,
            server_type="node",
            fetch_network_info=True)
        # check if there is a route between us and server
        srv_routing = valid_server_struct.get_route_to_other_device(
            self.cc.router_obj,
            dev_sc,
            filter_ip=self.src_ip,
            allow_route_to_other_networks=False,
            # prefer production routes
            prefer_production_net=True,
        )
        # srv_routing = valid_server_struct.prefer_production_net(srv_routing)
        if not srv_routing:
            # check for updated network ?
            self.log(
                "found valid_server_struct {} but no route".format(
                    valid_server_struct.server_info_str),
                logging_tools.LOG_LEVEL_ERROR)
            valid_server_struct = None
        else:
            # print "r", srv_routing
            # best route entry, server side, first IP
            self.server_ip = srv_routing[0][2][1][0]
            self.log("found valid_server_struct {} (device {}) with ip {}".format(
                valid_server_struct.server_info_str,
                unicode(valid_server_struct.device),
                self.server_ip))
    else:
        self.log(
            "no valid server_struct found (search list: {})".format(
                ", ".join(s_list)),
            logging_tools.LOG_LEVEL_ERROR)
    return valid_server_struct
def check_config(self):
    """Validate the service instance config and populate global_config.

    Adds PID_NAME and per-port *_PORT entries from the instance XML; for
    server services additionally verifies via config_tools.server_check
    that this host is a valid server (exits with code 5 otherwise) and
    adds the SERVER_*/CONFIG_*/LOG_SOURCE_IDX/MEMCACHE_PORT entries.
    Finally initialises the MSI block when requested.
    """
    # late import (for clients without django)
    if self.srv_type_enum.value.server_service:
        from initat.tools import config_tools
        from django.db.models import Q
        from initat.cluster.backbone.models import LogSource
    if self.srv_type_enum.value.instance_name is None:
        raise KeyError("No instance_name set for srv_type_enum '{}'".format(self.srv_type_enum.name))
    self._instance = self._inst_xml[self.srv_type_enum.value.instance_name]
    # conf_names = self._inst_xml.get_config_names(self._instance)
    self.log(
        "check for service_type {} (==enum {})".format(
            self.srv_type_enum.value.name,
            self.srv_type_enum.name,
        )
    )
    _opts = [
        (
            "PID_NAME",
            configfile.str_c_var(self._inst_xml.get_pid_file_name(self._instance), source="instance", database=False)
        ),
    ]
    for _name, _value in self._inst_xml.get_port_dict(self._instance).iteritems():
        _opts.append(
            (
                "{}_PORT".format(_name.upper()),
                configfile.int_c_var(_value, source="instance", database=False)
            ),
        )
    if self.srv_type_enum.value.server_service:
        self.__sql_info = config_tools.server_check(service_type_enum=self.srv_type_enum)
        if self.__sql_info is None or not self.__sql_info.effective_device:
            # this can normally not happen due to start / stop via meta-server
            self.log("Not a valid {}".format(self.srv_type_enum.name), logging_tools.LOG_LEVEL_ERROR)
            sys.exit(5)
        else:
            # check eggConsumers
            # set values
            _opts.extend(
                [
                    (
                        "SERVICE_ENUM_NAME",
                        configfile.str_c_var(self.srv_type_enum.name),
                    ),
                    (
                        "SERVER_SHORT_NAME",
                        configfile.str_c_var(process_tools.get_machine_name(True)),
                    ),
                    (
                        "SERVER_IDX",
                        configfile.int_c_var(self.__sql_info.device.pk, database=False, source="instance")
                    ),
                    (
                        "CONFIG_IDX",
                        configfile.int_c_var(self.__sql_info.config.pk, database=False, source="instance")
                    ),
                    (
                        "EFFECTIVE_DEVICE_IDX",
                        configfile.int_c_var(self.__sql_info.effective_device.pk, database=False, source="instance")
                    ),
                    (
                        "LOG_SOURCE_IDX",
                        configfile.int_c_var(
                            LogSource.new(self.srv_type_enum.name, device=self.__sql_info.effective_device).pk,
                            source="instance",
                        )
                    ),
                    (
                        # NOTE(review): get_port_dict("memcached", command=True) is
                        # passed to int_c_var -- presumably returns a single port
                        # in command mode, not a dict; verify against instance XML API
                        "MEMCACHE_PORT",
                        configfile.int_c_var(self._inst_xml.get_port_dict("memcached", command=True), source="instance")
                    ),
                ]
            )
    self.global_config.add_config_entries(_opts)
    if self.__init_msi_block:
        self.__pid_name = self.global_config["PID_NAME"]
        process_tools.save_pid(self.__pid_name)
        self.log("init MSI Block")
        self.__msi_block = process_tools.MSIBlock(self.srv_type_enum.value.msi_block_name)
        self.__msi_block.add_actual_pid(process_name="main")
        self.__msi_block.save()