def _get_ip_to_host(self, to_dev):
    """Return an IP address under which *to_dev* can be reached.

    First tries to compute a network route from the local device to
    *to_dev*; if no route exists, falls back to the first net_ip stored
    for the device.  Raises RuntimeError when neither yields an address.
    """
    local_check = config_tools.icswServerCheck(
        device=SrvTypeRouting().local_device,
        config=None,
    )
    remote_check = config_tools.icswServerCheck(
        device=to_dev,
        config=None,
    )
    found_route = local_check.get_route_to_other_device(
        self._get_router_obj(),
        remote_check,
        allow_route_to_other_networks=True,
        prefer_production_net=True,
    )
    if found_route:
        # take the target-side IP of the last hop
        return found_route[-1][3][1][0]
    # no route: fall back to any IP recorded for the device
    fallback_ip = net_ip.objects.filter(netdevice__device=to_dev).first()
    if fallback_ip:
        return fallback_ip.ip
    raise RuntimeError(
        "Failed to find IP address of {}".format(to_dev))
def post(self, request):
    """Trigger creation of all pending user home directories and
    afterwards force a user sync (plus http-user sync when a monitor
    server is present)."""
    # create homedirs
    pending_users = user.objects.exclude(
        Q(export=None)
    ).filter(
        Q(home_dir_created=False) & Q(active=True) & Q(group__active=True)
    ).select_related("export__device")
    logger.info("user homes to create: {:d}".format(len(pending_users)))
    for pending in pending_users:
        logger.info(
            "trying to create user_home for '{}' on server {}".format(
                str(pending),
                pending.export.device.full_name,
            )
        )
        create_com = server_command.srv_command(command="create_user_home")
        create_com["server_key:username"] = pending.login
        contact_server(
            request,
            icswServiceEnum.cluster_server,
            create_com,
            timeout=30,
            target_server_id=pending.export.device_id
        )
    # force sync_users
    request.user.save()
    monitor_check = config_tools.icswServerCheck(
        service_type_enum=icswServiceEnum.monitor_server
    )
    if monitor_check.effective_device:
        contact_server(
            request,
            icswServiceEnum.monitor_server,
            server_command.srv_command(command="sync_http_users")
        )
def check_config(self, loc_config):
    """Validate that this host provides the configs and config keys
    declared in self.Meta.

    Returns a (doit, srv_origin, err_str) tuple where *doit* signals
    whether the service may run here.
    """
    self.server_idx, self.act_config_name = (0, "")
    doit = False
    srv_origin = "---"
    err_str = "OK"
    needed = self.Meta.needed_configs
    if needed:
        for cfg_enum in needed:
            check_result = config_tools.icswServerCheck(
                service_type_enum=cfg_enum).get_result()
            eff_dev = check_result.effective_device
            if eff_dev:
                doit = True
                srv_origin = check_result.server_origin
                if not self.server_idx:
                    # remember the first matching device
                    self.server_device_name = eff_dev.name
                    self.server_idx = eff_dev.pk
                    self.act_config_name = eff_dev.name
        if doit:
            self.Meta.actual_configs = needed
        else:
            err_str = "Server {} has no {} attribute".format(
                loc_config["SERVER_SHORT_NAME"],
                " or ".join(_enum.name for _enum in needed),
            )
    else:
        # nothing required, always ok
        doit = True
    if doit and self.Meta.needed_config_keys:
        missing_keys = [
            key for key in self.Meta.needed_config_keys
            if key not in loc_config
        ]
        for key in missing_keys:
            self.log("key '{}' not defined in config".format(key), logging_tools.LOG_LEVEL_ERROR)
        if missing_keys:
            doit = False
    if doit and srv_origin == "---":
        srv_origin = "yes"
    return (doit, srv_origin, err_str)
def post(self, request):
    """Return (as JSON) the IP under which the requested device is
    reachable from the virtual-desktop-client server (or the local
    device when no such server is configured)."""
    payload = request.POST
    target_dev = device.objects.prefetch_related(
        "netdevice_set__net_ip_set__network__network_type"
    ).get(
        Q(pk=int(payload["device"]))
    )
    # from-device is where virtual desktop client config is set
    source_dev = config_tools.icswServerCheck(
        service_type_enum=icswServiceEnum.virtual_desktop_client
    ).effective_device
    if source_dev is None:
        # fall back to local device
        source_dev = routing.SrvTypeRouting(force=True).local_device
    source_check = config_tools.icswServerCheck(
        device=source_dev,
        config=None
    )
    target_check = config_tools.icswServerCheck(
        device=target_dev,
        config=None,
    )
    # calc route to it and use target ip
    route = source_check.get_route_to_other_device(
        config_tools.RouterObject(logger),
        target_check,
        allow_route_to_other_networks=True,
        prefer_production_net=True
    )
    if route:
        ip = route[0][3][1][0]
    else:
        # try fallback (it might not work, but it will not make things more broken)
        ip = "127.0.0.1"
    return HttpResponse(json.dumps({"ip": ip}), content_type="application/json")
def process_init(self):
    """Per-process setup: logging, fresh DB connection and the
    kernel-server check result."""
    global_config.enable_pm(self)
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        context=self.zmq_context,
    )
    # close database connection
    db_tools.close_connection()
    self.register_func("rescan_kernels", self._rescan_kernels)
    kernel_check = config_tools.icswServerCheck(
        service_type_enum=icswServiceEnum.kernel_server
    )
    self.kernel_dev = kernel_check.get_result()
def __init__(self):
    """Entry point of the image-build process pool.

    Sets up logging and exception handlers, resolves the image-server
    device for this host, handles the administrative lock flags
    (CLEAR_LOCK / SET_LOCK) or spawns the builder subprocesses, then
    kicks off the build unless an exit was requested.
    """
    self.__start_time = time.time()
    self.__verbose = global_config["VERBOSE"]
    # log cache holds messages emitted before the logger exists
    self.__log_cache, self.__log_template = ([], None)
    threading_tools.icswProcessPool.__init__(
        self,
        "main",
    )
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_func("compress_done", self._compress_done)
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        context=self.zmq_context
    )
    # log config
    self._log_config()
    # effective device for the image_server service; None when this
    # host does not carry that configuration
    self.device = config_tools.icswServerCheck(
        service_type_enum=icswServiceEnum.image_server
    ).get_result().effective_device
    if not self.device:
        self.log("not an image server", logging_tools.LOG_LEVEL_ERROR)
        self._int_error("not an image server")
    elif not process_tools.find_file("xmllint"):
        # refuse to run when the xmllint binary is not on the path
        self.log("xmllint not found", logging_tools.LOG_LEVEL_ERROR)
        self._int_error("xmllint not found")
    elif global_config["CLEAR_LOCK"] or global_config["SET_LOCK"]:
        # administrative mode: only toggle the build lock and terminate
        cur_img = self._get_image()
        if global_config["CLEAR_LOCK"]:
            _info_str = "lock cleared"
            cur_img.build_lock = False
        else:
            _info_str = "lock set"
            cur_img.build_lock = True
        cur_img.save()
        self._int_error("{} on image {}".format(_info_str, str(cur_img)))
    else:
        self.log("image server is '{}'".format(str(self.device) if self.device else "---"))
        self.__builder_names = []
        # spawn the configured number of builder subprocesses
        for cur_num in range(global_config["BUILDERS"]):
            builder_name = "builder_{:d}".format(cur_num)
            self.__builder_names.append(builder_name)
            self.add_process(BuildProcess(builder_name), start=True)
    db_tools.close_connection()
    self.__build_lock = False
    if not self["exit_requested"]:
        self.init_build()
def run(self, cur_bg):
    """Build the list of background-job runs for syncing sensor
    thresholds to the collectd server.

    Returns a list with at most one (background_job_run, srv_command,
    service_enum) tuple; empty (plus an error log) when no collectd
    server resolves to an effective device.
    """
    _src_com = server_command.srv_command(source=cur_bg.command_xml)
    # target command
    srv_com = server_command.srv_command(command="sync_sensor_threshold")
    # fetch the check result once and reuse it; the original code
    # tested get_result().effective_device but then read
    # effective_device from the check object itself, inconsistent with
    # every other get_result() call site in this file
    sc_result = config_tools.icswServerCheck(
        service_type_enum=icswServiceEnum.collectd_server
    ).get_result()
    to_run = []
    if sc_result.effective_device:
        to_run.append(
            (
                background_job_run(
                    background_job=cur_bg,
                    server=sc_result.effective_device,
                    command_xml=str(srv_com),
                    start=cluster_timezone.localize(datetime.datetime.now()),
                ),
                srv_com,
                icswServiceEnum.collectd_server,
            )
        )
    else:
        self.log("no valid rrd-collector found", logging_tools.LOG_LEVEL_ERROR)
    return to_run
def process_init(self):
    """Per-process setup for the command process: logging, helper
    singletons, message handlers and the periodic command check."""
    global_config.enable_pm(self)
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        context=self.zmq_context,
    )
    # close database connection
    db_tools.close_connection()
    MotherSimpleCommand.setup(self)
    self.router_obj = config_tools.RouterObject(self.log)
    self.snmp_sink = SNMPSink(self.log)
    self.sc = config_tools.icswServerCheck(
        service_type_enum=icswServiceEnum.mother_server
    )
    # wire up message handlers
    for func_name, handler in [
        ("delay_command", self._delay_command),
        ("hard_control", self._hard_control),
        ("snmp_finished", self._snmp_finished),
    ]:
        self.register_func(func_name, handler)
    self.register_timer(self._check_commands, 10)
    HardControlCommand.setup(self)
    self.send_pool_message("register_return", "command", target="snmp_process")
def _init_capabilities(self):
    """Discover and instantiate server capability classes.

    Scans this package's directory for modules declaring subclasses of
    base.BackgroundBase and enables every capability whose service type
    resolves to an effective device on this server.  When a database
    backup is running, no capabilities are loaded.
    """
    # initialize BOTH containers unconditionally; previously
    # __server_cap_dict was only created in the else-branch, so readers
    # would hit an AttributeError when BACKUP_DATABASE was set
    self.__cap_list = []
    self.__server_cap_dict = {}
    if global_config["BACKUP_DATABASE"]:
        self.log("doing database backup, ignoring capabilities", logging_tools.LOG_LEVEL_WARN)
    else:
        # read caps from the directory of this module
        _dir = os.path.dirname(__file__)
        self.log("init server capabilities from directory {}".format(_dir))
        SRV_CAPS = []
        for entry in os.listdir(_dir):
            if entry.endswith(".py") and entry != "__init__.py":
                _imp_name = "initat.cluster_server.capabilities.{}".format(
                    entry.split(".")[0])
                _mod = importlib.import_module(_imp_name)
                # collect every strict subclass of BackgroundBase
                for _key in dir(_mod):
                    _value = getattr(_mod, _key)
                    if inspect.isclass(_value) and issubclass(
                        _value, base.BackgroundBase
                    ) and _value != base.BackgroundBase:
                        SRV_CAPS.append(_value)
        self.log("checking {}".format(
            logging_tools.get_plural("capability", len(SRV_CAPS))))
        for _srv_cap in SRV_CAPS:
            srv_type = _srv_cap.Meta.service_enum
            sc_result = config_tools.icswServerCheck(
                service_type_enum=srv_type).get_result()
            if sc_result.effective_device:
                self.__cap_list.append(srv_type.name)
                self.__server_cap_dict[srv_type.name] = _srv_cap(
                    self, sc_result)
                self.log("capability {} is enabled on {}".format(
                    srv_type.name,
                    str(sc_result.effective_device),
                ))
            else:
                self.log("capability {} is disabled".format(srv_type.name))
def reconnect_to_clients(self):
    """Re-establish connections to all known package clients using
    routing information from this package server."""
    route_obj = config_tools.RouterObject(self.log)
    self.log("reconnecting to {}".format(logging_tools.get_plural("client", len(Client.name_set))))
    all_servers = config_tools.icswDeviceWithConfig(service_type_enum=icswServiceEnum.package_server)
    if icswServiceEnum.package_server not in all_servers:
        self.log("no package_server defined, strange...", logging_tools.LOG_LEVEL_ERROR)
        return
    pkg_server = all_servers[icswServiceEnum.package_server][0]
    if pkg_server.effective_device.pk != global_config["SERVER_IDX"]:
        # we are not the configured package server, refuse to reconnect
        self.log(
            "effective_device pk differs from SERVER_IDX: {:d} != {:d}".format(
                pkg_server.effective_device.pk,
                global_config["SERVER_IDX"]
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
        return
    for client_name in Client.name_set:
        client = Client.get(client_name)
        target_check = config_tools.icswServerCheck(
            device=client.device,
            fetch_network_info=True
        )
        routing_info = pkg_server.get_route_to_other_device(
            route_obj,
            target_check,
            allow_route_to_other_networks=True,
            prefer_production_net=True,
        )
        if routing_info:
            target_ip = routing_info[0][3][1][0]
            self.log("found routing_info for {}, IP is {}".format(str(client.device), target_ip))
            self.connect_client(client.device, target_ip)
        else:
            self.log("no routing_info found for {}".format(str(client.device)))
def _generate_config(self, attr_dict, **kwargs):
    """Build the node configuration for one client device.

    Resolves the device by (possibly FQDN) name, validates its network
    setup (production link, boot network, boot netdevice) and delegates
    the actual config generation to _generate_config_step2().  Progress
    and errors are reported through the build client's log; the final
    state is pushed back via a pool message.
    """
    if global_config["DEBUG"]:
        # remember query count to report DB activity at the end
        cur_query_count = len(connection.queries)
    # get client
    cur_c = build_client.get_client(**attr_dict)
    cur_c.log("starting config build")
    s_time = time.time()
    dev_sc = None
    # get device by name
    try:
        if cur_c.name.count("."):
            # FQDN: split into short name and domain tree node
            b_dev = device.objects.select_related(
                "device_group").prefetch_related(
                "netdevice_set",
                "netdevice_set__net_ip_set").get(
                Q(name=cur_c.name.split(".")[0]) &
                Q(domain_tree_node__full_name=cur_c.name.split(".", 1)[1]))
        else:
            b_dev = device.objects.select_related(
                "device_group").prefetch_related(
                "netdevice_set",
                "netdevice_set__net_ip_set").get(Q(name=cur_c.name))
    except device.DoesNotExist:
        cur_c.log("device not found by name", logging_tools.LOG_LEVEL_ERROR, state="done")
    except device.MultipleObjectsReturned:
        cur_c.log("more than one device with name '{}' found".format(
            cur_c.name), logging_tools.LOG_LEVEL_ERROR, state="done")
    else:
        dev_sc = config_tools.icswServerCheck(host_name=cur_c.name, fetch_network_info=True)
        # FIXME, ToDo
        # cur_c.log("icswServerCheck report(): {}".format(dev_sc.report()))
        cur_net_tree = NetworkTree()
        # sanity checks: config dir, production link, boot and
        # production networks must all be present
        if not cur_c.create_config_dir():
            cur_c.log("creating config_dir", logging_tools.LOG_LEVEL_ERROR, state="done")
        elif (b_dev.prod_link_id == 0 or not b_dev.prod_link):
            cur_c.log("no valid production_link set", logging_tools.LOG_LEVEL_ERROR, state="done")
        # elif len(cur_net_tree.get("b", {})) > 1:
        #     cur_c.log("more than one boot network found", logging_tools.LOG_LEVEL_ERROR, state="done")
        elif not len(cur_net_tree.get("b", {})):
            cur_c.log("no boot network found", logging_tools.LOG_LEVEL_ERROR, state="done")
        elif not len(cur_net_tree.get("p", {})):
            cur_c.log("no production networks found", logging_tools.LOG_LEVEL_ERROR, state="done")
        else:
            cur_c.log("found {}: {}".format(
                logging_tools.get_plural("production network", len(cur_net_tree["p"])),
                ", ".join([
                    str(cur_net) for cur_net in cur_net_tree["p"].values()
                ]),
            ))
            # find the production network the device is linked to
            act_prod_net = None
            for prod_net in cur_net_tree["p"].values():
                cur_c.clean_directory(prod_net.identifier)
                cur_c.log("{} {}".format(
                    "active" if prod_net.pk == b_dev.prod_link_id else "inactive",
                    prod_net.get_info(),
                ))
                if prod_net.pk == b_dev.prod_link.pk:
                    act_prod_net = prod_net
            if not act_prod_net:
                cur_c.log("invalid production link", logging_tools.LOG_LEVEL_ERROR, state="done")
            else:
                # all device IPs inside the production network
                ips_in_prod = [
                    cur_ip.ip
                    for cur_ip in dev_sc.identifier_ip_lut.get("p", [])
                ]
                if ips_in_prod:
                    netdevices_in_net = [
                        dev_sc.ip_netdevice_lut[ip] for ip in ips_in_prod
                    ]
                    if b_dev.bootnetdevice_id and b_dev.bootnetdevice:
                        # split into netdevices on / off the boot netdevice
                        net_devs_ok = [
                            net_dev for net_dev in netdevices_in_net
                            if net_dev.pk == b_dev.bootnetdevice.pk
                        ]
                        net_devs_warn = [
                            net_dev for net_dev in netdevices_in_net
                            if net_dev.pk != b_dev.bootnetdevice.pk
                        ]
                    else:
                        net_devs_ok, net_devs_warn = ([], netdevices_in_net)
                    if len(net_devs_ok) == 1:
                        boot_netdev = net_devs_ok[0]
                        # finaly, we have the device, the boot netdevice, actual production net
                        self._generate_config_step2(
                            cur_c, b_dev, act_prod_net, boot_netdev, dev_sc)
                    elif len(net_devs_ok) > 1:
                        cur_c.log(
                            "too many netdevices ({:d}) with IP in production network found"
                            .format(len(net_devs_ok)),
                            logging_tools.LOG_LEVEL_ERROR, state="done")
                    elif len(net_devs_warn) == 1:
                        cur_c.log(
                            " one netdevice with IP in production network found but not on bootnetdevice",
                            logging_tools.LOG_LEVEL_ERROR, state="done")
                    else:
                        cur_c.log(
                            "too many netdevices (%d) with IP in production network found (not on bootnetdevice!)" % (len(net_devs_warn)),
                            logging_tools.LOG_LEVEL_ERROR, state="done")
                else:
                    cur_c.log("no IP-address in production network", logging_tools.LOG_LEVEL_ERROR, state="done")
        cur_c.log_kwargs("after build", only_new=False)
    # done (yeah ?)
    # send result
    e_time = time.time()
    if dev_sc:
        dev_sc.device.add_log_entry(
            source=self.config_src,
            level=log_level_lookup(int(cur_c.state_level)),
            text="built config in {}".format(
                logging_tools.get_diff_time_str(e_time - s_time)))
    cur_c.log("built took {}".format(
        logging_tools.get_diff_time_str(e_time - s_time)))
    if global_config["DEBUG"]:
        # report the SQL queries issued during this build
        tot_query_count = len(connection.queries) - cur_query_count
        cur_c.log("queries issued: {:d}".format(tot_query_count))
        for q_idx, act_sql in enumerate(
                connection.queries[cur_query_count:], 1):
            cur_c.log(" {:4d} {}".format(q_idx, act_sql["sql"][:120]))
    # pprint.pprint(cur_c.get_send_dict())
    self.send_pool_message("client_update", cur_c.get_send_dict())
def _build_resolv_dict(self):
    """Build (and cache for 15 minutes) the routing dictionary.

    Maps every server-service enum name to a penalty-sorted list of
    (device full_name, IP, device_pk, penalty, [config names]) entries
    describing which devices provide the service and how to reach them
    from the local device.  Adds several meta entries (_local_device,
    _server_info_str, _alias_dict, _node_split_list,
    _unroutable_configs) before storing the JSON blob in the cache.
    """
    # local device
    _myself = icswServerCheck(fetch_network_info=True)
    _router = RouterObject(self.logger)
    enum_set = set()
    # build reverse lut
    _rv_lut = {}
    _INSTANCES_WITH_NAMES = set()
    # list of configs with node-splitting enabled
    node_split_list = []
    for _inst in _INSTANCE.get_all_instances():
        _inst_name = _inst.attrib["name"]
        if _INSTANCE.do_node_split(_inst):
            node_split_list.append(_inst_name)
        for _enum_name in _INSTANCE.get_config_enums(_inst):
            _srv_enum = getattr(icswServiceEnum, _enum_name)
            if _srv_enum.value.server_service:
                enum_set.add(_srv_enum)
                _INSTANCES_WITH_NAMES.add(_srv_enum.name)
                _rv_lut.setdefault(_srv_enum, []).append(
                    _inst_name)  # [_conf_name] = _inst_name
    # for key, value in _SRV_NAME_TYPE_MAPPING.iteritems():
    #     _rv_lut.update({_name: key for _name in value})
    # resolve dict
    _resolv_dict = {}
    # list of all already used srv_type / config_name / device_idx tuples
    _used_tuples = set()
    # dict resolving all srv_type / device_idx to set configs
    _dev_srv_type_lut = {}
    # simple routing cache
    routing_cache = {}
    # unroutable configs
    _unroutable_configs = {}
    # device -> network cache
    dn_cache = {}
    # get all configs
    for _enum in enum_set:
        _srv_type_list = _rv_lut[_enum]
        _sc = icswDeviceWithConfig(
            service_type_enum=_enum,
            dn_cache=dn_cache,
        )
        if _enum in _sc:
            for _dev in _sc[_enum]:
                _dev_scr = _dev.get_result()
                if _dev_scr.effective_device is None:
                    # may be the case when the local system is too old (database-wise)
                    continue
                # routing info
                if _dev_scr.effective_device.is_meta_device:
                    # server-like config is set for an md-device, not good
                    self.log(
                        "device '{}' (srv_type_list {}) is a meta-device".format(
                            _dev_scr.effective_device.full_name,
                            self._srv_type_to_string(_srv_type_list),
                        ),
                        logging_tools.LOG_LEVEL_ERROR)
                else:
                    if _myself.device and _dev_scr.effective_device.pk == _myself.device.pk:
                        # target is ourself: short-circuit via loopback
                        _first_ip = "127.0.0.1"
                        _penalty = 1
                    else:
                        # print _myself, dir(_myself)
                        _ri = _dev.get_route_to_other_device(
                            _router,
                            _myself,
                            allow_route_to_other_networks=True,
                            prefer_production_net=True,
                            cache=routing_cache,
                        )
                        if _ri:
                            _first_ri = _ri[0]
                            _first_ip = _first_ri[2][1][0]
                            _penalty = _first_ri[0]
                        else:
                            _first_ip = None
                    if _first_ip:
                        # print "*", _srv_type_list
                        # for _srv_type in _srv_type_list:
                        if True:
                            _add_t = (_enum, _dev_scr.effective_device.pk)
                            if _add_t not in _used_tuples:
                                _used_tuples.add(_add_t)
                                # lookup for simply adding new config names
                                _dst_key = (_enum, _dev_scr.effective_device.pk)
                                if _dst_key in _dev_srv_type_lut:
                                    # NOTE(review): this branch looks dead
                                    # (_dst_key equals _add_t and both are
                                    # recorded together below) and it reads
                                    # _resolv_dict[_enum] although entries are
                                    # stored under _enum.name — verify intent
                                    _ce = [
                                        _entry
                                        for _entry in _resolv_dict[_enum]
                                        if _entry[2] == _dev_scr.effective_device.pk
                                    ][0]
                                    _ce[4].append(_enum.name)
                                else:
                                    _resolv_dict.setdefault(
                                        _enum.name, []).append((
                                            _dev_scr.effective_device.full_name,
                                            _first_ip,
                                            _dev_scr.effective_device.pk,
                                            _penalty,
                                            [_enum.name],
                                        ))
                                    _dev_srv_type_lut.setdefault(
                                        _dst_key, []).append(_enum_name)
                                self.log(
                                    "adding device '{}' (IP {}, EffPK={:d}) to srv_type {}".format(
                                        _dev_scr.effective_device.full_name,
                                        _first_ip,
                                        _dev_scr.effective_device.pk,
                                        _enum.name,
                                    ))
                    else:
                        if not self.ignore_errors:
                            self.log(
                                "no route to device '{}' found (srv_type_list {})".format(
                                    _dev_scr.effective_device.full_name,
                                    self._srv_type_to_string(
                                        _srv_type_list),
                                ),
                                logging_tools.LOG_LEVEL_ERROR,
                            )
                        _unroutable_configs.setdefault(
                            _enum_name, []).append(_dev_scr.effective_device.full_name)
    # missing routes
    _missing_srv = _INSTANCES_WITH_NAMES - set(_resolv_dict.keys())
    if _missing_srv:
        for _srv_type in sorted(_missing_srv):
            self.log("no device for srv_type '{}' found".format(_srv_type), logging_tools.LOG_LEVEL_WARN)
    # sort entry
    for key, value in _resolv_dict.items():
        # format: device name, device IP, device_pk, penalty
        _resolv_dict[key] = [
            _v2[1] for _v2 in sorted([(_v[3], _v) for _v in value])
        ]
    # set local device
    if _myself.device is not None:
        _resolv_dict["_local_device"] = (_myself.device.pk, )
    _resolv_dict["_server_info_str"] = _myself.get_result().server_info_str
    _resolv_dict["_alias_dict"] = _INSTANCE.get_alias_dict()
    _resolv_dict["_node_split_list"] = node_split_list
    _resolv_dict["_unroutable_configs"] = _unroutable_configs
    # import pprint
    # pprint.pprint(_resolv_dict)
    # valid for 15 minutes
    cache.set(self.ROUTING_KEY, json.dumps(_resolv_dict), 60 * 15)
    return _resolv_dict
def write_dhcp_config(self):
    """Generate a dhcpd.conf for all boot networks (plus additional
    s/p/o networks on this server) and restart the dhcpd services.

    Honours WRITE_DHCP_CONFIG, DHCP_AUTHORITATIVE and
    DHCP_ONLY_BOOT_NETWORKS from the global configuration.
    """
    if not global_config["WRITE_DHCP_CONFIG"]:
        self.log("altering the DHCP-config disabled", logging_tools.LOG_LEVEL_WARN)
        return
    is_authoritative = global_config["DHCP_AUTHORITATIVE"]
    self.log("writing dhcp-config, {}".format("auth" if is_authoritative else "not auth"))
    my_c = config_tools.icswServerCheck(service_type_enum=icswServiceEnum.mother_server)
    # all IPs of this server inside boot ("b") networks
    boot_ips = my_c.identifier_ip_lut.get("b", [])
    if not boot_ips:
        self.log(
            "error no boot-net found",
            logging_tools.LOG_LEVEL_ERROR,
        )
    else:
        # additional slave/production/other networks on this server,
        # excluding the boot networks already covered
        add_nets = list(
            [
                (
                    cur_net.network_type.identifier,
                    cur_net
                ) for cur_net in network.objects.exclude(
                    pk__in=[boot_ip.network.pk for boot_ip in boot_ips]
                ).filter(
                    Q(net_ip__netdevice__device=my_c.get_result().effective_device) &
                    Q(network_type__identifier__in=["s", "p", "o"])
                ).distinct()
            ]
        )
        # order networks by type: production, slave, other
        add_nets = sum(
            [
                [
                    _sub_net for _value, _sub_net in add_nets if _value == _t_val
                ] for _t_val in ["p", "s", "o"]
            ],
            []
        )
        # static header of the generated dhcpd.conf
        dhcpd_c = [
            "",
            "# created from mother on {}".format(time.ctime(time.time())),
            "",
            "ddns-update-style none;",
            "omapi-port 7911;",
            "ddns-domainname \"{}\";".format(global_config["SERVER_SHORT_NAME"]),
            "allow booting;\nallow bootp;",
            "",
            "option space PXE;",
            "option PXE.mtftp-ip code 1 = ip-address;",
            "option PXE.mtftp-cport code 2 = unsigned integer 16;",
            "option PXE.mtftp-tmout code 4 = unsigned integer 8;",
            "option PXE.mtftp-delay code 5 = unsigned integer 8;",
            "option arch code 93 = unsigned integer 16;",
            "",
        ]
        if is_authoritative:
            dhcpd_c.extend(
                [
                    "authoritative;",
                    "",
                ]
            )
        # get gateway and domain-servers for the various nets
        gw_pri, gateway = (-10000, "0.0.0.0")
        cur_dc = config_tools.icswDeviceWithConfig(service_type_enum=icswServiceEnum.mother_server)
        found_dict = {}
        for act_net in [boot_ip.network for boot_ip in boot_ips] + add_nets:
            if act_net.gw_pri > gw_pri:
                # keep the gateway with the highest priority
                gw_pri, gateway = (act_net.gw_pri, act_net.gateway)
            for key, configs, _add_dict in [
                ("domain-name-servers", ["name_server", "name_slave"], {}),
                ("ntp-servers", ["xntp_server"], {}),
                ("nis-servers", ["yp_server"], {"domainname": "nis-domain"})
            ]:
                found_confs = set(cur_dc.keys()) & set(configs)
                if found_confs:
                    # some configs found
                    for found_conf in found_confs:
                        for cur_srv in cur_dc[found_conf]:
                            # only servers with an IP inside this network
                            match_list = [cur_ip for cur_ip in cur_srv.ip_list if cur_ip.network.pk == act_net.pk]
                            if match_list:
                                found_dict.setdefault(act_net.pk, {}).setdefault(key, []).append((cur_srv.device, match_list))
        dhcpd_c.extend(
            [
                "shared-network {} {{".format(global_config["SERVER_SHORT_NAME"]),
                # do not write routers (gateway may be invalid)
                # "    option routers {};".format(gateway)
            ]
        )
        DHCPNetwork.setup(my_c, found_dict)
        for act_net in [boot_ip.network for boot_ip in boot_ips] + add_nets:
            if act_net.netmask == "0.0.0.0":
                # refuse catch-all networks
                self.log(
                    "refuse network {} with netmask '{}'".format(
                        str(act_net),
                        act_net.netmask,
                    ),
                    logging_tools.LOG_LEVEL_ERROR
                )
            else:
                _net = DHCPNetwork(act_net)
                if global_config["DHCP_ONLY_BOOT_NETWORKS"] and act_net.network_type.identifier != "b":
                    # keep the section but comment it out
                    _net.comment_content()
                dhcpd_c.extend(_net.content)
        dhcpd_c.extend(
            [
                "}",
                "",
            ]
        )
        # locate the distribution-specific dhcpd.conf path
        _target_file = None
        for _tf in ["/etc/dhcp/dhcpd.conf", "/etc/dhcp3/dhcpd.conf", "/etc/dhcpd.conf"]:
            if os.path.isfile(_tf):
                self.log("found dhcp-config in {}".format(_tf))
                _target_file = _tf
                break
        if not _target_file:
            self.log("no DHCP config file found", logging_tools.LOG_LEVEL_ERROR)
        else:
            open(_target_file, "w").write("\n".join(dhcpd_c))
            self.log("wrote DHCP config to {}".format(_target_file))
            # restart every matching dhcpd service
            for _srv_name in self.srv_helper.find_services(".*dhcpd.*"):
                self.srv_helper.service_command(_srv_name, "restart")
def _call(self, cur_inst):
    """Rebuild /etc/hosts (and per-device-group files plus the ssh
    known_hosts file) from the routing topology.

    All reachable net_ips are collected along the shortest network
    paths from this server, grouped by routing penalty and written
    between the AEH pre/post marker sections of /etc/hosts.
    """
    file_list = []
    server_idxs = [self.server_idx]
    # get additional idx if host is virtual server
    sc_result = config_tools.icswServerCheck(service_type_enum=icswServiceEnum.cluster_server).get_result()
    if sc_result.effective_device is not None and sc_result.effective_device.idx != self.server_idx:
        server_idxs.append(sc_result.effective_device.idx)
    # recognize for which devices i am responsible
    dev_r = cluster_location.DeviceRecognition()
    server_idxs = list(set(server_idxs) | set(dev_r.device_dict.keys()))
    # get all peers to local machine and local netdevices
    my_idxs = netdevice.objects.exclude(
        Q(enabled=False)
    ).filter(
        Q(device__in=server_idxs) &
        Q(device__enabled=True) &
        Q(device__device_group__enabled=True)
    ).values_list("pk", flat=True)
    # ref_table
    route_obj = config_tools.RouterObject(cur_inst.log)
    all_paths = []
    # shortest paths from every local netdevice to all others
    for s_ndev in my_idxs:
        all_paths.extend(list(networkx.shortest_path(route_obj.nx, s_ndev, weight="weight").values()))
    # pprint.pprint(all_paths)
    nd_lut = {
        cur_nd.pk: cur_nd for cur_nd in netdevice.objects.all().select_related(
            "device"
        ).prefetch_related(
            "net_ip_set", "net_ip_set__network", "net_ip_set__domain_tree_node"
        )
    }
    # fetch key-information
    ssh_vars = device_variable.objects.filter(Q(name="ssh_host_rsa_key_pub")).select_related("device")
    rsa_key_dict = {}
    # NOTE(review): this loop is a stub — rsa_key_dict is never filled,
    # so the known_hosts file written below is always empty; verify
    # whether populating it from ssh_vars was intended
    for _db_rec in ssh_vars:
        pass
    # read pre/post lines from /etc/hosts
    pre_host_lines, post_host_lines = ([], [])
    # parse pre/post host_lines
    try:
        host_lines = [line.strip() for line in codecs.open(ETC_HOSTS_FILENAME, "r", "utf-8").read().split("\n")]
    except:
        self.log(
            "error reading / parsing {}: {}".format(
                ETC_HOSTS_FILENAME,
                process_tools.get_except_info()),
            logging_tools.LOG_LEVEL_ERROR)
    else:
        # state machine over the AEH marker lines: 1 = in pre section,
        # 2 = in post section, 0 = outside
        mode, any_modes_found = (0, False)
        for line in host_lines:
            if line.lower().startswith("### aeh-start-pre"):
                mode, any_modes_found = (1, True)
            elif line.lower().startswith("### aeh-start-post"):
                mode, any_modes_found = (2, True)
            elif line.lower().startswith("### aeh-end"):
                mode, any_modes_found = (0, True)
            else:
                if mode == 1:
                    pre_host_lines.append(line)
                elif mode == 2:
                    post_host_lines.append(line)
        if not any_modes_found:
            self.log(
                "no ### aeh-.* stuff found in {}, copying to {}.orig".format(
                    ETC_HOSTS_FILENAME,
                    ETC_HOSTS_FILENAME
                )
            )
            # NOTE(review): the announced copy to .orig is a stub —
            # nothing is written here; confirm whether the backup
            # should actually be created
            try:
                pass
            except:
                self.log(
                    "error writing {}.orig: {}".format(
                        ETC_HOSTS_FILENAME,
                        process_tools.get_except_info()
                    )
                )
    # mapping from device_name to all names for ssh_host_keys
    name_dict = {}
    # ip dictionary
    ip_dict = {}
    # min_target_dict
    min_target_dict = {}
    # smallest routing penalty per target netdevice
    for cur_path in all_paths:
        min_value = route_obj.get_penalty(cur_path)
        target_nd = nd_lut[cur_path[-1]]
        min_target_dict[target_nd] = min(min_target_dict.get(target_nd, 999999999), min_value)
    tl_dtn = domain_tree_node.objects.get(Q(depth=0))
    for cur_path in all_paths:
        target_nd = nd_lut[cur_path[-1]]
        min_value = min_target_dict[target_nd]
        for cur_ip in nd_lut[cur_path[-1]].net_ip_set.all():
            # get names
            host_names = []
            cur_dtn = cur_ip.domain_tree_node or tl_dtn
            if not (cur_ip.alias.strip() and cur_ip.alias_excl):
                host_names.append("{}{}".format(target_nd.device.name, cur_dtn.node_postfix))
            host_names.extend(["{}".format(cur_entry) for cur_entry in cur_ip.alias.strip().split()])
            if "localhost" in [x.split(".")[0] for x in host_names]:
                # a localhost alias wins: keep only the localhost names
                host_names = [host_name for host_name in host_names if host_name.split(".")[0] == "localhost"]
            if cur_dtn.full_name:
                if cur_dtn.create_short_names:
                    # also create short_names
                    out_names = (
                        " ".join(
                            [
                                "{}.{} {}".format(host_name, cur_dtn.full_name, host_name) for host_name in host_names if not host_name.count(".")
                            ]
                        )
                    ).split()
                else:
                    # only print the long names
                    out_names = ["{}.{}".format(host_name, cur_dtn.full_name) for host_name in host_names if not host_name.count(".")]
            else:
                if cur_dtn.create_short_names:
                    # also create short_names
                    out_names = (" ".join(["{}".format(host_name) for host_name in host_names if not host_name.count(".")])).split()
                else:
                    # only print the long names
                    out_names = ["{}".format(host_name) for host_name in host_names if not host_name.count(".")]
            # add names with dot
            out_names.extend([host_name for host_name in host_names if host_name.count(".")])
            # name_dict without localhost
            name_dict.setdefault(
                target_nd.device.name, []
            ).extend(
                [
                    out_name for out_name in out_names if out_name not in name_dict[target_nd.device.name] and not out_name.startswith("localhost")
                ]
            )
            ip_dict.setdefault(cur_ip.ip, [])
            if out_names not in [entry[1] for entry in ip_dict[cur_ip.ip]]:
                if cur_ip.ip != "0.0.0.0":
                    ip_dict[cur_ip.ip].append((min_value, out_names))
    # out_list
    loc_dict = {}
    # group the hostname lists per IP by their smallest penalty
    for ip, h_list in ip_dict.items():
        all_values = sorted([entry[0] for entry in h_list])
        if all_values:
            min_value = all_values[0]
            out_names = []
            for val in all_values:
                for _act_val, act_list in [(x_value, x_list) for x_value, x_list in h_list if x_value == val]:
                    out_names.extend([value for value in act_list if value not in out_names])
            # print min_value, ip, out_names
            loc_dict.setdefault(min_value, []).append([ipvx_tools.IPv4(ip)] + out_names)
    pen_list = sorted(loc_dict.keys())
    out_file = []
    # one formatted section per penalty value
    for pen_value in pen_list:
        act_out_list = logging_tools.NewFormList()
        for entry in sorted(loc_dict[pen_value]):
            act_out_list.append(
                [
                    logging_tools.form_entry(entry[0])
                ] + [
                    logging_tools.form_entry(cur_e) for cur_e in entry[1:]
                ]
            )
        host_lines = str(act_out_list).split("\n")
        out_file.extend(
            [
                "# penalty {:d}, {}".format(
                    pen_value,
                    logging_tools.get_plural("host entry", len(host_lines))
                ),
                ""
            ] + host_lines + [""]
        )
    if not os.path.isdir(GROUP_DIR):
        try:
            os.makedirs(GROUP_DIR)
        except:
            pass
    if os.path.isdir(GROUP_DIR):
        # remove old files
        for file_name in os.listdir(GROUP_DIR):
            try:
                os.unlink(os.path.join(GROUP_DIR, file_name))
            except:
                pass
        # get all devices with netips
        all_devs = device.objects.filter(
            Q(enabled=True) &
            Q(device_group__enabled=True) &
            Q(netdevice__net_ip__ip__contains=".")
        ).values_list(
            "name", "device_group__name"
        ).order_by(
            "device_group__name", "name"
        )
        dg_dict = {}
        for dev_name, dg_name in all_devs:
            dg_dict.setdefault(dg_name, []).append(dev_name)
        # one file per device group listing its device names
        for file_name, content in dg_dict.items():
            codecs.open(
                os.path.join(GROUP_DIR, file_name), "w", "utf-8"
            ).write("\n".join(sorted(set(content)) + [""]))
    file_list.append(ETC_HOSTS_FILENAME)
    # write /etc/hosts with the preserved pre/post marker sections
    codecs.open(ETC_HOSTS_FILENAME, "w+", "utf-8").write(
        "\n".join(
            [
                "### AEH-START-PRE insert pre-host lines below"
            ] + pre_host_lines + [
                "### AEH-END-PRE insert pre-host lines above",
                ""
            ] + out_file + [
                "",
                "### AEH-START-POST insert post-host lines below"
            ] + post_host_lines + [
                "### AEH-END-POST insert post-host lines above",
                ""
            ]
        )
    )
    # write known_hosts_file
    if os.path.isdir(os.path.dirname(SSH_KNOWN_HOSTS_FILENAME)):
        skh_f = open(SSH_KNOWN_HOSTS_FILENAME, "w")
        for ssh_key_node in sorted(rsa_key_dict.keys()):
            skh_f.write(
                "{} {}\n".format(
                    ",".join(name_dict.get(ssh_key_node, [ssh_key_node])),
                    rsa_key_dict[ssh_key_node]
                )
            )
        skh_f.close()
        file_list.append(SSH_KNOWN_HOSTS_FILENAME)
    cur_inst.srv_com.set_result(
        "wrote {}".format(", ".join(sorted(file_list)))
    )