def __init__(self, name, ip=None, **kwargs):
    """Create a Host record and register its IP(s) in the Host class registry.

    :param name: host name; may be given as "name:ip" when ip is None
    :param ip: explicit IP (ipvx_tools.IPv4 or string) or None
    :param kwargs: register (default True) - feed IPs into Host.feed;
        monitor (default False) - track in Host.monitor_records;
        force_create_private, forward_domain, public_ip, private_ip,
        multiple - see below
    """
    # global instance counter on the class
    Host.init += 1
    register = kwargs.get("register", True)
    self.monitor = kwargs.get("monitor", False)
    if self.monitor:
        Host.monitor += 1
        Host.monitor_records.append(self)
    self.force_create_private = kwargs.get("force_create_private", False)
    self.forward_domain = kwargs.get("forward_domain", None)
    if ip is None:
        if name.count(":"):
            # "name:ip" combined form
            self.name = name.strip().split(":")[0]
            self.ip = ipvx_tools.IPv4(name.strip().split(":")[1])
            if register:
                Host.feed(self.ip, self.name)
            self.public_ip, self.private_ip = (None, None)
        else:
            # no single IP: expect a public/private IP pair via kwargs
            self.name = name
            self.ip = None
            self.public_ip = kwargs.get("public_ip")
            self.private_ip = kwargs.get("private_ip")
            if not isinstance(self.public_ip, ipvx_tools.IPv4):
                self.public_ip = ipvx_tools.IPv4(self.public_ip)
            if not isinstance(self.private_ip, ipvx_tools.IPv4):
                self.private_ip = ipvx_tools.IPv4(self.private_ip)
            if register:
                Host.feed(self.public_ip, self.name)
                Host.feed(self.private_ip, self.name)
    else:
        # explicit IP given (object or string)
        self.name = name
        if isinstance(ip, ipvx_tools.IPv4):
            self.ip = ip
        else:
            self.ip = ipvx_tools.IPv4(ip)
        if register:
            Host.feed(self.ip, self.name)
        self.public_ip, self.private_ip = (None, None)
    # set networks
    if self.ip:
        _ips = [self.ip]
        self.network = Network.get_network(self.ip)
        _cnws = [self.network]
        # NOTE(review): self.private is recomputed by the loop below,
        # making this assignment redundant (same result) — confirm intent
        self.private = self.network.private if self.network else False
    else:
        _ips = [self.public_ip, self.private_ip]
        self.public_network = Network.get_network(self.public_ip)
        self.private_network = Network.get_network(self.private_ip)
        _cnws = [self.public_network, self.private_network]
    # assumes the "multiple" handling and privacy flag apply to both
    # branches (both define _ips/_cnws) — TODO confirm against original
    if kwargs.get("multiple", False):
        Host.mult_ips.extend(_ips)
    # host counts as private if any of its networks is private
    self.private = False
    for _cnw in _cnws:
        if _cnw:
            if _cnw.private:
                self.private = True
def get_free_ip(self):
    """Return the first unused IPv4 address inside this network's
    configured address range, or None when no usable range is set or
    the range is exhausted.

    Mirrors link() in backend/network.coffee.
    """
    unset_markers = {None, "", "0.0.0.0"}
    if self.start_range in unset_markers or self.end_range in unset_markers:
        return None
    taken = {ipvx_tools.IPv4(entry.ip) for entry in self.net_ip_set.all()}
    step = ipvx_tools.IPv4("0.0.0.1")
    upper = ipvx_tools.IPv4(self.end_range)
    candidate = ipvx_tools.IPv4(self.start_range)
    # walk upwards until a free address is found or the range ends
    while candidate in taken:
        candidate += step
        if candidate > upper:
            return None
    return candidate
def get_or_create_network(self, network_addr, netmask, gateway=None, context=None):
    """
    Fetch the network row matching network_addr / netmask, creating an
    autogenerated entry when none exists yet.

    :type network_addr: ipvx_tools.IPv4
    :type netmask: ipvx_tools.IPv4
    :type gateway: ipvx_tools.IPv4 | None
    :param str context: string added to network name and info
    :rtype: network
    """
    try:
        return self.get(Q(network=str(network_addr)) & Q(netmask=str(netmask)))
    except self.model.DoesNotExist:
        # loopback networks get identifier "l", everything else "o"
        _identifier = "l" if str(network_addr).startswith("127.") else "o"
        info_str = " from {}".format(context) if context else ""
        # default gateway: first address inside the network
        if gateway:
            gw_str = str(gateway)
        else:
            gw_str = str(network_addr + ipvx_tools.IPv4("0.0.0.1"))
        new_nw = self.model(
            network_type=network_type.objects.get(Q(identifier=_identifier)),
            short_names=False,
            identifier=network.get_unique_identifier(),
            name="autogenerated{}".format(info_str),
            info="autogenerated{}".format(info_str),
            network=str(network_addr),
            netmask=str(netmask),
            gateway=gw_str,
            broadcast=str(~netmask | (network_addr & netmask)),
        )
        new_nw.save()
        return new_nw
def post(self, request):
    """Scan the local network interfaces via netifaces and create a
    network object for every previously unknown non-/32 IPv4 network."""
    from initat.cluster.backbone.models import network
    # NOTE(review): the "and False" permanently disables the
    # networks-already-defined guard, so the scan always runs — confirm
    # whether this was a temporary debug change
    if network.objects.all().count() and False:
        request.xml_response.warn("Networks already defined")
    else:
        # NOTE(review): _post is never used below
        _post = request.POST
        _ifs = netifaces.interfaces()
        # todo: add gateways
        _ = netifaces.gateways().get(netifaces.AF_INET, [])
        # dict: network -> network objects
        new_nets = {}
        for _if in _ifs:
            _addr = netifaces.ifaddresses(_if)
            if netifaces.AF_INET in _addr:
                for _net in _addr[netifaces.AF_INET]:
                    _required_keys = {"addr", "netmask"}
                    # skip host routes (/32) and incomplete entries
                    if _net["netmask"] != "255.255.255.255" and _required_keys == _required_keys & set(
                            _net.keys()):
                        from initat.tools import ipvx_tools
                        netmask = ipvx_tools.IPv4(_net["netmask"])
                        address = ipvx_tools.IPv4(_net["addr"])
                        networkaddr = netmask & address
                        if str(networkaddr) not in new_nets:
                            from initat.cluster.backbone.models import network_type
                            # get type
                            if ipvx_tools.is_loopback_network(networkaddr):
                                _type = network_type.objects.get(
                                    identifier="l")
                            else:
                                _type = network_type.objects.get(
                                    identifier="o")
                            net_id = "scanned_{:d}".format(
                                len(list(new_nets.keys())) + 1)
                            new_nw = network(
                                identifier=net_id,
                                info="autogenerated",
                                network=str(networkaddr),
                                netmask=str(netmask),
                                gateway="0.0.0.0",
                                broadcast=str(~netmask | (networkaddr & netmask)),
                                network_type=_type,
                            )
                            new_nw.save()
                            new_nets[str(networkaddr)] = new_nw
        request.xml_response.info("added {}".format(
            logging_tools.get_plural("network", len(new_nets))))
def __init__(self, srv_com):
    """Create an SNMP scan batch from a server command: resolve the
    target device and its SNMP parameters, validate the scan address,
    acquire a per-device scan lock and start the run."""
    self.srv_com = srv_com
    self.id = SNMPBatch.next_snmp_batch_id()
    SNMPBatch.add_batch(self)
    # lock
    self.__dev_lock = None
    self.init_run(self.srv_com["*command"])
    self.batch_valid = True

    def handle_error(msg):
        # log the problem, mark the batch invalid and terminate it
        self.log(msg, logging_tools.LOG_LEVEL_ERROR, result=True)
        self.batch_valid = False
        self.finish()
    try:
        _dev = self.srv_com.xpath(".//ns:devices/ns:device")[0]
        self.device = device.objects.get(Q(pk=_dev.attrib["pk"]))
        self.log("device is {}".format(str(self.device)))
        # map the version string "2c" to numeric "2", pass others through
        self.set_snmp_props(
            int({"2c": "2"}.get(_dev.attrib["snmp_version"], _dev.attrib["snmp_version"])),
            _dev.attrib["scan_address"],
            _dev.attrib["snmp_community"],
        )
        self.flags = {
            "strict": True if int(_dev.attrib.get("strict", "0")) else False,
            "modify_peering": True if int(_dev.attrib.get("modify_peering", "0")) else False,
        }
    except:
        handle_error("error setting device node: {}".format(
            process_tools.get_except_info()))
    else:
        try:
            # sanity check: scan address must be a valid IPv4 address
            ipvx_tools.IPv4(self.snmp_address)
        except ValueError:
            handle_error("Invalid IP: '{}'".format(self.snmp_address))
        else:
            if not SNMPBatch.process.device_is_capable(
                    self.device, ActiveDeviceScanEnum.SNMP):
                handle_error(
                    "device is missing the required ComCapability 'snmp'")
            else:
                _new_lock = SNMPBatch.process.device_is_idle(
                    self.device, ActiveDeviceScanEnum.SNMP)
                # NOTE(review): when no lock is obtained (device busy)
                # nothing happens here — confirm no error path is missing
                if _new_lock:
                    self.__dev_lock = _new_lock
                    self.log("SNMP scan started", result=True)
                    self.start_run()
                    self.send_return()
def update(self, dev, scheme, result_dict, oid_list, flags):
    """Merge SNMP IP-table scan results into the database for *dev*.

    Builds a dict of ifSNMPIP entries keyed by SNMP interface index
    from the ipNetToMedia and ipAddr tables, then creates missing
    net_ip rows; with flags["strict"] all other IPs of the device are
    deleted. Returns a ResultNode describing what was added.
    """
    # ip dict
    _ip_dict = {}
    # import pprint
    # pprint.pprint(result_dict)
    # pprint.pprint(simplify_dict(result_dict["1.3.6.1.2.1.4.22"], (1,)))
    if IP_NET_TO_MEDIA_TABLE in result_dict:
        for key, struct in simplify_dict(result_dict[IP_NET_TO_MEDIA_TABLE], (1,)).items():
            # check for static entries
            if 4 in struct and struct[4] == 4:
                # build snmp_ip struct
                _ip = ipvx_tools.IPv4(".".join(["{:d}".format(_entry) for _entry in key[1:]]))
                # find_matching_network returns (penalty, network) tuples
                _networks = _ip.find_matching_network(network.objects.all())
                if _networks:
                    self.log(
                        "found {} for {}: {}".format(
                            logging_tools.get_plural("matching network", len(_networks)),
                            str(_ip),
                            ", ".join([str(_net) for _net in _networks]),
                        )
                    )
                    _nw = _networks[0]
                    # synthesize an ipAddrTable-like row: 1=address,
                    # 2=interface index, 3=netmask as raw octet chars
                    _dict = {
                        2: key[0],
                        1: struct[3],
                        3: "".join([chr(int(_value)) for _value in _nw[1].netmask.split(".")]),
                    }
                    try:
                        _ip = ifSNMPIP(_dict)
                    except:
                        self.log(
                            "error interpreting {} as IP: {}".format(
                                str(struct),
                                process_tools.get_except_info()
                            ),
                            logging_tools.LOG_LEVEL_ERROR,
                        )
                    else:
                        _ip_dict[key[0]] = _ip
                else:
                    self.log("found no matching network for IP {}".format(str(_ip)), logging_tools.LOG_LEVEL_ERROR)
    else:
        self.log(
            "table {} not found in result".format(
                IP_NET_TO_MEDIA_TABLE
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
    if IP_ADDR_TABLE in result_dict:
        for key, value in simplify_dict(result_dict[IP_ADDR_TABLE], (1,)).items():
            try:
                _ip = ifSNMPIP(value)
            except:
                self.log(
                    "error interpreting {} as IP: {}".format(
                        str(value),
                        process_tools.get_except_info()
                    ),
                    logging_tools.LOG_LEVEL_ERROR,
                )
            else:
                _ip_dict[key] = _ip
    else:
        self.log(
            "table {} not found in result".format(
                IP_ADDR_TABLE,
            ),
            logging_tools.LOG_LEVEL_ERROR
        )
    # drop 0.0.0.0 pseudo-entries
    if any([str(_value.address_ipv4) == "0.0.0.0" for _value in _ip_dict.values()]):
        self.log("ignoring zero IP address", logging_tools.LOG_LEVEL_WARN)
        _ip_dict = {key: value for key, value in _ip_dict.items() if str(value.address_ipv4) != "0.0.0.0"}
    # domain tree node for new IPs: device's own or the top-level node
    if dev.domain_tree_node_id:
        _tln = dev.domain_tree_node
    else:
        _tln = domain_tree_node.objects.get(Q(depth=0))
    # lookup: SNMP interface index -> netdevice of this device
    if_lut = {_dev_nd.snmp_idx: _dev_nd for _dev_nd in netdevice.objects.filter(Q(snmp_idx__gt=0) & Q(device=dev))}
    # handle IPs
    _found_ip_ids = set()
    _added = 0
    for ip_struct in _ip_dict.values():
        if ip_struct.if_idx in if_lut:
            _dev_nd = if_lut[ip_struct.if_idx]
            # check for network
            _network_addr = ip_struct.address_ipv4 & ip_struct.netmask_ipv4
            cur_nw = network.objects.get_or_create_network(
                network_addr=_network_addr,
                netmask=ip_struct.netmask_ipv4,
                context="SNMP",
            )
            # check for existing IP
            try:
                _ip = net_ip.objects.get(Q(netdevice__device=dev) & Q(ip=ip_struct.address))
            except net_ip.DoesNotExist:
                # assumes creation/save happens only for new IPs — the
                # mangled source makes the exact nesting ambiguous; TODO confirm
                _added += 1
                _ip = net_ip(
                    ip=ip_struct.address,
                )
                _ip.domain_tree_node = _tln
                _ip.network = cur_nw
                _ip.netdevice = _dev_nd
                _ip.save()
            _found_ip_ids.add(_ip.idx)
    if flags["strict"]:
        # strict mode: remove every IP of the device we did not see
        stale_ips = net_ip.objects.exclude(Q(pk__in=_found_ip_ids)).filter(Q(netdevice__device=dev))
        if stale_ips.count():
            stale_ips.delete()
    if _added:
        return ResultNode(ok="updated IPs (added: {:d})".format(_added))
    else:
        return ResultNode()
def __init__(self, in_dict):
    """
    Decode one raw SNMP ipAddrTable-style row.

    in_dict maps column index to raw value: column 1 holds the IP
    address and column 3 the netmask (both as strings whose characters
    are raw octets), column 2 the SNMP interface index.
    """
    def _dotted(raw_octets):
        # one character per octet -> dotted-quad string
        return ".".join("{:d}".format(ord(_chr)) for _chr in raw_octets)

    self.address = _dotted(in_dict[1])
    self.netmask = _dotted(in_dict[3])
    self.address_ipv4 = ipvx_tools.IPv4(self.address)
    self.netmask_ipv4 = ipvx_tools.IPv4(self.netmask)
    self.if_idx = in_dict[2]
def net_ip_pre_save(sender, **kwargs):
    """pre_save signal handler for net_ip: validates the IP string,
    assigns a matching (or default) network and enforces per-device
    and per-network uniqueness. Raises ValidationError /
    NoMatchingNetworkFoundError on problems."""
    if "instance" in kwargs:
        cur_inst = kwargs["instance"]
        try:
            ipv_addr = ipvx_tools.IPv4(cur_inst.ip)
        except:
            raise ValidationError("not a valid IPv4 address")
        if not cur_inst.network_id:
            # pick best-matching network; entries are (penalty, network)
            match_list = ipv_addr.find_matching_network(network.objects.all())
            if len(match_list):
                cur_inst.network = match_list[0][1]
        if not cur_inst.network_id:
            # still no network: optionally fall back to a catch-all 0.0.0.0/0
            _cs = config_store.ConfigStore(GEN_CS_NAME, quiet=True)
            if _cs["create.default.network"]:
                try:
                    default_nw = network.objects.get(Q(network="0.0.0.0"))
                except network.DoesNotExist:
                    default_nw = network.objects.create(
                        network="0.0.0.0",
                        netmask="0.0.0.0",
                        broadcast="255.255.255.255",
                        gateway="0.0.0.0",
                        identifier="all",
                        network_type=network_type.objects.get(Q(identifier="o"))
                    )
                cur_inst.network = default_nw
            else:
                raise NoMatchingNetworkFoundError("nothing found for '{}'".format(cur_inst.ip))
        if not ipv_addr.network_matches(cur_inst.network):
            # assigned network does not contain the IP: re-match
            match_list = ipv_addr.find_matching_network(network.objects.all())
            if match_list:
                cur_inst.network = match_list[0][1]
            else:
                raise NoMatchingNetworkFoundError("nothing found for '{}'".format(cur_inst.ip))
        # reject duplicate (ip, network) pairs on the same device
        dev_ips = net_ip.objects.exclude(
            Q(pk=cur_inst.pk)
        ).filter(
            Q(netdevice__device=cur_inst.netdevice.device)
        ).values_list(
            "ip", "network_id"
        )
        if (cur_inst.ip, cur_inst.network_id) in dev_ips:
            raise ValidationError(
                "Address {} already used, device {}".format(
                    cur_inst.ip,
                    str(cur_inst.netdevice.device)
                )
            )
        if cur_inst.network.enforce_unique_ips:
            # network-wide uniqueness check
            try:
                present_ip = net_ip.objects.exclude(Q(pk=cur_inst.pk)).get(Q(network=cur_inst.network) & Q(ip=cur_inst.ip))
            except net_ip.DoesNotExist:
                pass
            except net_ip.MultipleObjectsReturned:
                raise ValidationError(
                    "IP already used more than once in network (force_unique_ips == True)"
                )
            else:
                raise ValidationError(
                    "IP already used for {} (enforce_unique_ips == True)".format(
                        str(present_ip.netdevice.device)
                    )
                )
def network_pre_save(sender, **kwargs):
    """pre_save signal handler for network objects.

    Validates the network type / master relations, normalises the IP
    quadruple (network, netmask, broadcast, gateway) depending on which
    attribute changed, optionally checks IP uniqueness and the address
    range, and writes the corrected values back onto the instance.

    Raises ValidationError on any inconsistency.
    """
    if "instance" in kwargs:
        cur_inst = kwargs["instance"]
        # what was the changed attribute
        change_attr = getattr(cur_inst, "change_attribute", None)
        check_integer(cur_inst, "penalty", min_val=-100, max_val=100)
        nw_type = cur_inst.network_type.identifier
        if cur_inst.rel_master_network.all().count() and nw_type != "p":
            raise ValidationError("slave networks exists, cannot change type")
        if nw_type != "s" and cur_inst.master_network_id:
            raise ValidationError("only slave networks can have a master")
        if nw_type == "s":
            if not cur_inst.master_network_id:
                raise ValidationError("slave network needs a master network")
            elif cur_inst.master_network.network_type.identifier != "p":
                raise ValidationError("master network must be a production network")
        # validate the four IP attributes
        ip_dict = {
            key: None for key in ["network", "netmask", "broadcast", "gateway"]
        }
        for key in list(ip_dict.keys()):
            try:
                ip_dict[key] = ipvx_tools.IPv4(getattr(cur_inst, key))
            except Exception:
                raise ValidationError("{} is not an IPv4 address".format(key))
        if not change_attr:
            change_attr = "network"
        # recompute dependent values from the attribute that changed
        if change_attr in ["network", "netmask"]:
            ip_dict["broadcast"] = ~ip_dict["netmask"] | (ip_dict["network"] & ip_dict["netmask"])
        elif change_attr == "broadcast":
            ip_dict["netmask"] = ~(ip_dict["broadcast"] & ~ip_dict["network"])
        elif change_attr == "gateway":
            # do nothing
            pass
        # check netmask: must be a contiguous prefix mask (/0 .. /32)
        _valid_masks = {
            (0xffffffff << (32 - _bits)) & 0xffffffff for _bits in range(0, 33)
        }
        if ip_dict["netmask"].value() not in _valid_masks:
            raise ValidationError("netmask is not valid")
        ip_dict["network"] = ip_dict["network"] & ip_dict["netmask"]
        # always correct gateway
        ip_dict["gateway"] = (ip_dict["gateway"] & ~ip_dict["netmask"]) | ip_dict["network"]
        if cur_inst._pre_enforce_unique_ips != cur_inst.enforce_unique_ips and cur_inst.enforce_unique_ips:
            # BUGFIX: use separate dicts here; the previous code rebound
            # ip_dict, discarding the corrected network / netmask /
            # broadcast / gateway values that are written back below.
            _ip_usage = {}
            for _ip in cur_inst.net_ip_set.all():
                _ip_usage.setdefault(_ip.ip, []).append(_ip)
            _duplicates = {
                key: value for key, value in _ip_usage.items() if len(value) > 1
            }
            if _duplicates:
                raise ValidationError(
                    "not all IPs are unique: {}".format(
                        ", ".join(
                            [
                                "{}: used {}".format(
                                    _key,
                                    logging_tools.get_plural("time", len(_value)),
                                ) for _key, _value in _duplicates.items()
                            ]
                        )
                    )
                )
        # check range
        _ignore_range = {None, "", "0.0.0.0"}
        if cur_inst.start_range not in _ignore_range and cur_inst.end_range not in _ignore_range:
            # validate range
            try:
                ip_dict["start_range"] = ipvx_tools.IPv4(cur_inst.start_range)
                ip_dict["end_range"] = ipvx_tools.IPv4(cur_inst.end_range)
            except Exception:
                raise ValidationError(
                    "start / end range {} / {} not valid".format(
                        cur_inst.start_range,
                        cur_inst.end_range,
                    )
                )
            else:
                if ip_dict["end_range"] < ip_dict["start_range"]:
                    raise ValidationError(
                        "range end {} is below range start {}".format(
                            str(ip_dict["end_range"]),
                            str(ip_dict["start_range"]),
                        )
                    )
                if ip_dict["start_range"] <= ip_dict["network"]:
                    # BUGFIX: message previously printed end_range here
                    raise ValidationError(
                        "range start {} is less or equal to network {}".format(
                            str(ip_dict["start_range"]),
                            str(ip_dict["network"]),
                        )
                    )
                _highest = ip_dict["network"] | (ip_dict["broadcast"] & ~ip_dict["netmask"])
                if ip_dict["end_range"] >= _highest:
                    raise ValidationError(
                        "range end {} is above or equal to highest IP {}".format(
                            str(ip_dict["end_range"]),
                            str(_highest),
                        )
                    )
        # set values (write the normalised strings back to the instance)
        for key, value in ip_dict.items():
            setattr(cur_inst, key, str(value))
def _call(self, cur_inst):
    """Regenerate /etc/hosts (with penalty-sorted entries for every
    reachable device IP), per-device-group host lists under GROUP_DIR
    and the ssh known_hosts file; reports the written files via
    cur_inst.srv_com."""
    file_list = []
    server_idxs = [self.server_idx]
    # get additional idx if host is virtual server
    sc_result = config_tools.icswServerCheck(service_type_enum=icswServiceEnum.cluster_server).get_result()
    if sc_result.effective_device is not None and sc_result.effective_device.idx != self.server_idx:
        server_idxs.append(sc_result.effective_device.idx)
    # recognize for which devices i am responsible
    dev_r = cluster_location.DeviceRecognition()
    server_idxs = list(set(server_idxs) | set(dev_r.device_dict.keys()))
    # get all peers to local machine and local netdevices
    my_idxs = netdevice.objects.exclude(
        Q(enabled=False)
    ).filter(
        Q(device__in=server_idxs) &
        Q(device__enabled=True) &
        Q(device__device_group__enabled=True)
    ).values_list("pk", flat=True)
    # ref_table
    route_obj = config_tools.RouterObject(cur_inst.log)
    all_paths = []
    # collect shortest network paths from every local netdevice
    for s_ndev in my_idxs:
        all_paths.extend(list(networkx.shortest_path(route_obj.nx, s_ndev, weight="weight").values()))
    # pprint.pprint(all_paths)
    nd_lut = {
        cur_nd.pk: cur_nd for cur_nd in netdevice.objects.all().select_related(
            "device"
        ).prefetch_related(
            "net_ip_set", "net_ip_set__network", "net_ip_set__domain_tree_node"
        )
    }
    # fetch key-information
    ssh_vars = device_variable.objects.filter(Q(name="ssh_host_rsa_key_pub")).select_related("device")
    rsa_key_dict = {}
    # NOTE(review): loop body is empty, so rsa_key_dict stays empty and
    # the known_hosts file is always written without keys — confirm
    for _db_rec in ssh_vars:
        pass
    # read pre/post lines from /etc/hosts
    pre_host_lines, post_host_lines = ([], [])
    # parse pre/post host_lines
    try:
        host_lines = [line.strip() for line in codecs.open(ETC_HOSTS_FILENAME, "r", "utf-8").read().split("\n")]
    except:
        self.log(
            "error reading / parsing {}: {}".format(
                ETC_HOSTS_FILENAME,
                process_tools.get_except_info()),
            logging_tools.LOG_LEVEL_ERROR)
    else:
        # state machine over the AEH marker lines: 1 = in pre block,
        # 2 = in post block, 0 = outside
        mode, any_modes_found = (0, False)
        for line in host_lines:
            if line.lower().startswith("### aeh-start-pre"):
                mode, any_modes_found = (1, True)
            elif line.lower().startswith("### aeh-start-post"):
                mode, any_modes_found = (2, True)
            elif line.lower().startswith("### aeh-end"):
                mode, any_modes_found = (0, True)
            else:
                if mode == 1:
                    pre_host_lines.append(line)
                elif mode == 2:
                    post_host_lines.append(line)
        if not any_modes_found:
            self.log(
                "no ### aeh-.* stuff found in {}, copying to {}.orig".format(
                    ETC_HOSTS_FILENAME, ETC_HOSTS_FILENAME
                )
            )
            # NOTE(review): the backup copy announced above is never
            # actually performed (empty try body) — confirm
            try:
                pass
            except:
                self.log(
                    "error writing {}.orig: {}".format(
                        ETC_HOSTS_FILENAME, process_tools.get_except_info()
                    )
                )
    # mapping from device_name to all names for ssh_host_keys
    name_dict = {}
    # ip dictionary
    ip_dict = {}
    # min_target_dict
    min_target_dict = {}
    # lowest routing penalty per target netdevice
    for cur_path in all_paths:
        min_value = route_obj.get_penalty(cur_path)
        target_nd = nd_lut[cur_path[-1]]
        min_target_dict[target_nd] = min(min_target_dict.get(target_nd, 999999999), min_value)
    tl_dtn = domain_tree_node.objects.get(Q(depth=0))
    for cur_path in all_paths:
        target_nd = nd_lut[cur_path[-1]]
        min_value = min_target_dict[target_nd]
        for cur_ip in nd_lut[cur_path[-1]].net_ip_set.all():
            # get names
            host_names = []
            cur_dtn = cur_ip.domain_tree_node or tl_dtn
            if not (cur_ip.alias.strip() and cur_ip.alias_excl):
                host_names.append("{}{}".format(target_nd.device.name, cur_dtn.node_postfix))
            host_names.extend(["{}".format(cur_entry) for cur_entry in cur_ip.alias.strip().split()])
            # if localhost appears, keep only the localhost names
            if "localhost" in [x.split(".")[0] for x in host_names]:
                host_names = [host_name for host_name in host_names if host_name.split(".")[0] == "localhost"]
            if cur_dtn.full_name:
                if cur_dtn.create_short_names:
                    # also create short_names
                    out_names = (
                        " ".join(
                            [
                                "{}.{} {}".format(host_name, cur_dtn.full_name, host_name)
                                for host_name in host_names if not host_name.count(".")
                            ]
                        )
                    ).split()
                else:
                    # only print the long names
                    out_names = ["{}.{}".format(host_name, cur_dtn.full_name) for host_name in host_names if not host_name.count(".")]
            else:
                if cur_dtn.create_short_names:
                    # also create short_names
                    out_names = (" ".join(["{}".format(host_name) for host_name in host_names if not host_name.count(".")])).split()
                else:
                    # only print the long names
                    out_names = ["{}".format(host_name) for host_name in host_names if not host_name.count(".")]
            # add names with dot
            out_names.extend([host_name for host_name in host_names if host_name.count(".")])
            # name_dict without localhost
            name_dict.setdefault(
                target_nd.device.name, []
            ).extend(
                [
                    out_name for out_name in out_names
                    if out_name not in name_dict[target_nd.device.name] and not out_name.startswith("localhost")
                ]
            )
            ip_dict.setdefault(cur_ip.ip, [])
            if out_names not in [entry[1] for entry in ip_dict[cur_ip.ip]]:
                if cur_ip.ip != "0.0.0.0":
                    ip_dict[cur_ip.ip].append((min_value, out_names))
    # out_list
    loc_dict = {}
    # group entries by their lowest penalty, merging name lists
    for ip, h_list in ip_dict.items():
        all_values = sorted([entry[0] for entry in h_list])
        if all_values:
            min_value = all_values[0]
            out_names = []
            for val in all_values:
                for _act_val, act_list in [(x_value, x_list) for x_value, x_list in h_list if x_value == val]:
                    out_names.extend([value for value in act_list if value not in out_names])
            # print min_value, ip, out_names
            loc_dict.setdefault(min_value, []).append([ipvx_tools.IPv4(ip)] + out_names)
    pen_list = sorted(loc_dict.keys())
    out_file = []
    for pen_value in pen_list:
        act_out_list = logging_tools.NewFormList()
        for entry in sorted(loc_dict[pen_value]):
            act_out_list.append(
                [
                    logging_tools.form_entry(entry[0])
                ] + [
                    logging_tools.form_entry(cur_e) for cur_e in entry[1:]
                ]
            )
        host_lines = str(act_out_list).split("\n")
        out_file.extend(
            [
                "# penalty {:d}, {}".format(
                    pen_value,
                    logging_tools.get_plural("host entry", len(host_lines))
                ),
                ""
            ] + host_lines + [""]
        )
    if not os.path.isdir(GROUP_DIR):
        try:
            os.makedirs(GROUP_DIR)
        except:
            pass
    if os.path.isdir(GROUP_DIR):
        # remove old files
        for file_name in os.listdir(GROUP_DIR):
            try:
                os.unlink(os.path.join(GROUP_DIR, file_name))
            except:
                pass
        # get all devices with netips
        all_devs = device.objects.filter(
            Q(enabled=True) &
            Q(device_group__enabled=True) &
            Q(netdevice__net_ip__ip__contains=".")
        ).values_list(
            "name", "device_group__name"
        ).order_by(
            "device_group__name", "name"
        )
        dg_dict = {}
        for dev_name, dg_name in all_devs:
            dg_dict.setdefault(dg_name, []).append(dev_name)
        # one file per device group, sorted unique device names
        for file_name, content in dg_dict.items():
            codecs.open(
                os.path.join(GROUP_DIR, file_name), "w", "utf-8"
            ).write("\n".join(sorted(set(content)) + [""]))
    file_list.append(ETC_HOSTS_FILENAME)
    # write /etc/hosts: preserved pre block, generated body, post block
    codecs.open(ETC_HOSTS_FILENAME, "w+", "utf-8").write(
        "\n".join(
            [
                "### AEH-START-PRE insert pre-host lines below"
            ] + pre_host_lines + [
                "### AEH-END-PRE insert pre-host lines above",
                ""
            ] + out_file + [
                "",
                "### AEH-START-POST insert post-host lines below"
            ] + post_host_lines + [
                "### AEH-END-POST insert post-host lines above",
                ""
            ]
        )
    )
    # write known_hosts_file
    if os.path.isdir(os.path.dirname(SSH_KNOWN_HOSTS_FILENAME)):
        skh_f = open(SSH_KNOWN_HOSTS_FILENAME, "w")
        for ssh_key_node in sorted(rsa_key_dict.keys()):
            skh_f.write(
                "{} {}\n".format(
                    ",".join(name_dict.get(ssh_key_node, [ssh_key_node])),
                    rsa_key_dict[ssh_key_node]
                )
            )
        skh_f.close()
        file_list.append(SSH_KNOWN_HOSTS_FILENAME)
    cur_inst.srv_com.set_result(
        "wrote {}".format(", ".join(sorted(file_list)))
    )
def _call(self, cur_inst):
    """Generate the BIND nameserver configuration: /etc/named.conf,
    /etc/rndc.conf and one forward zone file per domain tree node plus
    one reverse (in-addr.arpa) zone file per network, then reload the
    nameserver via rndc and report the result."""
    _log_lines, sys_dict = process_tools.fetch_sysinfo("/")
    # NOTE(review): sys_version is never used below
    sys_version = sys_dict["version"]
    # distribution-dependent location of the named chroot/data dir
    if (sys_dict["vendor"], sys_dict["version"].split(".")[0]) in [
        ("centos", "6"),
    ]:
        named_dir = "/var/named"
    else:
        named_dir = "/var/lib/named"
    if not os.path.isdir(named_dir):
        cur_inst.srv_com.set_result(
            "error no named_dir {}".format(named_dir),
            server_command.SRV_REPLY_STATE_ERROR)
        return
    cur_config = config.objects.get(Q(name="name_server"))
    act_conf_dict = config_tools.get_config_var_list(
        cur_config,
        device.objects.get(Q(pk=self.server_idx)))
    # get domain of server (to be used in SOA records of reverse maps)
    top_level_name = device.objects.get(
        Q(pk=self.server_idx)).domain_tree_node.full_name
    # get user/group id
    # print act_conf_dict.get("USER", "root")
    if "USER" in act_conf_dict:
        named_user = act_conf_dict["USER"].value
    else:
        named_user = "******"
    if "GROUP" in act_conf_dict:
        named_group = act_conf_dict["GROUP"].value
    else:
        named_group = "root"
    # resolve uid/gid, falling back to root on unknown names
    try:
        named_uid = pwd.getpwnam(named_user)[2]
    except KeyError:
        named_uid = 0
    try:
        named_gid = grp.getgrnam(named_group)[2]
    except KeyError:
        named_gid = 0
    # cf_lines -> /etc/rndc.conf
    cf_lines = [
        "options {",
        " default-server localhost;",
        "};",
        "server localhost {",
        " key key1;",
        "};",
        "key key1 {",
        " algorithm hmac-md5;"
    ]
    if "SECRET" in act_conf_dict:
        cf_lines.append(" secret \"{}\" ;".format(
            act_conf_dict["SECRET"].value))
    cf_lines.append("};")
    # ncf_lines -> /etc/named.conf
    ncf_lines = [
        "options {",
        " directory \"{}\";\n".format(named_dir),
        " datasize default;",
        " stacksize default;",
        " coresize default;",
        " empty-zones-enable no;",
        # " files unlimited;",
        " auth-nxdomain no;",
    ]
    forwarders = [
        act_conf_dict[key].value for key in act_conf_dict.keys() if key.startswith("FORWARDER")
    ]
    if len(forwarders):
        ncf_lines.append(
            " forwarders {\n%s\n };" % ("\n".join([" %s;" % (x) for x in forwarders if x])))
    ncf_lines.append(" listen-on {")
    server_idxs = [self.server_idx]
    # listen on every IP of this server
    my_ips = net_ip.objects.filter(
        Q(netdevice__device__in=server_idxs)).values_list("ip", flat=True)
    for my_ip in my_ips:
        ncf_lines.append(" %s;" % (my_ip))
    ncf_lines.extend([
        " };",
        " allow-query { any; };",
        " allow-recursion { any; };",
        "};",
        "",
        "controls {",
        " inet * allow { any ; } keys { \"key1\"; };",
        "};",
        "",
        # "include \"/etc/rndc.key\";",
        # "",
        # ])
        "key key1 {",
        " algorithm hmac-md5;",
    ])
    if "SECRET" in act_conf_dict:
        ncf_lines.append(" secret \"%s\" ;" % (act_conf_dict["SECRET"].value))
    ncf_lines.extend(["};"])
    # ncf_lines.extend(["logging{",
    #                   " channel simple_log {",
    #                   " file \"/var/log/named/bind.log\" versions 3 size 5m;",
    #                   " severity warning;",
    #                   " print-time yes;",
    #                   " print-severity yes;",
    #                   " print-category yes;",
    #                   " };",
    #                   " category default{",
    #                   " simple_log;",
    #                   " };",
    #                   "};"])
    if os.path.exists(os.path.join(named_dir, "root.hint")):
        ncf_lines.extend([
            "\nzone \".\" IN {",
            " type hint;",
            " file \"root.hint\";",
            "};"
        ])
    if os.path.exists("/etc/named.conf.include"):
        ncf_lines.extend([
            "",
            "include \"/etc/named.conf.include\";",
        ])
    # print ncf_lines
    real_config_name = "name_server"  # self.act_config_name.replace("%", "")
    # call_params.dc.execute(sql_str)
    master_ips, slave_ips = ([], [])
    # NOTE(review): master/slave discovery is disabled (if False);
    # sub_dir is therefore always "master"
    if False:
        if real_config_name == "name_server":
            # get slaves
            # slave_ips = [x["ip"] for x in call_params.dc.fetchall()]
            sub_dir = "master"
        elif real_config_name == "name_slave":
            # get masters
            # master_ips = [x["ip"] for x in call_params.dc.fetchall()]
            sub_dir = "slave"
    else:
        sub_dir = "master"
    # top level dtn
    tl_dtn = domain_tree_node.objects.get(Q(depth=0))
    # print master_ips, slave_ips
    # loop 1: forward maps
    all_dtns = domain_tree_node.objects.filter(
        Q(write_nameserver_config=True))
    # zone serial: YYYYMMDDHH, bumped above any previously used serial
    cur_serial = int(time.strftime("%Y%m%d%H", time.localtime(time.time())))
    CS_FILENAME = "/tmp/.cs_serial"
    if os.path.isfile(CS_FILENAME):
        try:
            last_serial = int(open(CS_FILENAME, "r").read().strip())
        except:
            pass
        else:
            while cur_serial <= last_serial:
                cur_serial += 1
    try:
        open(CS_FILENAME, "w").write("%d" % (cur_serial))
    except:
        pass
    for cur_dtn in all_dtns:
        nwname = cur_dtn.full_name
        if not nwname:
            continue
        write_zone_file = True
        name, name2 = (nwname, nwname)
        ncf_lines.append("\nzone \"%s\" IN {" % (name))
        zonefile_name = "%s.zone" % (name2)
        if nwname == "localdomain":
            # special handling
            ncf_lines.extend([
                " type master;",
                " notify no;",
                " allow-transfer { none; };"
            ])
        else:
            if real_config_name == "name_server":
                ncf_lines.append(" type master;")
                if len(slave_ips):
                    ncf_lines.extend([
                        " notify yes;",
                        " allow-transfer { %s; };" % ("; ".join(slave_ips)),
                        " also-notify { %s; };\n" % ("; ".join(slave_ips))
                    ])
                else:
                    ncf_lines.extend([
                        " notify no;",
                        " allow-transfer { none; };",
                        " allow-update { none; };"
                    ])
            elif real_config_name == "name_slave":
                zonefile_name = "slave/%s.zone" % (name2)
                write_zone_file = False
                ncf_lines.extend([
                    " type slave;",
                    " allow-transfer { none; };",
                    " notify no;",
                    " masters { %s; };" % ("; ".join(master_ips))
                ])
        ncf_lines.extend(
            [" file \"%s/%s\";" % (sub_dir, zonefile_name), "};"])
        if write_zone_file:
            # build the forward zone file contents
            _lines = []
            zname = "%s." % (nwname)
            _lines.extend([
                "$ORIGIN %s" % (zname),
                "$TTL 30M",
                "%s IN SOA %s lang-nevyjel.%s. (" % (zname, nwname, nwname)
            ])
            for what in [str(cur_serial), "1H", "15M", "1W", "30M"]:
                _lines.append("%s%s" % (" " * 10, what))
            _lines.extend(["%s)" % (" " * 5), "; NS and MX-records"])
            _form = logging_tools.form_list()
            _form.set_format_string(3, "s", "-", "; ")
            _form.add_line([
                " ",
                "IN NS",
                "%s." % (global_config["SERVER_SHORT_NAME"]),
                ""
            ])
            for dev_type in [0]:
                addstr = "real"
                # if net.identifier == "l":
                #     sel_str = " AND d.name='%s'" % (global_config["SERVER_SHORT_NAME"])
                # else:
                #     sel_str = ""
                print_ips = net_ip.objects.filter(
                    Q(domain_tree_node=cur_dtn) &
                    Q(netdevice__device__enabled=True) &
                    Q(netdevice__device__device_group__enabled=True) &
                    Q(domain_tree_node__write_nameserver_config=True) &
                    Q(netdevice__device__is_meta_device=False)
                ).select_related("netdevice__device", "domain_tree_node").order_by("ip")
                num_ips = print_ips.count()
                if num_ips:
                    _form.add_line("; {} {}".format(
                        addstr,
                        logging_tools.get_plural("record", num_ips)))
                for ret in print_ips:
                    out_names = []
                    if not (ret.alias.strip() and ret.alias_excl):
                        out_names.append("%s%s" % (ret.netdevice.device.name, cur_dtn.node_postfix))
                    out_names.extend(ret.alias.strip().split())
                    # first name gets the A record, the rest CNAMEs to it
                    first = True
                    for s_name in out_names:
                        if first:
                            first = False
                            f_name = s_name
                            _form.add_line([
                                s_name,
                                "IN A",
                                ret.ip,
                                ret.netdevice.device.comment
                            ])
                        else:
                            _form.add_line([
                                s_name,
                                "CNAME",
                                f_name,
                                ret.netdevice.device.comment
                            ])
            _lines.extend(str(_form).split("\n"))
            _dir_name = os.path.join(named_dir, sub_dir)
            if not os.path.isdir(_dir_name):
                os.mkdir(_dir_name)
                os.chmod(_dir_name, 0o770)
                os.chown(_dir_name, named_uid, named_gid)
            _file_name = os.path.join(_dir_name, "{}.zone".format(nwname))
            codecs.open(_file_name, "w", "utf-8").write("\n".join(_lines + [""]))
            os.chmod(_file_name, 0o660)
            os.chown(_file_name, named_uid, named_gid)
    # loop 2: reverse maps
    nets = network.objects.all()
    # nets = call_params.dc.fetchall()
    for net in nets:
        print("**", net)
        nw_ip = ipvx_tools.IPv4(net.network)
        nw_mask = ipvx_tools.IPv4(net.netmask)
        nw_ip_parts, nw_mask_parts = (nw_ip.parts, nw_mask.parts)
        # strip trailing zero octets of the netmask to get the
        # significant network octets
        network_parts = 4
        while True:
            if not nw_mask_parts or nw_mask_parts[-1]:
                break
            network_parts -= 1
            nw_mask_parts.pop(-1)
            nw_ip_parts.pop(-1)
        nw_flipped_parts = [value for value in nw_ip_parts]
        nw_flipped_parts.reverse()
        nw_flipped_ip = ".".join(
            ["%d" % (value) for value in nw_flipped_parts])
        nw_ip = ".".join(["%d" % (value) for value in nw_ip_parts])
        write_zone_file = True
        if not nw_flipped_ip:
            continue
        for name, name2 in [("%s.in-addr.arpa" % (nw_flipped_ip), nw_ip)]:
            ncf_lines.append("\nzone \"%s\" IN {" % (name))
            zonefile_name = "%s.zone" % (name2)
            if net.identifier == "l":
                ncf_lines.extend([
                    " type master;",
                    " notify no;",
                    " allow-transfer { none; };"
                ])
            else:
                if real_config_name == "name_server":
                    ncf_lines.append(" type master;")
                    if len(slave_ips):
                        ncf_lines.extend([
                            " notify yes;",
                            " allow-transfer { %s; };" % ("; ".join(slave_ips)),
                            " also-notify { %s; };\n" % ("; ".join(slave_ips))
                        ])
                    else:
                        ncf_lines.extend([
                            " notify no;",
                            " allow-transfer { none; };",
                            " allow-update { none; };"
                        ])
                elif real_config_name == "name_slave":
                    zonefile_name = "slave/%s.zone" % (name2)
                    write_zone_file = False
                    ncf_lines.extend([
                        " type slave;",
                        " allow-transfer { none; };",
                        " notify no;",
                        " masters { %s; };" % ("; ".join(master_ips))
                    ])
            ncf_lines.extend(
                [" file \"%s/%s\";" % (sub_dir, zonefile_name), "};"])
        if write_zone_file:
            # build the reverse zone file contents
            _lines = []
            zname = "%s.in-addr.arpa." % (nw_flipped_ip)
            _lines.extend([
                "$ORIGIN %s" % (zname),
                "$TTL 30M",
                "%s IN SOA %s lang-nevyjel. (" % (zname, top_level_name)
            ])
            for what in [str(cur_serial), "1H", "15M", "1W", "30M"]:
                _lines.append("%s%s" % (" " * 10, what))
            _lines.extend(["%s)" % (" " * 5), "; NS and MX-records"])
            _form = logging_tools.form_list()
            _form.set_format_string(3, "s", "-", "; ")
            _form.add_line([
                " ",
                "IN NS",
                "%s%s.%s." % (global_config["SERVER_SHORT_NAME"], "init", "at"),
                ""
            ])
            for dev_type in [0]:
                addstr = "real"
                # if net.identifier == "l":
                #     sel_str = " AND d.name='%s'" % (global_config["SERVER_SHORT_NAME"])
                # else:
                #     sel_str = ""
                print_ips = net_ip.objects.filter(
                    Q(netdevice__device__enabled=True) &
                    Q(netdevice__device__device_group__enabled=True) &
                    Q(domain_tree_node__write_nameserver_config=True) &
                    Q(netdevice__device__is_meta_device=False) &
                    Q(network=net)).select_related(
                    "netdevice__device", "domain_tree_node").order_by("ip")
                num_ips = print_ips.count()
                if num_ips:
                    _form.add_line("; {} {}".format(
                        addstr,
                        logging_tools.get_plural("record", num_ips)))
                for ret in print_ips:
                    # host part of the address, reversed for in-addr.arpa
                    host_part = str(
                        ipvx_tools.IPv4(ret.ip) & (~ipvx_tools.IPv4(net.network))).split(".")
                    host_part.reverse()
                    for _idx in range(network_parts):
                        host_part.pop(-1)
                    fiand = ".".join(reversed(host_part))
                    out_names = []
                    if ret.domain_tree_node_id:
                        cur_dtn = ret.domain_tree_node
                    else:
                        cur_dtn = tl_dtn
                    if not (ret.alias.strip() and ret.alias_excl):
                        out_names.append("%s%s" % (ret.netdevice.device.name, cur_dtn.node_postfix))
                    out_names.extend(ret.alias.strip().split())
                    for s_name in out_names:
                        _form.add_line([
                            fiand,
                            "IN PTR",
                            "%s.%s." % (s_name, cur_dtn.full_name),
                            ret.netdevice.device.comment
                        ])
            _lines.extend(str(_form).split("\n"))
            file_name = os.path.join(named_dir, sub_dir, "{}.zone".format(nw_ip))
            codecs.open(file_name, "w", "utf-8").write("\n".join(_lines + [""]))
            os.chmod(file_name, 0o660)
            os.chown(file_name, named_uid, named_gid)
    # write the two config files with tight permissions
    cfile = "/etc/rndc.conf"
    ncname = "/etc/named.conf"
    open(ncname, "w").write("\n".join(ncf_lines + [""]))
    open(cfile, "w").write("\n".join(cf_lines + [""]))
    os.chmod(cfile, 0o600)
    os.chmod(ncname, 0o600)
    os.chown(ncname, named_uid, named_gid)
    cstat, cout = subprocess.getstatusoutput("/usr/sbin/rndc reload")
    if cstat:
        # NOTE(review): the message below has an unbalanced quote
        # ("gave: '{}") in the original — left unchanged here
        cur_inst.srv_com.set_result(
            "wrote nameserver-config ({}), reloading gave: '{}".format(
                logging_tools.get_plural("network", len(nets)),
                cout),
            server_command.SRV_REPLY_STATE_ERROR)
    else:
        cur_inst.srv_com.set_result(
            "wrote nameserver-config ({}) and successfully reloaded configuration"
            .format(logging_tools.get_plural("network", len(nets)), ))
def create_noctua_fixtures():
    """Populate an empty Noctua installation with baseline fixtures.

    Creates (in order): the monitoring category tree, device groups and the
    local server device, monitoring periods / service / device templates,
    an initial group/user/admin set (only on an empty or fresh-UCS install),
    and the primary network objects (network, netdevice, net_ip) detected
    from the local interface configuration.

    Side effects only — everything is written to the Django database via
    ``factories``; nothing is returned.
    """
    print("Creating Noctua fixtures...")
    # first config catalog
    # category tree: service categories used below by the check commands
    ct = category_tree()
    cat_serv = ct.add_category("/mon/services")
    cat_web = ct.add_category("/mon/services/web")
    cat_mail = ct.add_category("/mon/services/mail")
    # config
    if False:
        # old code — dead branch kept for reference, never executed
        print("Creating configurations.")
        ping_config = factories.Config(name="check_ping", )
        snmp_config = factories.Config(name="check_snmp_info", )
        ssh_config = factories.Config(name="check_ssh", )
        http_config = factories.Config(name="check_http", )
        https_config = factories.Config(name="check_https", )
        ldap_config = factories.Config(name="check_ldap", )
        imap_config = factories.Config(name="check_imap", )
        imaps_config = factories.Config(name="check_imaps", )
        pop3s_config = factories.Config(name="check_pop3s", )
        smtps_config = factories.Config(name="check_smtps", )
        print("Creating monitoring checks.")
        # NOTE(review): ``.categories.add(...)`` returns None, so snmp_check
        # is None and the following ``config_rel.add`` would raise
        # AttributeError if this dead branch were ever re-enabled — fix
        # before resurrecting this code.
        snmp_check = factories.MonCheckCommand(
            name="snmp_info",
            command_line=
            "$USER3$ -m $HOSTADDRESS$ -C $ARG1$ -V $ARG2$ snmp_info",
        ).categories.add(cat_serv)
        snmp_check.config_rel.add(snmp_config)
        factories.MonCheckCommand(
            name="check_ping",
            command_line="$USER2$ -m localhost ping $HOSTADDRESS$ 5 5.0",
            config=ping_config).categories.add(cat_serv)
        factories.MonCheckCommand(
            name="check_ssh",
            command_line="$USER1$/check_ssh $HOSTADDRESS$",
            config=ssh_config).categories.add(cat_serv)
        factories.MonCheckCommand(
            name="check_http",
            command_line="$USER1$/check_http -H $HOSTADDRESS$",
            config=http_config).categories.add(cat_web)
        factories.MonCheckCommand(
            name="check_imaps",
            command_line="$USER1$/check_imap -H $HOSTADDRESS$ -p 993 -S",
            config=imaps_config).categories.add(cat_mail)
        factories.MonCheckCommand(
            name="check_ldap",
            command_line=
            "$USER1$/check_ldap -H $HOSTADDRESS$ -b dc=init,dc=at -3",
            config=ldap_config).categories.add(cat_serv)
        factories.MonCheckCommand(
            name="check_https",
            command_line="$USER1$/check_http -S -H $HOSTADDRESS$ -C 30",
            config=https_config).categories.add(cat_web)
        factories.MonCheckCommand(
            name="check_imap",
            command_line="$USER1$/check_imap -H $HOSTADDRESS$ -p 143",
            config=imap_config).categories.add(cat_mail)
        factories.MonCheckCommand(
            name="check_pop3s",
            command_line="$USER1$/check_pop3 -H $HOSTADDRESS$ -p 995 -S",
            config=pop3s_config).categories.add(cat_mail)
        factories.MonCheckCommand(
            name="check_smtps",
            command_line="$USER1$/check_smtps -H $HOSTADDRESS$ -p 465 -S",
            config=smtps_config).categories.add(cat_mail)
    # domain name tree: devices below get the top-level node ("")
    dnt = domain_name_tree()
    _top_level_dtn = dnt.get_domain_tree_node("")
    # device_group
    print("Creating device and device group.")
    first_devg = factories.DeviceGroup(name="server_group")
    # the local machine itself becomes the first (server) device
    first_dev = factories.Device(
        name=process_tools.get_machine_name(),
        device_group=first_devg,
        domain_tree_node=_top_level_dtn,
    )
    factories.DeviceGroup(name="client_group")
    factories.DeviceGroup(name="switch_group")
    print("Creating device configurations.")
    # no longer needed, now done via icsw config --sync
    if False:
        configs = [
            # (name of the service, should be assigned?, part icswServiceEnum?)
            ("cluster-server", True, True),
            ("collectd-server", True, True),
            ("grapher-server", True, True),
            ("discovery-server", True, True),
            ("monitor-server", True, True),
            ("monitor-slave", True, True),
            ("rrd-collector", True, False),
            ("server", True, False),
        ]
        for (service_name, assign, enum_service) in configs:
            config_service_enum = None
            if enum_service:
                # enum members use underscores where service names use dashes
                enum_name = icswServiceEnum[service_name.replace('-', '_')].name
                config_service_enum = ConfigServiceEnum.objects.get(
                    enum_name=enum_name, )
            config = factories.Config(
                name=service_name,
                # config_catalog=first_cc,
                config_service_enum=config_service_enum,
                server_config=True,
            )
            if assign:
                factories.DeviceConfig(device=first_dev, config=config)
    print("Creating monitoring periods.")
    # 24x7 period referenced by all templates and contacts below
    initial_mon_period = factories.MonPeriod(name="always",
                                             sun_range="00:00-24:00",
                                             mon_range="00:00-24:00",
                                             tue_range="00:00-24:00",
                                             wed_range="00:00-24:00",
                                             thu_range="00:00-24:00",
                                             fri_range="00:00-24:00",
                                             sat_range="00:00-24:00")
    first_st = factories.MonServiceTempl(
        name="dummy_service_template",
        nsc_period=initial_mon_period,
        nsn_period=initial_mon_period,
    )
    _first_dt = factories.MonDeviceTempl(
        name="dummy_device_template",
        mon_service_templ=first_st,
        mon_period=initial_mon_period,
        not_period=initial_mon_period,
        host_check_command=host_check_command.objects.get(
            Q(name="check-host-alive")),
    )
    # presence of the ucr binary marks a Univention Corporate Server host
    is_ucs = os.path.isfile("/usr/sbin/ucr")
    # the create_cluster script adds an admin user
    # if there are no users, or in case of an ucs system, if only this one new admin exists,
    # then we want an admin and a user user
    users = user.objects.all()
    empty_install = users.count() == 0
    new_install = (users.count() == 1 and users[0].login == 'admin'
                   and users[0].login_count == 0)
    if empty_install or (is_ucs and new_install):
        print('Creating user and groups.')
        # wipe any pre-seeded accounts before recreating them
        user.objects.all().delete()
        group.objects.all().delete()
        # group / users
        _group = factories.Group(
            groupname="group",
            homestart="/",
            gid=100,
        )
        _group.allowed_device_groups.add(first_devg)
        _user = factories.User(
            login="******",
            uid=400,
            group=_group,
            password="******",
        )
        _user.allowed_device_groups.add(first_devg)
        _first_mc = factories.MonContact(
            user=_user,
            snperiod=initial_mon_period,
            hnperiod=initial_mon_period,
        )
        _admin = user.objects.create_superuser(
            "admin",
            "*****@*****.**",
            "admin",
        )
        # we need contacts for all initial users so that they can access icinga
        factories.MonContact(
            user=_admin,
            snperiod=initial_mon_period,
            hnperiod=initial_mon_period,
        )
        _admin.allowed_device_groups.add(first_devg)
    # network
    if is_ucs:
        # derive the primary interface from the route towards a public IP,
        # then ask ucr for its authoritative settings
        if_address = get_local_ip_address("62.99.204.238")
        # print if_address
        if_name = get_interface_by_ip(if_address)
        # print if_name
        # NOTE(review): p.stdout.read() yields bytes under Python 3, so the
        # str split "\n" would raise TypeError — confirm interpreter/encoding.
        p = subprocess.Popen(
            ['ucr', 'get', 'interfaces/%s/address' % (if_name)],
            stdout=subprocess.PIPE)
        if_address = p.stdout.read().strip().split("\n")[0]
        p = subprocess.Popen(
            ['ucr', 'get', 'interfaces/%s/network' % (if_name)],
            stdout=subprocess.PIPE)
        if_network = p.stdout.read().strip().split("\n")[0]
        p = subprocess.Popen(
            ['ucr', 'get', 'interfaces/%s/broadcast' % (if_name)],
            stdout=subprocess.PIPE)
        if_broadcast = p.stdout.read().strip().split("\n")[0]
        p = subprocess.Popen(
            ['ucr', 'get', 'interfaces/%s/netmask' % (if_name)],
            stdout=subprocess.PIPE)
        if_netmask = p.stdout.read().strip().split("\n")[0]
        p = subprocess.Popen(['ucr', 'get', 'gateway'],
                             stdout=subprocess.PIPE)
        out = p.stdout.read().strip().split("\n")[0]
        if_gateway = out
    else:
        print(
            "Not installed on UCS, /usr/sbin/ucr not found. Using python-netifaces."
        )
        if_address = get_local_ip_address("62.99.204.238")
        if_name = get_interface_by_ip(if_address)
        if_netmask = get_netmask_by_interface(if_name)
        if_broadcast = get_broadcast_by_interface(if_name)
        # network address = netmask AND broadcast
        if_network = str(
            ipvx_tools.IPv4(if_netmask) & ipvx_tools.IPv4(if_broadcast))
        if_gateway = get_default_gateway_linux()
    print('Creating network objects.')
    _network = factories.Network(
        identifier="lan",
        network_type=network_type.objects.get(Q(identifier="o")),
        name="lan",
        network=if_network,
        broadcast=if_broadcast,
        netmask=if_netmask,
        gateway=if_gateway,
    )
    _netdevice = factories.NetDevice(
        device=first_dev,
        devname=if_name,
        routing=True,
        netdevice_speed=netdevice_speed.objects.get(
            Q(speed_bps=1000000000) & Q(full_duplex=True)
            & Q(check_via_ethtool=True)),
        snmp_network_type=snmp_network_type.objects.get(Q(if_type=6)),
    )
    _net_ip = factories.NetIp(
        ip=if_address,
        network=_network,
        netdevice=_netdevice,
        domain_tree_node=_top_level_dtn,
    )
def check_ext_com(self):
    """Poll the pending WMI query commands and, once all of them have
    finished, synchronise this device's netdevices and IPs with the
    reported adapter data.

    Reads ``self._ext_coms`` (mapping of model name -> external command
    object exposing ``finished()`` / ``communicate()`` / ``result``).
    On clean completion the outputs for ``NETWORK_ADAPTER_MODEL`` and
    ``NETWORK_ADAPTER_CONFIGURATION_MODEL`` are parsed and matching
    ``netdevice`` / ``net_ip`` rows are created or updated in the DB.

    Returns nothing; all results are persisted and logged.
    """
    if all(ext_com.finished() is not None for ext_com in self._ext_coms.values()):
        # collect (stdout, stderr) pairs of every finished command
        outputs = {
            ext_com_key: ext_com.communicate()
            for ext_com_key, ext_com in self._ext_coms.items()
        }
        any_err = False
        for ext_com_key, ext_com in self._ext_coms.items():
            if ext_com.result != 0:
                any_err = True
                self.log("Error querying {}, output:".format(ext_com_key),
                         logging_tools.LOG_LEVEL_ERROR)
                self.log("Stdout: {}".format(outputs[ext_com_key][0]),
                         logging_tools.LOG_LEVEL_ERROR)
                self.log("Stderr: {}".format(outputs[ext_com_key][1]),
                         logging_tools.LOG_LEVEL_ERROR)
            if outputs[ext_com_key][1]:
                # stderr content is only a warning if the exit code was clean
                self.log(
                    "Query for {} wrote to stderr: {}".format(
                        ext_com_key, outputs[ext_com_key][1]),
                    logging_tools.LOG_LEVEL_WARN)
        if not any_err:
            network_adapter_data = WmiUtils.parse_wmic_output(
                outputs[self.NETWORK_ADAPTER_MODEL][0])
            network_adapter_configuration_data = WmiUtils.parse_wmic_output(
                outputs[self.NETWORK_ADAPTER_CONFIGURATION_MODEL][0])
            nd_speed_lut = netdevice_speed.build_lut()
            updated_nds, created_nds, created_ips, existing_ips = [], [], [], []
            # iterate by adapter since only adapters are filtered
            for adapter in network_adapter_data:
                adapter_index = int(adapter['Index'])
                adapter_name = adapter['Name']
                # corresponding adapter and adapter_configuration have same index according to some sources
                # http://blogs.technet.com/b/heyscriptingguy/archive/2011/10/07/use-powershell-to-identify-your-real-network-adapter.aspx
                # http://blogs.technet.com/b/heyscriptingguy/archive/2005/06/14/how-can-i-associate-a-network-connection-with-an-ip-address.aspx
                adapter_configuration = next(
                    c for c in network_adapter_configuration_data
                    if int(c['Index']) == adapter_index)
                device_netdevices = netdevice.objects.filter(
                    device=self.device)
                # find existing dev by idx or else by name
                present_nds = [
                    nd for nd in device_netdevices
                    if nd.wmi_interface_index == adapter_index
                ]
                if not present_nds:
                    present_nds = [
                        nd for nd in device_netdevices
                        if nd.devname == adapter_name
                    ]
                if len(present_nds) > 1:
                    self.log(
                        "Error: Found multiple netdevices matching specification:"
                        + "Index: {}; Name: {}; Net devices: {}".format(
                            adapter['Index'], adapter['Name'], present_nds)
                    )
                else:
                    if present_nds:  # only one
                        nd = present_nds[0]
                        updated_nds.append(nd)
                    else:
                        nd = netdevice(
                            device=self.device,
                            wmi_interface_index=adapter_index,
                        )
                        created_nds.append(nd)
                    nd.devname = adapter_name
                    nd.macaddr = adapter['MACAddress'] or ""  # must not be None
                    nd.mtu = adapter_configuration['MTU']
                    nd.speed = int(adapter['Speed'])
                    nd.netdevice_speed = nd_speed_lut.get(
                        int(adapter['Speed']), nd_speed_lut.get(0))
                    nd.save()
                    for ip_found in WmiUtils.WmiList.handle(
                            adapter_configuration['IPAddress']):
                        try:
                            ip_found_struct = ipvx_tools.IPv4(ip_found)
                        except ValueError:
                            # e.g. IPv6 entries: skip with a warning
                            self.log(
                                "Found IP which is not supported: {}".format(
                                    ip_found),
                                logging_tools.LOG_LEVEL_WARN)
                        else:
                            # find ipv4 subnet
                            netmasks_found = []
                            for _nm in WmiUtils.WmiList.handle(
                                    adapter_configuration["IPSubnet"]):
                                try:
                                    netmasks_found.append(
                                        ipvx_tools.IPv4(_nm))
                                except ValueError:
                                    pass
                            if not netmasks_found:
                                self.log(
                                    "No netmask found among: {}".format(
                                        adapter['IPSubnet']))
                            else:
                                netmask_found_struct = netmasks_found[0]
                                _gws = []
                                for _gw in WmiUtils.WmiList.handle(
                                        adapter_configuration[
                                            "DefaultIPGateway"]):
                                    try:
                                        _gws.append(ipvx_tools.IPv4(_gw))
                                    except ValueError:
                                        pass
                                gw_found_struct = _gws[0] if _gws else None
                                cur_nw = network.objects.get_or_create_network(
                                    network_addr=ip_found_struct
                                    & netmask_found_struct,
                                    netmask=netmask_found_struct,
                                    gateway=gw_found_struct,
                                    context="WMI",
                                )
                                try:
                                    nip = net_ip.objects.get(netdevice=nd,
                                                             ip=ip_found)
                                    existing_ips.append(nip)
                                except net_ip.DoesNotExist:
                                    try:
                                        nip = net_ip(
                                            netdevice=nd,
                                            ip=ip_found,
                                            network=cur_nw,
                                        )
                                        nip.save()
                                        created_ips.append(nip)
                                    except ValidationError as e:
                                        self.log(
                                            "Failed to create ip {} for netdevice {}: {}"
                                            .format(ip_found, nd, e),
                                            logging_tools.LOG_LEVEL_ERROR)
                                        # BUGFIX: traceback.format_exc() takes an
                                        # optional limit, not the exception; passing
                                        # `e` raised TypeError under Python 3
                                        self.log(traceback.format_exc())
            self.log("Created {}, updated {}, created {}, found {}".format(
                # BUGFIX: first count previously reported len(created_ips)
                # instead of the created net devices
                logging_tools.get_plural("net device", len(created_nds)),
                logging_tools.get_plural("net device", len(updated_nds)),
                logging_tools.get_plural("ip", len(created_ips)),
                logging_tools.get_plural("existing ip", len(existing_ips)),
            ))
        # all external commands are done (success or failure) -> finish the run
        # NOTE(review): original indentation lost; confirm finish() was not
        # restricted to the error-free path
        self.finish()
def post(self, request):
    """Copy the full network configuration of one device onto others.

    POST parameters (``request.POST``):
      * ``source_dev`` — pk of the device whose netdevices/IPs/peers are copied
      * ``all_devs``   — JSON list of target device pks (source itself excluded)
      * ``copy_coms``  — truthy first char ("1"/"t"/"y") also copies
        com-capabilities and SNMP schemes

    Existing netdevices on each target are deleted (MAC/fake-MAC preserved
    per devname), non-loopback IPs are shifted by the target's 1-based
    offset, and peerings / VLAN / bridge relations are re-created.
    Responds via ``request.xml_response``.
    """
    _post = request.POST
    source_dev = device.objects.get(pk=_post["source_dev"])
    # NOTE(review): [0] raises IndexError if copy_coms comes in empty — confirm callers
    copy_coms = True if _post["copy_coms"].lower()[0] in ["1", "t", "y"
                                                          ] else False
    target_devs = device.objects.exclude(pk=source_dev.pk).filter(
        pk__in=json.loads(_post["all_devs"])).prefetch_related(
            "netdevice_set", "netdevice_set__netdevice_speed",
            "netdevice_set__snmp_network_type", "netdevice_set__net_ip_set",
            "netdevice_set__net_ip_set__network",
            "netdevice_set__net_ip_set__network__network_type").order_by(
                "name")
    if len(target_devs):
        logger = logging.getLogger("cluster.network")
        # increment applied per offset step when shifting copied IPs
        diff_ip = ipvx_tools.IPv4("0.0.0.1")
        logger.info("source device is %s" % (str(source_dev)))
        logger.info("{}: {}".format(
            logging_tools.get_plural("target device", len(target_devs)),
            ", ".join([str(cur_dev) for cur_dev in target_devs])))
        # read peer_informations: map source netdevice pk -> [(remote nd | None, penalty)]
        src_nds = source_dev.netdevice_set.all().values_list("pk",
                                                             flat=True)
        peer_dict = {}
        for peer_info in peer_information.objects.filter(
                Q(s_netdevice__in=src_nds) | Q(d_netdevice__in=src_nds)):
            s_local, d_local = (peer_info.s_netdevice_id in src_nds,
                                peer_info.d_netdevice_id in src_nds)
            # print "*", s_local, d_local
            if s_local and d_local:
                if peer_info.s_netdevice_id != peer_info.d_netdevice_id:
                    # peering between two netdevices of the same device
                    logger.critical("host peering detection, not handled")
                else:
                    # self-peering: store None as remote marker
                    peer_dict.setdefault(peer_info.s_netdevice_id, []).append(
                        (None, peer_info.penalty))
            elif s_local:
                peer_dict.setdefault(peer_info.s_netdevice_id, []).append(
                    (peer_info.d_netdevice, peer_info.penalty))
            else:
                peer_dict.setdefault(peer_info.d_netdevice_id, []).append(
                    (peer_info.s_netdevice, peer_info.penalty))
        for target_num, target_dev in enumerate(target_devs):
            # offset is 1-based so every target gets distinct shifted IPs
            offset = target_num + 1
            logger.info("operating on {}, offset is {:d}".format(
                str(target_dev), offset))
            if target_dev.bootnetdevice_id:
                logger.info("removing bootnetdevice {}".format(
                    str(target_dev.bootnetdevice)))
                target_dev.bootnetdevice = None
                target_dev.save()
            # preserve mac/fakemac addresses (keyed by devname, non-zero only)
            mac_dict, fmac_dict = ({}, {})
            for cur_nd in target_dev.netdevice_set.all():
                if int(cur_nd.macaddr.replace(":", ""), 16):
                    mac_dict[cur_nd.devname] = cur_nd.macaddr
                if int(cur_nd.fake_macaddr.replace(":", ""), 16):
                    fmac_dict[cur_nd.devname] = cur_nd.fake_macaddr
                # remove all netdevices
                cur_nd.delete()
            vlan_master_dict = {}
            bridge_master_dict = {}
            src_dict, dst_dict = ({}, {})
            # copy from source
            for cur_nd in source_dev.netdevice_set.all().prefetch_related(
                    "netdevice_speed", "snmp_network_type", "net_ip_set",
                    "net_ip_set__network", "net_ip_set__domain_tree_node",
                    "net_ip_set__network__network_type"):
                src_dict[cur_nd.devname] = cur_nd
                # remember VLAN / bridge masters by name; re-linked after all
                # netdevices exist on the target
                if cur_nd.master_device_id:
                    vlan_master_dict[
                        cur_nd.devname] = cur_nd.master_device.devname
                if cur_nd.bridge_device_id:
                    bridge_master_dict[
                        cur_nd.devname] = cur_nd.bridge_device.devname
                new_nd = cur_nd.copy()
                dst_dict[new_nd.devname] = new_nd
                if new_nd.devname in mac_dict:
                    new_nd.macaddr = mac_dict[new_nd.devname]
                if new_nd.devname in fmac_dict:
                    new_nd.fake_macaddr = fmac_dict[new_nd.devname]
                new_nd.device = target_dev
                new_nd.save()
                for cur_ip in cur_nd.net_ip_set.all().prefetch_related(
                        "network", "network__network_type"):
                    new_ip = cur_ip.copy()
                    new_ip.netdevice = new_nd
                    if cur_ip.network.network_type.identifier != "l":
                        # increase IP for non-loopback addresses
                        ip_val = ipvx_tools.IPv4(cur_ip.ip)
                        for _seq in range(offset):
                            ip_val += diff_ip
                        new_ip.ip = str(ip_val)
                        # keep stepping until the address is unique
                        while not new_ip.is_unique:
                            ip_val += diff_ip
                            new_ip.ip = str(ip_val)
                    new_ip.save()
                # peering
                if cur_nd.pk in peer_dict:
                    for target_nd, penalty in peer_dict[cur_nd.pk]:
                        if target_nd is None:
                            # local peer
                            peer_information(
                                s_netdevice=new_nd,
                                d_netdevice=new_nd,
                                penalty=penalty,
                            ).save()
                        else:
                            # NOTE(review): import inside the loop; could live
                            # at module level
                            from django.db.utils import IntegrityError
                            try:
                                # remote peer
                                peer_information(
                                    s_netdevice=new_nd,
                                    d_netdevice=target_nd,
                                    penalty=penalty,
                                ).save()
                            except IntegrityError:
                                request.xml_response.warn(
                                    "cannot create peer", logger)
            # vlan masters
            for dst_name, src_name in list(vlan_master_dict.items()):
                dst_dict[dst_name].master_device = dst_dict[src_name]
                dst_dict[dst_name].save()
            # bridge masters
            for dst_name, src_name in list(bridge_master_dict.items()):
                dst_dict[dst_name].bridge_device = dst_dict[src_name]
                dst_dict[dst_name].save()
            if copy_coms:
                target_dev.com_capability_list.clear()
                for src_com in source_dev.com_capability_list.all():
                    target_dev.com_capability_list.add(src_com)
                target_dev.snmp_schemes.clear()
                for src_scheme in source_dev.snmp_schemes.all():
                    target_dev.snmp_schemes.add(src_scheme)
        request.xml_response.info(
            "copied network settings for {}".format(
                logging_tools.get_plural("device", len(target_devs)), ),
            logger)
    else:
        logger = logging.getLogger("cluster.network")
        request.xml_response.error("no target_devices", logger)
def deploy(create_dir):
    """Deploy the generated named (BIND) configuration and zone files from
    ``create_dir`` to every nameserver in ``Zone.Meta.nameservers``, then
    reload or restart them (slaves after masters).

    Local servers (hostname match) are updated via file copies; remote ones
    via scp/ssh as root.  ``Zone.opts.deploy_map`` ("src:dst,...") may remap
    a server's IP to the address actually used for the remote commands.
    """
    # optional src-IP -> dst-IP remapping for the deployment transport
    d_map = {}
    if Zone.opts.deploy_map:
        for _entry in Zone.opts.deploy_map.strip().split(","):
            _src_ip, _dst_ip = _entry.strip().split(":")
            d_map[ipvx_tools.IPv4(_src_ip)] = ipvx_tools.IPv4(_dst_ip)
    for _entry in Zone.Meta.nameservers:
        _entry.deploy_ip = d_map.get(_entry.ip, _entry.ip)
    print("Deploying to {}: {}".format(
        logging_tools.get_plural("Nameserver",
                                 len(Zone.Meta.nameservers)),
        ", ".join([str(_entry) for _entry in Zone.Meta.nameservers]),
    ))
    _dyn_dir = os.path.join(Zone.opts.named_run_dir, "dyn")
    _zone_file = os.path.join(Zone.opts.named_conf_dir, "zones")
    _zone_src_dir = os.path.join(create_dir, "zones", "dyn")
    for _srv in Zone.Meta.nameservers:
        s_time = time.time()
        print("")
        # local if the server's short name matches this host's name
        _local = _srv.name.split(".")[0] == Zone.opts.hostname
        _master = _srv.name in [
            _entry.name for _entry in Zone.Meta.primary
        ]
        print("... to {} ({} {})".format(
            _srv.name,
            "local" if _local else "remote",
            "master" if _master else "slave",
        ))
        if _local:
            # local deploy: copy the master/slave zone include file in place
            shutil.copyfile(
                os.path.join(
                    create_dir, "conf",
                    "{}_zones".format("master" if _master else "slave")),
                _zone_file,
            )
            os.chown(_zone_file, Zone.opts.uid, Zone.opts.gid)
            # recreate the dynamic-zone directory from scratch
            if os.path.isdir(_dyn_dir):
                shutil.rmtree(_dyn_dir)
            os.mkdir(_dyn_dir)
            os.chown(_dyn_dir, Zone.opts.uid, Zone.opts.gid)
            if _master:
                # copy zone files
                for _entry in os.listdir(_zone_src_dir):
                    _dst = os.path.join(_dyn_dir, _entry)
                    shutil.copyfile(
                        os.path.join(_zone_src_dir, _entry),
                        _dst,
                    )
                    os.chown(_dst, Zone.opts.uid, Zone.opts.gid)
        else:
            # remote deploy: build a list of copy tuples (src, dst[, scp-opts])
            # and shell command strings, executed below via scp/ssh
            _cmd_list = [
                (
                    os.path.join(
                        create_dir, "conf", "{}_zones".format(
                            "master" if _master else "slave")),
                    _zone_file,
                ),
                "chown {:d}:{:d} {}".format(
                    Zone.opts.uid,
                    Zone.opts.gid,
                    _zone_file,
                ),
            ]
            if Zone.opts.restart:
                _cmd_list.extend([
                    "rm -rf {}".format(_dyn_dir, ),
                    "mkdir {}".format(_dyn_dir, ),
                    "chown -R {:d}:{:d} {}".format(
                        Zone.opts.uid,
                        Zone.opts.gid,
                        _dyn_dir,
                    )
                ])
            if _master:
                _cmd_list.extend([(
                    os.path.join(create_dir, "zones", "dyn", "*"),
                    _dyn_dir,
                    "-pr",
                ), "chown {:d}:{:d} {}/*".format(
                    Zone.opts.uid,
                    Zone.opts.gid,
                    _dyn_dir,
                )])
            print("{}".format(
                logging_tools.get_plural("remote command",
                                         len(_cmd_list))))
            for _cmd in _cmd_list:
                if isinstance(_cmd, tuple):
                    # copy
                    if len(_cmd) == 3:
                        _opts = _cmd[2]
                    else:
                        _opts = "-p"
                    _rcom = "scp {} {} root@{}:{}".format(
                        _opts, _cmd[0], _srv.deploy_ip, _cmd[1])
                else:
                    # command
                    _rcom = "ssh root@{} {}".format(
                        _srv.deploy_ip,
                        _cmd,
                    )
                # NOTE(review): exit status/output are captured but never
                # checked — a failed scp/ssh is silently ignored; confirm
                _stat, _out = Zone.call_command(_rcom)
        e_time = time.time()
        print("deployment took {}".format(
            logging_tools.get_diff_time_str(e_time - s_time)))
    print("")
    # reload / restart nameservers, masters first, then slaves
    _mode = "restart" if Zone.opts.restart else "reload"
    for _is_master in [True, False]:
        for _srv in Zone.Meta.nameservers:
            _local = _srv.name.split(".")[0] == Zone.opts.hostname
            _master = _srv.name in [
                _entry.name for _entry in Zone.Meta.primary
            ]
            if _master == _is_master:
                if _local:
                    print("{}ing local {} {}".format(
                        _mode, "master" if _master else "slave",
                        str(_srv)))
                    Zone.call_command("/etc/init.d/named {}".format(_mode))
                else:
                    print("{}ing remote {} {}".format(
                        _mode, "master" if _master else "slave",
                        str(_srv)))
                    Zone.call_command(
                        "ssh root@{} /etc/init.d/named {}".format(
                            _srv.deploy_ip, _mode))
    print("")