def menu(request):
    """Django view for the menu-permission page.

    GET: renders menu.html with the current role's menus and the full menu
    list (template receives all locals via ``locals()`` — do not rename
    local variables here without checking the template).
    POST: either updates a role's menu set (type == 'ask') or returns the
    menus for a given user_type as JSON-ish text via error().
    """
    menu_role = sql_menu_role()
    # NOTE(review): assumes sql_menu_role() returns at least one row — an
    # empty result would raise IndexError here; confirm against the schema.
    user_type = menu_role[0]['id']
    # Menus owned by this user type
    menu = sql_user_type_menu(user_type)
    # The full menu list
    all_menu = sql_url_menu()
    if request.method == 'POST':
        asd = request.POST.get('asd', None)
        action = request.POST.get('type', None)
        user_type = request.POST.get('user_type', None)
        if user_type:
            user_type = int(user_type)
        else:
            # Missing user_type: error code 4 appears to be the generic
            # failure/invalid-input code in this project.
            ret = error(4)
            return HttpResponse(ret)
        # Update operation: replace the role's menu assignment
        if action == 'ask':
            result = sql_update_menu_role(user_type, asd)
            if result == 'ok':
                ret = error(0)
            else:
                ret = error(4)
        else:
            # Lookup operation: return the menus owned by this user type
            menu = sql_user_type_menu(user_type)
            ret = error(0, menu)
        return HttpResponse(ret)
    return render(request, 'menu.html', locals())
def create_ldif(networks, ignore_size_change):
    """Serialize *networks* to LDIF and write it to the configured file.

    :param networks: mapping of ip_network objects to their mreg metadata
                     (description, vlan, category, location, ...).
    :param ignore_size_change: passed through to common.utils.write_file to
                               skip the changed-lines sanity check.
    """
    # LDIF attribute names for the range bounds, keyed by IP version.
    range_attrs = {
        4: ('uioIpAddressRangeStart', 'uioIpAddressRangeEnd'),
        6: ('uioIpV6AddressRangeStart', 'uioIpV6AddressRangeEnd'),
    }
    buf = io.StringIO()
    base_dn = cfg['ldif']['dn']
    buf.write(entry_string(make_head_entry(cfg)))
    for network, info in networks.items():
        attr_start, attr_end = range_attrs[network.version]
        name = info['network']
        entry = {
            'dn': f'cn={name},{base_dn}',
            'cn': name,
            'objectClass': ('top', 'ipNetwork', 'uioIpNetwork'),
            'description': info['description'],
            'ipNetworkNumber': str(network.network_address),
            'ipNetmaskNumber': str(network.netmask),
            'uioNetworkCategory': sorted(info['category'].split(' ')),
            'uioNetworkLocation': sorted(info['location'].split(' ')),
            attr_start: int(network.network_address),
            attr_end: int(network.broadcast_address),
        }
        # vlan is optional; omit the attribute entirely when unset.
        if info['vlan'] is not None:
            entry['uioVlanID'] = info['vlan']
        buf.write(entry_string(entry))
    try:
        common.utils.write_file(cfg['default']['filename'], buf,
                                ignore_size_change=ignore_size_change)
    except common.utils.TooManyLineChanges as e:
        error(e.message)
def modify_pwd(request):
    """Django view: change the logged-in user's password.

    POST params: old_pwd, new_pwd, new_again_pwd.
    Verifies the old password, checks the two new passwords match, then
    stores the new hash. Passwords are salted with the username before
    hashing (md5_16) — note md5 is a weak hash; flagged for review.
    GET: renders the password form.
    """
    if request.method == 'POST':
        # NOTE(review): request.REQUEST was removed in Django 1.9 —
        # confirm the Django version in use, or switch to request.POST.
        old_pwd = request.REQUEST.get("old_pwd", None)
        new_pwd = request.REQUEST.get("new_pwd", None)
        new_again_pwd = request.REQUEST.get("new_again_pwd", None)
        if old_pwd and new_pwd and new_again_pwd:
            if new_pwd == new_again_pwd:
                # Fetch the stored password hash for the current session user
                username = request.session["username"]
                password = sql_username_password(username)
                # Salt with the username, then hash, to verify the old password
                old_pwd += username
                old_pwd = md5_16(old_pwd)
                if password == old_pwd:
                    new_pwd += username
                    new_pwd = md5_16(new_pwd)
                    result = sql_update_password(new_pwd, username)
                    if result == 'ok':
                        ret = error(0)
                    else:
                        ret = error(2)
                else:
                    ret = error(1)
            else:
                ret = error(3)
        else:
            # Bug fix: previously no branch assigned `ret` when a field was
            # missing, so HttpResponse(ret) raised NameError (HTTP 500).
            # Code 4 is used as the generic invalid-input code elsewhere.
            ret = error(4)
        return HttpResponse(ret)
    elif request.method == 'GET':
        return render(request, 'modify-pwd.html')
def main():
    """Entry point: parse arguments, load config, connect to mreg and export."""
    global cfg, conn, logger
    argp = argparse.ArgumentParser(
        description="Export hostpolicies from mreg as a textfiles.")
    argp.add_argument("--config",
                      default="get-hostpolicy.conf",
                      help="path to config file (default: %(default)s)")
    argp.add_argument('--force', action='store_true', help='force update')
    args = argp.parse_args()
    cfg = configparser.ConfigParser()
    cfg.optionxform = str  # keep option names case-sensitive
    cfg.read(args.config)
    # Both sections are mandatory; bail out early if either is missing.
    for section in ('default', 'mreg'):
        if section not in cfg:
            error(f"Missing section {section} in config file", os.EX_CONFIG)
    common.utils.cfg = cfg
    logger = common.utils.getLogger()
    conn = common.connection.Connection(cfg['mreg'])
    dump_hostpolicies(args.force)
def create_ldif(hosts, srvs, ignore_size_change):
    """Serialize hosts (plus their cnames) and srv records to LDIF.

    :param hosts: iterable of host dicts (name, comment, contact,
                  ipaddresses, cnames).
    :param srvs: iterable of srv dicts (name).
    :param ignore_size_change: passed through to common.utils.write_file.
    """
    buf = io.StringIO()
    base_dn = cfg['ldif']['dn']

    def host_entry(name):
        # Minimal uioHostinfo entry for the given host name.
        return {
            'dn': f'host={name},{base_dn}',
            'host': name,
            'objectClass': 'uioHostinfo',
        }

    buf.write(entry_string(make_head_entry(cfg)))
    for host in hosts:
        entry = host_entry(host["name"])
        entry['uioHostComment'] = host['comment']
        entry['uioHostContact'] = host['contact']
        # Collect the distinct, non-empty mac addresses across all IPs.
        macs = {ip['macaddress'] for ip in host['ipaddresses'] if ip['macaddress']}
        if macs:
            entry['uioHostMacAddr'] = sorted(macs)
        buf.write(entry_string(entry))
        # Each cname gets its own minimal entry.
        for cname in host["cnames"]:
            buf.write(entry_string(host_entry(cname["name"])))
    for srv in srvs:
        buf.write(entry_string(host_entry(srv["name"])))
    try:
        common.utils.write_file(cfg['default']['filename'], buf,
                                ignore_size_change=ignore_size_change)
    except common.utils.TooManyLineChanges as e:
        error(e.message)
def main():
    """Entry point: read config, connect to mreg and download zonefiles."""
    global cfg, conn, logger
    argp = argparse.ArgumentParser(
        description="Download zonefiles from mreg.")
    argp.add_argument(
        '--config',
        default='get-zonefiles.conf',
        help='path to config file (default: get-zonefiles.conf)')
    argp.add_argument('--force', action='store_true', default=False,
                      help='force update of all zones')
    args = argp.parse_args()
    # allow_no_value: zone entries may appear without an overriding filename.
    cfg = configparser.ConfigParser(allow_no_value=True)
    cfg.read(args.config)
    for section in ('default', 'mreg', 'zones'):
        if section not in cfg:
            error(f"Missing section {section} in config file", os.EX_CONFIG)
    common.utils.cfg = cfg
    logger = common.utils.getLogger()
    conn = common.connection.Connection(cfg['mreg'], logger=logger)
    get_zonefiles(args.force)
def main():
    """Entry point: read config, connect to mreg and write dhcp config."""
    global cfg, conn, logger
    argp = argparse.ArgumentParser(
        description="Create dhcp config from mreg.")
    argp.add_argument(
        "--config",
        default="get-dhcphosts.conf",
        help="path to config file (default: get-dhcphosts.conf)")
    argp.add_argument(
        "--one-file",
        action="store_true",
        help="Write all hosts to one file, instead of per domain")
    argp.add_argument('--force', action='store_true', help='force update')
    args = argp.parse_args()
    cfg = configparser.ConfigParser()
    cfg.read(args.config)
    for section in ('default', 'mreg'):
        if section not in cfg:
            error(f"Missing section {section} in config file", os.EX_CONFIG)
    common.utils.cfg = cfg
    logger = common.utils.getLogger()
    conn = common.connection.Connection(cfg['mreg'])
    dhcphosts(args)
def main():
    """Entry point: parse args, validate config and export hosts as LDIF."""
    global cfg, conn, logger
    parser = argparse.ArgumentParser(description="Export hosts from mreg as a ldif.")
    parser.add_argument("--config",
                        default="hosts-ldif.conf",
                        help="path to config file (default: %(default)s)")
    parser.add_argument('--force-check', action='store_true',
                        help='force refresh of data from mreg')
    parser.add_argument('--ignore-size-change', action='store_true',
                        help='ignore size changes')
    args = parser.parse_args()
    cfg = configparser.ConfigParser()
    cfg.optionxform = str  # keep option names case-sensitive for LDIF attributes
    cfg.read(args.config)
    for i in ('default', 'mreg', 'ldif'):
        if i not in cfg:
            error(f"Missing section {i} in config file", os.EX_CONFIG)
    if 'filename' not in cfg['default']:
        # Fixed: this was an f-string with no placeholders.
        error("Missing 'filename' in default section in config file",
              os.EX_CONFIG)
    common.utils.cfg = cfg
    logger = common.utils.getLogger()
    conn = common.connection.Connection(cfg['mreg'])
    hosts_ldif(args)
def main():
    """Entry point: parse args, validate config and export hostgroups as LDIF."""
    global cfg, conn, logger
    parser = argparse.ArgumentParser(
        description="Export hostgroups from mreg as a ldif.")
    parser.add_argument("--config",
                        default="hostgroup-ldif.conf",
                        help="path to config file (default: %(default)s)")
    parser.add_argument('--force', action='store_true', help='force update')
    args = parser.parse_args()
    cfg = configparser.ConfigParser()
    cfg.optionxform = str  # keep option names case-sensitive for LDIF attributes
    cfg.read(args.config)
    for i in ('default', 'mreg', 'ldif'):
        if i not in cfg:
            error(f"Missing section {i} in config file", os.EX_CONFIG)
    if 'filename' not in cfg['default']:
        # Fixed: this was an f-string with no placeholders.
        error("Missing 'filename' in default section in config file",
              os.EX_CONFIG)
    common.utils.cfg = cfg
    logger = common.utils.getLogger()
    conn = common.connection.Connection(cfg['mreg'])
    url = requests.compat.urljoin(cfg["mreg"]["url"], '/api/v1/hostgroups/')
    hostgroup_ldif(args, url)
def get_zonefiles(force):
    """Fetch zonefiles for all configured zones, under an inter-process lock.

    Creates the work/dest directories, takes a non-blocking file lock, and
    for each zone in the config downloads it when changed (or when *force*).
    Runs the configured postcommand if anything was updated.
    """
    for dirname in ('destdir', 'workdir'):
        common.utils.mkdir(cfg['default'][dirname])
    lockfile = opj(cfg['default']['workdir'], 'lockfile')
    lock = fasteners.InterProcessLock(lockfile)
    if lock.acquire(blocking=False):
        try:
            updated = False
            allzoneinfo = get_current_zoneinfo()
            for zone in cfg['zones']:
                if zone not in allzoneinfo:
                    error(f"Zone {zone} not in mreg")
                # Check if using a overriden filename from config
                if cfg['zones'][zone]:
                    filename = cfg['zones'][zone]
                else:
                    filename = zone
                if update_zone(zone, filename, allzoneinfo[zone]) or force:
                    updated = True
                    get_zone(zone, filename, force)
            if updated and 'postcommand' in cfg['default']:
                common.utils.run_postcommand()
        finally:
            # Bug fix: release the lock even when an exception (or the
            # presumably-exiting error() call) interrupts the loop —
            # previously a crash left the lockfile held.
            lock.release()
    else:
        logger.warning(f"Could not lock on {lockfile}")
def get_extradata(name):
    """Return the contents of "<name>_extra" from the configured extradir.

    Returns None when no extradir is configured or the file does not exist.
    A permission problem is reported via error() with the OS errno.
    """
    if not cfg['default']['extradir']:
        return None
    extrafile = opj(cfg['default']['extradir'], f"{name}_extra")
    try:
        with open(extrafile, 'r') as fh:
            return fh.read()
    except FileNotFoundError:
        # No extra data for this name — that is fine.
        pass
    except PermissionError as e:
        error(f"{e}", code=e.errno)
    return None
def check_changes_size(ipversion, num_current, args, *changes):
    """Guard against unexpectedly large changes to the network set.

    :param ipversion: 4 or 6, used only in messages.
    :param num_current: number of networks currently in mreg.
    :param args: needs .max_size_change (percent) and .force_size_change.
    :param changes: collections of planned changes; their total length is
                    compared against num_current.

    Calls error() when the change ratio exceeds the limit and force is not
    set; otherwise logs the ratio.
    """
    changed = sum(map(len, changes))
    # Bug fix: the original computed diffsize only when num_current and
    # changed were both non-zero, then unconditionally referenced it in the
    # logging branch — raising NameError for an initial import
    # (num_current == 0) or a no-op run (changed == 0). Nothing to enforce
    # or report in those cases, so return early.
    if not num_current or changed == 0:
        return
    diffsize = (changed / num_current) * 100
    if diffsize > args.max_size_change and not args.force_size_change:
        error(
            f"The import will change {diffsize:.0f}% of the ipv{ipversion} networks. "
            f"Limit is {args.max_size_change}%. Requires force.")
    else:
        logging.info(
            f"Changing {diffsize:.0f}% of the ipv{ipversion} networks.")
def compare_with_mreg(ipversion, import_data, mreg_data):
    """Diff imported networks against mreg's current networks.

    :param ipversion: 4 or 6 (not used in the comparison itself).
    :param import_data: dict of network-string -> new attributes.
    :param mreg_data: dict of network-string -> current attributes.
    :returns: (networks_post, networks_patch, networks_delete,
               networks_grow, networks_shrink) — networks to create, to
               patch (per-network dict of changed fields), to delete, and
               the grow/shrink resize mappings.
    """
    # Set algebra on the dict key views: removed, added and shared networks.
    networks_delete = mreg_data.keys() - import_data.keys()
    networks_post = import_data.keys() - mreg_data.keys()
    networks_keep = import_data.keys() & mreg_data.keys()
    networks_patch = defaultdict(dict)
    networks_grow = defaultdict(set)
    networks_shrink = defaultdict(set)
    # Check if a network destined for removal is actually just resized
    for existing in networks_delete:
        existing_net = ipaddress.ip_network(existing)
        for new in networks_post:
            new_net = ipaddress.ip_network(new)
            if subnet_of(existing_net, new_net):
                # The old network fits inside a new one: it grew.
                networks_grow[new].add(existing)
            elif supernet_of(existing_net, new_net):
                # The old network contains the new one: it shrank.
                networks_shrink[existing].add(new)
    # Resized networks are handled as grow/shrink, not delete/create.
    for newnet, oldnets in networks_grow.items():
        networks_delete -= oldnets
        networks_post.remove(newnet)
    for oldnet, newnets in networks_shrink.items():
        check_removable(oldnet, newnets=newnets)
        networks_delete.remove(oldnet)
        networks_post -= newnets
    # Check if networks marked for deletion is removable
    for network in networks_delete:
        check_removable(network)
    if unremoveable_networks:
        error(''.join(unremoveable_networks))
    # Check if networks marked for creation have any overlap with existing networks
    # We also check this serverside, but just in case...
    for network_new in networks_post:
        network_object = ipaddress.ip_network(network_new)
        for network_existing in networks_keep:
            if network_object.overlaps(ipaddress.ip_network(network_existing)):
                error(f"Overlap found between new network {network_new} "
                      f"and existing network {network_existing}")
    # Check which existing networks need to be patched
    for network in networks_keep:
        current_data = mreg_data[network]
        new_data = import_data[network]
        for i in ('description', 'vlan', 'category', 'location'):
            if new_data[i] != current_data[i]:
                networks_patch[network][i] = new_data[i]
    return networks_post, networks_patch, networks_delete, networks_grow, networks_shrink
def overlap_check(network, tree, points):
    """Check *network* against previously seen networks, then record it.

    :param network: an ipaddress.ip_network object.
    :param tree: IntervalTree of integer address ranges -> network.
    :param points: set of integer addresses for one-host networks, which
                   cannot be stored in the tree (begin == end intervals
                   are not allowed).

    Calls error() on any overlap or duplicate; otherwise inserts the
    network into the tree (or its address into *points*).
    """
    # Uses an IntervalTree to do fast lookups of overlapping networks.
    #
    begin = int(network.network_address)
    end = int(network.broadcast_address)
    if tree[begin:end]:
        overlap = tree[begin:end]
        data = [str(i.data) for i in overlap]
        error(f"Network {network} overlaps {data}")
    # For one-host networks, as ipv4 /32 and ipv6 /128, IntervalTree causes
    # a bit extra work as it does not include upper bound in intervals when
    # searching, thus point search fails for a broadcast address. Also one
    # can not add an interval with begin == end, so keep track of one-host
    # networks in a separate "points" set.
    elif network.version == 4 and network.prefixlen == 32 or \
            network.version == 6 and network.prefixlen == 128:
        if begin in points:
            error(f"Network {network} already in file")
        elif tree.overlaps(begin):
            error(f"Network {network} overlaps {tree[begin].pop().data}")
        elif tree.overlaps(begin - 1):
            # begin - 1: catches a range whose (exclusive) end is this address.
            error(f"Network {network} overlaps {tree[begin-1].pop().data}")
        else:
            points.add(begin)
    else:
        tree[begin:end] = network
def read_tags():
    """Parse the configured tagsfile into location_tags and category_tags.

    Lines of the form "NAME : Plassering" are location tags; bare
    alphanumeric names are category tags. Blank lines and '#' comments are
    skipped. Any other line is reported via error().
    """
    if 'tagsfile' in cfg['default']:
        filename = cfg['default']['tagsfile']
    else:
        return
    with open(filename, 'r') as tagfile:
        flag_re = re.compile(
            r"""^
            ((?P<location>[a-zA-Z0-9]+)+\s+:\s+Plassering)
            |(?P<category>[a-zA-Z0-9]+)
            """, re.X)
        for line_number, line in enumerate(tagfile, 1):
            line = line.strip()
            if line.startswith("#") or len(line) == 0:
                continue
            res = flag_re.match(line)
            # Bug fix: a line matching neither alternative (e.g. starting
            # with punctuation) made match() return None, so res.group()
            # raised AttributeError instead of reporting the bad line.
            if res is None:
                error('In {}, wrong format on line: {} - {}'.format(
                    filename, line_number, line))
            elif res.group('location'):
                location_tags.add(res.group('location'))
            elif res.group('category'):
                category_tags.add(res.group('category'))
            else:
                error('In {}, wrong format on line: {} - {}'.format(
                    filename, line_number, line))
def users_edit(request):
    """Admin actions on a user: reset password, delete, or update status.

    POST params:
        username: target user.
        action:   "init" (reset password), "delete", or "update".
        status:   new status value (1/2/3), only for action == "update".

    Admin accounts (user_type == 1) and unknown users are refused.
    """
    if request.method == 'POST':
        username = request.POST.get("username")
        action = request.POST.get("action")
        user_info = sql_my_profile(username)
        if user_info and user_info["user_type"] != 1:
            # Default result — overwritten on success by the branches below.
            ret = error(1, "操作失败了")
            # Reset the password to the default scheme
            if action == "init":
                # Default password: username + reversed username
                new_pwd = username + username[::-1]
                # Salt with the username, then hash, matching login scheme
                new_pwd += username
                new_pwd = md5_16(new_pwd)
                result = sql_update_password(new_pwd, username)
                if result == 'ok':
                    ret = error(0)
                else:
                    ret = error(1, "重置密码失败了")
            elif action == "delete":
                result = delete_user(username)
                if result == 'ok':
                    ret = error(0)
                else:
                    ret = error(1, "删除用户失败了")
            elif action == "update":
                status = request.POST.get("status")
                # Bug fix: int(status) raised TypeError/ValueError (HTTP
                # 500) when "status" was missing or non-numeric; fall
                # through to the default failure response instead.
                if status is not None and status.isdigit():
                    status = int(status)
                    if status in [1, 2, 3]:
                        result = sql_update_status(status, username)
                        if result == 'ok':
                            ret = error(0)
                        else:
                            ret = error(1, "更改用户失败了")
            return HttpResponse(ret)
        else:
            return HttpResponse(json.dumps({"status": -1, "data": "用户信息为空"}))
    return HttpResponse(json.dumps({"status": -1}))
def create_url():
    """Build the mreg dhcphosts API URL from the [mreg] config section.

    Requires 'hosts' (ipv4 / ipv6 / ipv6byipv4); optionally narrows the
    query to a validated 'range'. Configuration problems go to error().
    """
    valid_hosts = ('ipv4', 'ipv6', 'ipv6byipv4')
    path = '/api/v1/dhcphosts/'
    mreg_cfg = cfg['mreg']
    if 'hosts' in mreg_cfg:
        hosts = mreg_cfg['hosts']
        if hosts not in valid_hosts:
            error("'hosts' must be one of 'ipv4', 'ipv6', 'ipv6byipv4'")
        path += f'{hosts}/'
    else:
        error("Missing 'hosts' in mreg section of config")
    if 'range' in mreg_cfg:
        # Validate before appending; a bad range is a config error.
        try:
            ipaddress.ip_network(mreg_cfg['range'])
        except ValueError as e:
            error(f'Invalid range in config: {e}')
        path += mreg_cfg['range']
    return requests.compat.urljoin(cfg["mreg"]["url"], path)
def post(self, channel_id):
    """Update a channel's status via kapacitor.

    Accepts a JSON body with 'status' of 'ACTIVE' or 'INACTIVE' (mapped to
    kapacitor's 'enabled'/'disabled'); anything else is a ResourceError.
    Returns an ok response with the stripped result, or an error response
    when kapacitor fails.
    """
    # Bug fix: this was a plain string, so "{channel_id}" was logged
    # literally instead of being interpolated.
    logger.debug(f"top of POST /channels/{channel_id}")
    body = request.json
    # TODO need to check the user permission to update channel status
    # TODO Convert to Status Enum
    if body['status'] == 'ACTIVE':
        body['status'] = 'enabled'
    elif body['status'] == 'INACTIVE':
        body['status'] = 'disabled'
        logger.debug(body)
    else:
        raise errors.ResourceError(msg=f'Invalid POST data: {body}.')
    result = {}
    try:
        result, msg = kapacitor.update_channel_status(channel_id, body)
    except Exception as e:
        logger.debug(type(e))
        logger.debug(e.args)
        msg = f"Could not update the channel status: {channel_id}; exception: {e} "
        logger.debug(msg)
    logger.debug(result)
    if result:
        return utils.ok(result=meta.strip_meta(result), msg=msg)
    return utils.error(result=result, msg=msg)
def _error(message):
    """Report a parse error, tagging it with the current input line number."""
    error("(unknown) line {}: {}".format(line_number, message))
converter.convert_models(args, parsed_ctds) return 0 except KeyboardInterrupt: print("Interrupted...") return 0 except ApplicationException, e: traceback.print_exc() utils.error("CTDConverter could not complete the requested operation.", 0) utils.error("Reason: " + e.msg, 0) return 1 except ModelError, e: traceback.print_exc() utils.error("There seems to be a problem with one of your input CTDs.", 0) utils.error("Reason: " + e.msg, 0) return 1 except Exception, e: traceback.print_exc() utils.error("CTDConverter could not complete the requested operation.", 0) utils.error("Reason: " + e.msg, 0) return 2 def validate_and_prepare_common_arguments(args): # flatten lists of lists to a list containing elements lists_to_flatten = ["input_files", "blacklisted_parameters"] for list_to_flatten in lists_to_flatten: utils.flatten_list_of_lists(args, list_to_flatten)
return 0 except KeyboardInterrupt: print("Interrupted...") return 0 except ApplicationException, e: traceback.print_exc() utils.error("CTDConverter could not complete the requested operation.", 0) utils.error("Reason: " + e.msg, 0) return 1 except ModelError, e: traceback.print_exc() utils.error("There seems to be a problem with one of your input CTDs.", 0) utils.error("Reason: " + e.msg, 0) return 1 except Exception, e: traceback.print_exc() utils.error("CTDConverter could not complete the requested operation.", 0) utils.error("Reason: " + e.msg, 0) return 2 def validate_and_prepare_common_arguments(args): # flatten lists of lists to a list containing elements lists_to_flatten = ["input_files", "blacklisted_parameters"] for list_to_flatten in lists_to_flatten:
def main(argv=None): if argv is None: argv = sys.argv else: sys.argv.extend(argv) # check that we have, at least, one argument provided # at this point we cannot parse the arguments, because each converter takes different arguments, meaning each # converter will register its own parameters after we've registered the basic ones... we have to do it old school if len(argv) < 2: utils.error("Not enough arguments provided") print( "\nUsage: $ python convert.py [TARGET] [ARGUMENTS]\n\n" + "Where:\n" + " target: one of 'cwl' or 'galaxy'\n\n" + "Run again using the -h/--help option to print more detailed help.\n" ) return 1 # TODO: at some point this should look like real software engineering and use a map containing converter instances # whose keys would be the name of the converter (e.g., cwl, galaxy), but for the time being, only two formats # are supported target = str.lower(argv[1]) if target == 'cwl': from cwl import converter elif target == 'galaxy': from galaxy import converter elif target == '-h' or target == '--help' or target == '--h' or target == 'help': print(program_license) return 0 else: utils.error( "Unrecognized target engine. Supported targets are 'cwl' and 'galaxy'." 
) return 1 utils.info("Using %s converter" % target) try: # Setup argument parser parser = ArgumentParser(prog="CTDConverter", description=program_license, formatter_class=RawDescriptionHelpFormatter, add_help=True) utils.add_common_parameters(parser, program_version_message, program_build_date) # add tool-specific arguments converter.add_specific_args(parser) # parse arguments and perform some basic, common validation args = parser.parse_args() validate_and_prepare_common_arguments(args) # parse the input CTD files into CTDModels parsed_ctds = utils.parse_input_ctds( args.xsd_location, args.input_files, args.output_destination, converter.get_preferred_file_extension()) # let the converter do its own thing converter.convert_models(args, parsed_ctds) return 0 except KeyboardInterrupt: print("Interrupted...") return 0 except ApplicationException, e: traceback.print_exc() utils.error("CTDConverter could not complete the requested operation.", 0) utils.error("Reason: " + e.msg, 0) return 1
def main(argv=None): if argv is None: argv = sys.argv else: sys.argv.extend(argv) # check that we have, at least, one argument provided # at this point we cannot parse the arguments, because each converter takes different arguments, meaning each # converter will register its own parameters after we've registered the basic ones... we have to do it old school if len(argv) < 2: utils.error("Not enough arguments provided") print("\nUsage: $ python convert.py [TARGET] [ARGUMENTS]\n\n" + "Where:\n" + " target: one of 'cwl' or 'galaxy'\n\n" + "Run again using the -h/--help option to print more detailed help.\n") return 1 # TODO: at some point this should look like real software engineering and use a map containing converter instances # whose keys would be the name of the converter (e.g., cwl, galaxy), but for the time being, only two formats # are supported target = str.lower(argv[1]) if target == 'cwl': from cwl import converter elif target == 'galaxy': from galaxy import converter elif target == '-h' or target == '--help' or target == '--h' or target == 'help': print(program_license) return 0 else: utils.error("Unrecognized target engine. 
Supported targets are 'cwl' and 'galaxy'.") return 1 utils.info("Using %s converter" % target) try: # Setup argument parser parser = ArgumentParser(prog="CTDConverter", description=program_license, formatter_class=RawDescriptionHelpFormatter, add_help=True) utils.add_common_parameters(parser, program_version_message, program_build_date) # add tool-specific arguments converter.add_specific_args(parser) # parse arguments and perform some basic, common validation args = parser.parse_args() validate_and_prepare_common_arguments(args) # parse the input CTD files into CTDModels parsed_ctds = utils.parse_input_ctds(args.xsd_location, args.input_files, args.output_destination, converter.get_preferred_file_extension()) # let the converter do its own thing converter.convert_models(args, parsed_ctds) return 0 except KeyboardInterrupt: print("Interrupted...") return 0 except ApplicationException, e: traceback.print_exc() utils.error("CTDConverter could not complete the requested operation.", 0) utils.error("Reason: " + e.msg, 0) return 1