def directory(request: Request, response: Response):
    """Render directory.j2.xml for all inbound interconnections.

    Maps every sip profile to its realm, gathers the inbound ('in:' prefixed)
    interconnections engaged to those profiles, and groups the resulting user
    entries by realm. Returns the rendered XML response with HTTP 200, or an
    empty string with HTTP 500 if anything raises.
    """
    try:
        pipe = rdbconn.pipeline()
        # sip profile name -> realm
        profilenames = rdbconn.smembers('nameset:sipprofile')
        for profilename in profilenames:
            pipe.hget(f'sipprofile:{profilename}', 'realm')
        sipprofiles = dict(zip(profilenames, pipe.execute()))
        # collect inbound interconnection ids engaged to any sip profile
        for profilename in profilenames:
            pipe.smembers(f'engagement:sipprofile:{profilename}')
        intconnameids = [member for members in pipe.execute()
                         for member in members if member.startswith('in:')]
        for intconnameid in intconnameids:
            pipe.hmget(f'intcon:{intconnameid}', 'sipprofile', 'sipaddrs', 'secret', 'authscheme', 'routing', 'ringready')
        directories = dict()
        for intconnameid, detail in zip(intconnameids, pipe.execute()):
            intconname = getaname(intconnameid)
            profilename = detail[0]
            sipaddrs = fieldjsonify(detail[1])
            secret = detail[2]
            authscheme = detail[3]
            routing = detail[4]
            ringready = fieldjsonify(detail[5])
            # IP auth trusts the peer addresses; DIGEST relies on secret only
            if authscheme == 'IP':
                password, cidrs = DEFAULT_PASSWORD, sipaddrs
            elif authscheme == 'DIGEST':
                password, cidrs = secret, list()
            else:
                password, cidrs = secret, sipaddrs
            for _profilename, _realm in sipprofiles.items():
                if _profilename == profilename:
                    a1hash = hashlib.md5(f'{intconname}:{_realm}:{password}'.encode()).hexdigest()
                    entry = {'id': intconname, 'cidrs': cidrs, 'a1hash': a1hash,
                             'routing': routing, 'ringready': ringready}
                    directories.setdefault(_realm, list()).append(entry)
        result = templates.TemplateResponse("directory.j2.xml",
                                            {"request": request, "directories": directories},
                                            media_type="application/xml")
        response.status_code = 200
    except Exception as e:
        response.status_code, result = 500, str()
        logify(f"module=liberator, space=fsxmlapi, section=directory, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def acl(request: Request, response: Response):
    """Render acl.j2.xml from sip-profile realms and the defined ACL set.

    Scans `sipprofile:*` keys for {profilename: realm} and `base:acl:*` keys
    for the defined ACLs, each expanded to
    [{'name', 'action', 'rules': [{'action', 'key', 'value'}]}].
    Returns the rendered XML with HTTP 200, or an empty string with HTTP 500.
    """
    try:
        pipe = rdbconn.pipeline()
        # sip profile name -> realm, collected via full key scan
        KEYPATTERN = 'sipprofile:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        for mainkey in mainkeys:
            pipe.hget(mainkey, 'realm')
        sipprofiles = {getnameid(mainkey): realm
                       for mainkey, realm in zip(mainkeys, pipe.execute())}
        # defined ACLs, each rule string expanded into an action/key/value dict
        KEYPATTERN = 'base:acl:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        for mainkey in mainkeys:
            pipe.hgetall(mainkey)
        defined_acls = list()
        for detail in pipe.execute():
            if detail:
                rules = [{'action': rule[0], 'key': rule[1], 'value': rule[2]}
                         for rule in map(listify, fieldjsonify(detail.get('rules')))]
                defined_acls.append({'name': detail.get('name'),
                                     'action': detail.get('action'),
                                     'rules': rules})
        result = templates.TemplateResponse("acl.j2.xml",
                                            {"request": request,
                                             "sipprofiles": sipprofiles,
                                             "defined_acls": defined_acls},
                                            media_type="application/xml")
        response.status_code = 200
    except Exception as e:
        response.status_code, result = 500, str()
        logify(f"module=liberator, space=fsxmlapi, section=acl, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def acl(request: Request, response: Response):
    """Render acl.j2.xml (cfgapi) for ACLs engaged as local_network_acl.

    Maps sip profiles to realms, then collects each profile's
    local_network_acl value — skipping the builtin ACL names — and expands
    those ACL hashes to [{'name', 'action', 'rules'}]. Returns the rendered
    XML with HTTP 200, or an empty string with HTTP 500 on error.
    """
    try:
        pipe = rdbconn.pipeline()
        # sip profile name -> realm
        profilenames = rdbconn.smembers('nameset:sipprofile')
        for profilename in profilenames:
            pipe.hget(f'sipprofile:{profilename}', 'realm')
        sipprofiles = dict(zip(profilenames, pipe.execute()))
        # ACL names referenced by the profiles, excluding builtins
        for profilename in profilenames:
            pipe.hget(f'sipprofile:{profilename}', 'local_network_acl')
        engagedacls = [aclname for aclname in pipe.execute()
                       if aclname not in _BUILTIN_ACLS_]
        for engagedacl in engagedacls:
            pipe.hgetall(f'base:acl:{engagedacl}')
        acls = list()
        for detail in pipe.execute():
            if detail:
                acls.append({'name': detail.get('name'),
                             'action': detail.get('action'),
                             'rules': fieldjsonify(detail.get('rules'))})
        result = fstpl.TemplateResponse("acl.j2.xml",
                                        {"request": request,
                                         "sipprofiles": sipprofiles,
                                         "acls": acls},
                                        media_type="application/xml")
        response.status_code = 200
    except Exception as e:
        response.status_code, result = 500, str()
        logify(f"module=liberator, space=cfgapi, section=acl, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def acl(request: Request, response: Response):
    """Render acl.j2.xml from inbound interconnection IPs and defined ACLs.

    Scans `intcon:in:*` keys and, for interconnections with no auth_username
    set, accumulates their sip_ips per sip profile; scans `base:acl:*` for
    the defined ACLs. Returns the rendered XML with HTTP 200, or an empty
    string with HTTP 500 on error.
    """
    try:
        pipe = rdbconn.pipeline()
        # sip profile -> far-end ip list of inbound interconnections
        KEYPATTERN = 'intcon:in:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        for mainkey in mainkeys:
            pipe.hmget(mainkey, 'sipprofile', 'sip_ips', 'auth_username')
        sipprofile_ips = dict()
        for details in pipe.execute():
            if details:
                # include only interconnections with no auth_username set
                if not fieldjsonify(details[2]):
                    sipprofile = details[0]
                    sip_ips = fieldjsonify(details[1])
                    sipprofile_ips.setdefault(sipprofile, list()).extend(sip_ips)
        # defined ACLs, each rule string expanded into an action/key/value dict
        KEYPATTERN = 'base:acl:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        for mainkey in mainkeys:
            pipe.hgetall(mainkey)
        defined_acls = list()
        for detail in pipe.execute():
            if detail:
                rules = [{'action': rule[0], 'key': rule[1], 'value': rule[2]}
                         for rule in map(listify, fieldjsonify(detail.get('rules')))]
                defined_acls.append({'name': detail.get('name'),
                                     'action': detail.get('action'),
                                     'rules': rules})
        result = templates.TemplateResponse("acl.j2.xml",
                                            {"request": request,
                                             "sipprofile_ips": sipprofile_ips,
                                             "defined_acls": defined_acls},
                                            media_type="application/xml")
        response.status_code = 200
    except Exception as e:
        response.status_code, result = 500, str()
        logify(f"module=liberator, space=fsxmlapi, section=acl, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def sip(request: Request, response: Response):
    """Render sip-setting.j2.xml: net aliases, sip profiles and their gateways.

    Aggregates via redis key scans: the network aliases
    ({alias: {address: {'listen', 'advertise'}}}), every sip profile hash,
    the outbound interconnections engaged per profile, and the gateway hashes
    referenced by those interconnections. Returns the rendered XML with
    HTTP 200, or an empty string with HTTP 500 on error.
    """
    try:
        pipe = rdbconn.pipeline()
        # netalias name -> {address: {'listen': ..., 'advertise': ...}}
        KEYPATTERN = 'base:netalias:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        for mainkey in mainkeys:
            pipe.hget(mainkey, 'addresses')
        netaliases = dict()
        for mainkey, detail in zip(mainkeys, pipe.execute()):
            addresses = list(map(listify, fieldjsonify(detail)))
            netaliases[getnameid(mainkey)] = {address[0]: {'listen': address[1],
                                                           'advertise': address[2]}
                                              for address in addresses}
        # sip profile name -> profile data
        KEYPATTERN = 'sipprofile:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        for mainkey in mainkeys:
            pipe.hgetall(mainkey)
        sipprofiles = dict()
        for mainkey, detail in zip(mainkeys, pipe.execute()):
            sipprofiles[getnameid(mainkey)] = jsonhash(detail)
        # sip profile name -> [outbound interconnection names]
        KEYPATTERN = 'intcon:out:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        # BUGFIX: drop the `*_gateways` keys (also matched by the scan pattern)
        # BEFORE zipping; the original queued hget only for non-gateway keys but
        # zipped the results against the unfiltered key list, misaligning them.
        intconkeys = [mainkey for mainkey in mainkeys
                      if not mainkey.endswith('_gateways')]
        for intconkey in intconkeys:
            pipe.hget(intconkey, 'sipprofile')
        profile_intcons_maps = dict()
        for intconkey, profilename in zip(intconkeys, pipe.execute()):
            # BUGFIX: record the interconnection name; the original appended the
            # profile name and tested membership of the wrong value.
            intconname = getnameid(intconkey)
            intconnames = profile_intcons_maps.setdefault(profilename, list())
            if intconname not in intconnames:
                intconnames.append(intconname)
        # sip profile name -> unique gateway names of its outbound intcons
        profile_gwnames_maps = dict()
        for profile, intcons in profile_intcons_maps.items():
            for intcon in intcons:
                pipe.hkeys(f'intcon:out:{intcon}:_gateways')
            profile_gwnames_maps[profile] = list(
                set([gw for gws in pipe.execute() for gw in gws]))
        # sip profile name -> gateway hashes
        profile_gateways_maps = dict()
        for profile, gwnames in profile_gwnames_maps.items():
            for gwname in gwnames:
                pipe.hgetall(f'gateway:{gwname}')
            profile_gateways_maps[profile] = list(map(jsonhash, pipe.execute()))
        for sipprofile in sipprofiles:
            if 'gateways' in sipprofiles[sipprofile]:
                # .get guards profiles that declare gateways but engage no
                # outbound interconnection (original raised KeyError there)
                sipprofiles[sipprofile]['gateways'] = profile_gateways_maps.get(sipprofile, list())
        result = templates.TemplateResponse("sip-setting.j2.xml",
                                            {"request": request,
                                             "sipprofiles": sipprofiles,
                                             'netaliases': netaliases,
                                             'NODEID': NODEID},
                                            media_type="application/xml")
        response.status_code = 200
    except Exception as e:
        response.status_code, result = 500, str()
        logify(f"module=liberator, space=fsxmlapi, section=sip-setting, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def sip(request: Request, response: Response):
    """Render sip-setting.j2.xml (cfgapi) and publish advertising vars.

    Resolves each sip profile's sip/rtp addresses through this node's
    netalias records, attaches the gateways of its outbound
    interconnections, publishes the per-profile advertising vars to
    NODEID_CHANNEL for deferred processing, and renders the template.
    Returns the rendered XML with HTTP 200, or an empty string with
    HTTP 500 on error.
    """
    try:
        pipe = rdbconn.pipeline()
        fsgvars = list()
        # netalias name -> this node's address record
        netaliasnames = rdbconn.smembers('nameset:netalias')
        for netaliasname in netaliasnames:
            pipe.hget(f'base:netalias:{netaliasname}', 'addresses')
        netaliases = dict()
        for netaliasname, detail in zip(netaliasnames, pipe.execute()):
            # [0] raises IndexError (caught below) if no record matches NODEID
            record = [address for address in fieldjsonify(detail)
                      if address.get('member') == NODEID][0]
            netaliases[netaliasname] = record
        # sip profile name -> profile data with resolved listen/advertise ips
        profilenames = rdbconn.smembers('nameset:sipprofile')
        for profilename in profilenames:
            pipe.hgetall(f'sipprofile:{profilename}')
        sipprofiles = dict()
        for profilename, detail in zip(profilenames, pipe.execute()):
            sipdetail = jsonhash(detail)
            sip_address = sipdetail.pop('sip_address')
            rtp_address = sipdetail.pop('rtp_address')
            ext_sip_ip = netaliases[sip_address]['advertise']
            sipdetail.update({'sip_ip': netaliases[sip_address]['listen'],
                              'ext_sip_ip': ext_sip_ip,
                              'rtp_ip': netaliases[rtp_address]['listen'],
                              'ext_rtp_ip': netaliases[rtp_address]['advertise']})
            sipprofiles[profilename] = sipdetail
            fsgvars.append(f'{profilename}:advertising={ext_sip_ip}')
        # sip profile name -> [outbound interconnection names]
        map_profilename_intconnames = {}
        for name in sipprofiles.keys():
            engaged = rdbconn.smembers(f'engagement:sipprofile:{name}')
            map_profilename_intconnames[name] = [intcon for intcon in engaged
                                                 if intcon.startswith('out:')]
        # sip profile name -> unique gateway names of those interconnections
        map_profilename_gwnames = dict()
        for profilename, intcons in map_profilename_intconnames.items():
            for intcon in intcons:
                pipe.hkeys(f'intcon:{intcon}:_gateways')
            map_profilename_gwnames[profilename] = list(
                set([gw for gws in pipe.execute() for gw in gws]))
        # sip profile name -> non-empty gateway hashes
        map_profilename_gateways = dict()
        for profilename, gwnames in map_profilename_gwnames.items():
            for gwname in gwnames:
                pipe.hgetall(f'base:gateway:{gwname}')
            map_profilename_gateways[profilename] = [gwdata for gwdata in
                                                     map(jsonhash, pipe.execute())
                                                     if gwdata]
        for sipprofile in sipprofiles:
            gateways = map_profilename_gateways.get(sipprofile)
            if gateways:
                sipprofiles[sipprofile]['gateways'] = gateways
        # hand the profile advertising vars off to a separated worker thread
        rdbconn.publish(NODEID_CHANNEL,
                        json.dumps({'portion': 'cfgapi:sip',
                                    'delay': 30,
                                    'fsgvars': fsgvars,
                                    'requestid': get_request_uuid()}))
        result = fstpl.TemplateResponse("sip-setting.j2.xml",
                                        {"request": request,
                                         "sipprofiles": sipprofiles,
                                         'netaliases': netaliases,
                                         'NODEID': NODEID},
                                        media_type="application/xml")
        response.status_code = 200
    except Exception as e:
        response.status_code, result = 500, str()
        logify(f"module=liberator, space=cfgapi, section=sip-setting, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def directory(request: Request, response: Response):
    """Render directory.j2.xml from scanned inbound interconnections.

    Scans `sipprofile:*` keys for {profilename: realm} and `intcon:in:*`
    keys for the inbound interconnection details, then groups the resulting
    user entries by realm. Returns the rendered XML with HTTP 200, or an
    empty string with HTTP 500 on error.
    """
    try:
        pipe = rdbconn.pipeline()
        # sip profile name -> realm, collected via full key scan
        KEYPATTERN = 'sipprofile:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        for mainkey in mainkeys:
            pipe.hget(mainkey, 'realm')
        sipprofiles = {getnameid(mainkey): realm
                       for mainkey, realm in zip(mainkeys, pipe.execute())}
        # inbound interconnection details
        KEYPATTERN = 'intcon:in:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, batch = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += batch
        for mainkey in mainkeys:
            pipe.hmget(mainkey, 'sipprofile', 'sipaddrs', 'secret', 'authscheme', 'routing')
        directories = dict()
        for mainkey, details in zip(mainkeys, pipe.execute()):
            intconname = getnameid(mainkey)
            profilename = details[0]
            sipaddrs = fieldjsonify(details[1])
            secret = details[2]
            authscheme = details[3]
            routing = details[4]
            # IP auth trusts the peer addresses; DIGEST relies on secret only
            if authscheme == 'IP':
                password, cidrs = DEFAULT_PASSWORD, sipaddrs
            elif authscheme == 'DIGEST':
                password, cidrs = secret, list()
            else:
                password, cidrs = secret, sipaddrs
            for _profilename, _realm in sipprofiles.items():
                if _profilename == profilename:
                    a1hash = hashlib.md5(f'{intconname}:{_realm}:{password}'.encode()).hexdigest()
                    entry = {'id': intconname, 'cidrs': cidrs,
                             'a1hash': a1hash, 'routing': routing}
                    directories.setdefault(_realm, list()).append(entry)
        result = templates.TemplateResponse("directory.j2.xml",
                                            {"request": request, "directories": directories},
                                            media_type="application/xml")
        response.status_code = 200
    except Exception as e:
        response.status_code, result = 500, str()
        logify(f"module=liberator, space=fsxmlapi, section=directory, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def nftupdate(data):
    """Regenerate /etc/nftables.conf from redis state and apply it.

    Collects the firewall white/black sets, rtp port range, this node's
    netalias records, per-profile sip/rtp ports and far-end addresses, and
    the access-layer services; renders nftables.j2.conf to a staging file,
    loads it with `nft -f`, and on success rotates it into place.
    Returns True on success, False when rendering/loading fails or raises.
    """
    result = True
    try:
        requestid = data.get('requestid')
        pipe = rdbconn.pipeline()
        # firewall sets
        whiteset = rdbconn.smembers('firewall:whiteset')
        blackset = rdbconn.smembers('firewall:blackset')
        # rtp port range
        rtpportrange = list(map(fieldjsonify,
                                rdbconn.hmget('cluster:attributes', 'rtp_start_port', 'rtp_end_port')))
        # netalias name -> this node's address record
        netaliasnames = rdbconn.smembers('nameset:netalias')
        for netaliasname in netaliasnames:
            pipe.hget(f'base:netalias:{netaliasname}', 'addresses')
        netaliases = dict()
        for netaliasname, detail in zip(netaliasnames, pipe.execute()):
            record = [address for address in fieldjsonify(detail)
                      if address.get('member') == NODEID][0]
            netaliases[netaliasname] = record
        # sip profiles: listen addresses/ports plus non-loopback far ends
        profilenames = rdbconn.smembers('nameset:sipprofile')
        sipprofiles = dict()
        for profilename in profilenames:
            sip_port, sips_port, sip_address, rtp_address = rdbconn.hmget(
                f'sipprofile:{profilename}',
                'sip_port', 'sips_port', 'sip_address', 'rtp_address')
            sip_ip = netaliases[sip_address]['listen']
            rtp_ip = netaliases[rtp_address]['listen']
            for intconnameid in rdbconn.smembers(f'engagement:sipprofile:{profilename}'):
                pipe.hget(f'intcon:{intconnameid}', 'rtpaddrs')
            farendrtpaddrs = {rtpaddr for rtpaddrstr in pipe.execute()
                              for rtpaddr in fieldjsonify(rtpaddrstr)}
            farendsipaddrs = rdbconn.smembers(f'farendsipaddrs:in:{profilename}')
            sipprofiles[profilename] = {
                'siptcpports': {fieldjsonify(port) for port in (sip_port, sips_port) if port},
                'sipudpports': fieldjsonify(sip_port),
                'sip_ip': sip_ip,
                'rtp_ip': rtp_ip,
                'farendrtpaddrs': [ip for ip in farendrtpaddrs if not IPv4Network(ip).is_loopback],
                'farendsipaddrs': [ip for ip in farendsipaddrs if not IPv4Network(ip).is_loopback]}
        # access layers: white/black ips and listen sockets per service
        layernames = rdbconn.smembers('nameset:access:service')
        accesslayers = dict()
        for layername in layernames:
            transports, sip_port, sips_port, sip_address, whiteips, blackips = rdbconn.hmget(
                f'access:service:{layername}',
                'transports', 'sip_port', 'sips_port', 'sip_address', 'whiteips', 'blackips')
            whiteips = fieldjsonify(whiteips)
            blackips = fieldjsonify(blackips)
            if not whiteips:
                # no whitelist configured means allow any source
                whiteips = {'0.0.0.0/0'}
            transports = fieldjsonify(transports)
            sipudpports = fieldjsonify(sip_port) if 'udp' in transports else None
            siptcpports = []
            if 'tcp' in transports:
                siptcpports.append(fieldjsonify(sip_port))
            if 'tls' in transports:
                siptcpports.append(fieldjsonify(sips_port))
            accesslayers[layername] = {'whiteips': whiteips,
                                       'blackips': blackips,
                                       'sip_ip': netaliases[sip_address]['listen'],
                                       'sipudpports': sipudpports,
                                       'siptcpports': set(siptcpports)}
        # render the rule file to a staging path and try to load it
        template = _NFT.get_template("nftables.j2.conf")
        rendered = template.render(whiteset=whiteset, blackset=blackset,
                                   rtpportrange=rtpportrange,
                                   sipprofiles=sipprofiles,
                                   accesslayers=accesslayers,
                                   dftbantime=_DFTBANTIME)
        nftfile = '/etc/nftables.conf.new'
        with open(nftfile, 'w') as nftf:
            nftf.write(rendered)
        proc = Popen(['/usr/sbin/nft', '-f', nftfile], stdout=PIPE, stderr=PIPE)
        _, stderr = bdecode(proc.communicate())
        if stderr:
            result = False
            stderr = stderr.replace('\n', '')
            logify(f"module=liberator, space=basemgr, action=nftupdate, requestid={requestid}, nftfile={nftfile}, error={stderr}")
        else:
            # rotate the staged file into place, keeping the previous config
            old = osrename('/etc/nftables.conf', '/etc/nftables.conf.old')
            new = osrename('/etc/nftables.conf.new', '/etc/nftables.conf')
            if not (old and new):
                logify(f"module=liberator, space=basemgr, action=osrename, requestid={requestid}, subtasks=rename:{old}:{new}")
            else:
                logify(f"module=liberator, space=basemgr, action=nftupdate, requestid={requestid}, result=success")
    except Exception as e:
        result = False
        logify(f"module=liberator, space=basemgr, action=nftupdate, data={data}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def kaminstance(data):
    """Tear down and/or launch a kamailio access-layer instance.

    `data['_layer']`, when set, names an existing instance to stop (pkill by
    pidfile) and whose cfg/lua files are deleted. `data['layer']`, when set,
    names a layer to start: its service hash and this node's netalias record
    are rendered into layer.j2.cfg / layer.j2.lua and kamailio is launched.
    Returns True on success, False when the launch fails or anything raises.
    """
    result = True
    try:
        PIDDIR = '/run/kamailio'
        CFGDIR = '/usr/local/etc/kamailio'
        requestid = data.get('requestid')
        layer = data.get('layer')
        _layer = data.get('_layer')
        # ------------------------------------------------------------
        # TEARDOWN THE EXISTENT INSTANCE
        # ------------------------------------------------------------
        if _layer:
            _pidfile = f'{PIDDIR}/{_layer}.pid'
            _cfgfile = f'{CFGDIR}/{_layer}.cfg'
            _luafile = f'{CFGDIR}/{_layer}.lua'
            stopproc = Popen(['/bin/pkill', '-F', _pidfile], stdout=PIPE, stderr=PIPE)
            _, stderr = bdecode(stopproc.communicate())
            if stderr:
                stderr = stderr.replace('\n', '')
                logify(f"module=liberator, space=basemgr, action=kaminstance.kamend, requestid={requestid}, error={stderr}")
            else:
                logify(f"module=liberator, space=basemgr, action=kaminstance.kamend, requestid={requestid}, result=success")
            cfgdel = osdelete(_cfgfile)
            luadel = osdelete(_luafile)
            logify(f"module=liberator, space=basemgr, action=kaminstance.filedel, requestid={requestid}, cfgdel={cfgdel}, luadel={luadel}")
        # ------------------------------------------------------------
        # LAUNCH THE NEW INSTANCE
        # ------------------------------------------------------------
        if layer:
            pipe = rdbconn.pipeline()
            kambin = '/usr/local/sbin/kamailio'
            pidfile = f'{PIDDIR}/{layer}.pid'
            cfgfile = f'{CFGDIR}/{layer}.cfg'
            luafile = f'{CFGDIR}/{layer}.lua'
            kamcfgs = jsonhash(rdbconn.hgetall(f'access:service:{layer}'))
            # resolve this node's listen/advertise addresses from the netalias
            netaliases = fieldjsonify(rdbconn.hget(f'base:netalias:{kamcfgs.get("sip_address")}', 'addresses'))
            addresses = [address for address in netaliases
                         if address.get('member') == NODEID][0]
            kamcfgs.update({'listen': addresses.get('listen'),
                            'advertise': addresses.get('advertise')})
            if 'topology_hiding' in kamcfgs:
                kamcfgs.update({'randomsecret': randomstr()})
            # per-domain src/dst socket policies
            domains = kamcfgs.get('domains')
            for domain in domains:
                pipe.hgetall(f'access:policy:{domain}')
            policies = dict()
            swipaddrs = set()
            for domain, socket in zip(domains, pipe.execute()):
                srcsocket = listify(socket.get('srcsocket'))
                dstsocket = listify(socket.get('dstsocket'))
                policies[domain] = {'srcsocket': {'transport': srcsocket[0],
                                                  'ip': srcsocket[1],
                                                  'port': srcsocket[2]},
                                    'dstsocket': {'transport': dstsocket[0],
                                                  'ip': dstsocket[1],
                                                  'port': dstsocket[2]}}
                swipaddrs.add(dstsocket[1])
            kamcfgs.update({'policies': policies})
            # default domain: the single configured one, or a placeholder
            dftdomain = domains[0] if len(domains) == 1 else 'default.domain'
            # configuration
            cfgtemplate = _KAM.get_template("layer.j2.cfg")
            cfgstream = cfgtemplate.render(_KAMCONST=_KAMCONST, kamcfgs=kamcfgs,
                                           layer=layer, piddir=PIDDIR,
                                           cfgdir=CFGDIR, nodeid=NODEID)
            with open(cfgfile, 'w') as kmf:
                kmf.write(cfgstream)
            # localization
            luatemplate = _KAM.get_template("layer.j2.lua")
            luastream = luatemplate.render(_KAMCONST=_KAMCONST, kamcfgs=kamcfgs,
                                           layer=layer, swipaddrs=swipaddrs,
                                           jsonpolicies=json.dumps(policies),
                                           dftdomain=dftdomain)
            with open(luafile, 'w') as lf:
                lf.write(luastream)
            startproc = Popen([kambin, '-S', '-M', '16', '-P', pidfile, '-f', cfgfile],
                              stdout=PIPE, stderr=PIPE)
            _, stderr = bdecode(startproc.communicate())
            if stderr:
                result = False
                stderr = stderr.replace('\n', '')
                logify(f"module=liberator, space=basemgr, action=kaminstance.kamrun, requestid={requestid}, cfgfile={cfgfile}, error={stderr}")
            else:
                logify(f"module=liberator, space=basemgr, action=kaminstance.kamrun, requestid={requestid}, result=success")
    except Exception as e:
        result = False
        logify(f"module=liberator, space=basemgr, action=kaminstance, data={data}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
def nftupdate():
    """Regenerate /etc/nftables.conf from sip-profile state and apply it.

    Collects the rtp port range, this node's netalias records and, per sip
    profile, its listen addresses/ports plus the far-end sip/rtp addresses;
    renders nftables.j2.conf to a staging file, loads it with `nft -f`, and
    on success rotates it into place. Returns True on success, False when
    loading fails or anything raises.
    """
    result = True
    try:
        pipe = rdbconn.pipeline()
        # rtp port range
        rtpportrange = list(map(fieldjsonify,
                                rdbconn.hmget('cluster:attributes', 'rtp_start_port', 'rtp_end_port')))
        # netalias name -> this node's address record
        netaliasnames = rdbconn.smembers('nameset:netalias')
        for netaliasname in netaliasnames:
            pipe.hget(f'base:netalias:{netaliasname}', 'addresses')
        netaliases = dict()
        for netaliasname, detail in zip(netaliasnames, pipe.execute()):
            record = [address for address in fieldjsonify(detail)
                      if address.get('member') == NODEID][0]
            netaliases[netaliasname] = record
        # sip profiles: listen addresses/ports plus far-end address strings
        profilenames = rdbconn.smembers('nameset:sipprofile')
        sipprofiles = dict()
        for profilename in profilenames:
            sip_port, sips_port, sip_address, rtp_address = rdbconn.hmget(
                f'sipprofile:{profilename}',
                'sip_port', 'sips_port', 'sip_address', 'rtp_address')
            sip_ip = netaliases[sip_address]['listen']
            rtp_ip = netaliases[rtp_address]['listen']
            for intconnameid in rdbconn.smembers(f'engagement:sipprofile:{profilename}'):
                pipe.hget(f'intcon:{intconnameid}', 'rtpaddrs')
            farendrtpaddrs = {rtpaddr for rtpaddrstr in pipe.execute()
                              for rtpaddr in fieldjsonify(rtpaddrstr)}
            farendsipaddrs = rdbconn.smembers(f'farendsipaddrs:in:{profilename}')
            sipprofiles[profilename] = {
                'siptcpports': {fieldjsonify(port) for port in (sip_port, sips_port) if port},
                'sipudpports': {fieldjsonify(sip_port)},
                'sip_ip': sip_ip,
                'rtp_ip': rtp_ip,
                'farendrtpaddrs': stringify(farendrtpaddrs, ','),
                'farendsipaddrs': stringify(farendsipaddrs, ',')}
        # render the rule file to a staging path and try to load it
        template = _ENV.get_template("nftables.j2.conf")
        rendered = template.render(sipprofiles=sipprofiles, rtpportrange=rtpportrange)
        nftfile = '/etc/nftables.conf.new'
        with open(nftfile, 'w') as nftf:
            nftf.write(rendered)
        proc = Popen(['/usr/sbin/nft', '-f', nftfile], stdout=PIPE, stderr=PIPE)
        _, stderr = bdecode(proc.communicate())
        if stderr:
            result = False
            stderr = stderr.replace('\n', '')
            logify(f"module=liberator, space=basemgr, action=nftupdate, nftfile={nftfile}, error={stderr}")
        else:
            # rotate the staged file into place, keeping the previous config
            old = osrename('/etc/nftables.conf', '/etc/nftables.conf.old')
            new = osrename('/etc/nftables.conf.new', '/etc/nftables.conf')
            if not (old and new):
                logify(f"module=liberator, space=basemgr, action=osrename, subtasks=rename:{old}:{new}")
            else:
                logify(f"module=liberator, space=basemgr, action=nftupdate, result=success")
    except Exception as e:
        result = False
        logify(f"module=liberator, space=basemgr, action=nftupdate, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result