def test_out_of_bounds_negative_index_should_raise(self):
    """A negative index reaching before the start of the range raises IndexError."""
    ip_range = IPRange(IP('10.0.42.0'), IP('10.0.42.127'))
    # The range holds 128 addresses, so -128 is the first and -129 is out of bounds.
    with self.assertRaises(IndexError):
        ip_range[-129]
def __init__(self, ip_addr, net_type):
    """Create a node from a textual IP address and a network-type tag."""
    super(IpNode, self).__init__()
    self.net_type = net_type
    # Parse eagerly so a bad address fails here rather than on first use.
    self._ip = IP(ip_addr)
def packet_in_handler(self, ev):
    """OpenFlow packet-in callback for a routing controller.

    - Learns IP->MAC bindings and graph topology from ARP traffic, and
      answers ARP requests aimed at one of the configured gateway IPs.
    - For IPv4 packets whose destination falls in a gateway subnet of this
      switch, rewrites the Ethernet header, sends the packet, and installs
      a matching flow entry.
    - Other traffic is forwarded/flooded unchanged.

    NOTE(review): the original arrived with its line structure collapsed;
    the indentation below is a reconstruction. In particular the final
    'else' is read as the alternative of the ETH_TYPE_IP branch — confirm
    against the original file.
    """
    msg = ev.msg
    datapath = msg.datapath
    ofproto = datapath.ofproto
    ofp_parser = datapath.ofproto_parser
    in_port = msg.match['in_port']
    dpid = datapath.id
    pkt = packet.Packet(msg.data)
    eth_pkt = pkt.get_protocol(ethernet.ethernet)
    src = eth_pkt.src
    dst = eth_pkt.dst
    # Default action: flood out of every port.
    out_port = datapath.ofproto.OFPP_FLOOD
    if eth_pkt.ethertype == ether_types.ETH_TYPE_ARP:
        arp_pkt = pkt.get_protocol(arp)
        src_ip = arp_pkt.src_ip
        dst_ip = arp_pkt.dst_ip
        # Learn the sender's IP-to-MAC binding.
        self.arp_table[src_ip] = src
        # 66.66.66.66 is the controller's own spoofed address, not a host.
        if src_ip not in self.network and src_ip != '66.66.66.66':
            self.network.add_node(src_ip)
            # Host<->switch link; remember the switch port facing the host.
            self.network.add_edge(dpid, src_ip, attr_dict={'port': in_port})
            self.network.add_edge(src_ip, dpid)
            self.paths.setdefault(src_ip, {})
        # ARP requests for a gateway address are answered by the controller.
        for arr in GATEWAY_IP.values():
            if dst_ip in arr:
                self.reply_arp(datapath, eth_pkt, arp_pkt, src_ip, in_port)
                return
    if eth_pkt.ethertype == ether_types.ETH_TYPE_IP:
        ipv4_pkt = pkt.get_protocol(ipv4.ipv4)
        dst_ip = ipv4_pkt.dst
        src_ip = ipv4_pkt.src
        for ele in GATEWAY_IP[dpid]:
            # Destination inside one of this switch's /24 gateway subnets?
            if IP(dst_ip).make_net('255.255.255.0') == IP(ele).make_net('255.255.255.0'):
                if dst_ip not in self.arp_table:
                    # Unknown destination MAC: ARP for it using the
                    # controller-owned identity, then drop this packet.
                    src = "00:00:00:00:00:66"
                    src_ip = "66.66.66.66"
                    dst = "ff:ff:ff:ff:ff:ff"
                    out_port = ofproto.OFPP_FLOOD
                    self.send_arp(datapath, 1, src, src_ip, dst, dst_ip,
                                  out_port)
                    return
                else:
                    # Route: rewrite dst MAC to the real host and src MAC to
                    # the egress port's address, send, and install a flow.
                    dst = self.arp_table[dst_ip]
                    out_port = self.get_out_port(datapath, src_ip, dst_ip,
                                                 in_port)
                    print(out_port)
                    actions = [ofp_parser.OFPActionSetField(eth_dst=dst)]
                    actions.append(
                        ofp_parser.OFPActionSetField(
                            eth_src=self.switches[dpid][out_port]))
                    actions.append(ofp_parser.OFPActionOutput(out_port))
                    out = ofp_parser.OFPPacketOut(
                        datapath=datapath, buffer_id=msg.buffer_id,
                        in_port=in_port, actions=actions, data=msg.data)
                    datapath.send_msg(out)
                    match = ofp_parser.OFPMatch(
                        in_port=in_port,
                        eth_type=ether_types.ETH_TYPE_IP,
                        eth_dst=dst, eth_src=src)
                    self.add_flow(datapath, 1, match, actions)
                    return
    else:
        # Non-IP traffic (e.g. host-to-host ARP): plain forward/flood.
        # NOTE(review): src_ip/dst_ip here come from the ARP branch above;
        # a frame that is neither ARP nor IPv4 would hit a NameError —
        # confirm intended behavior.
        out_port = self.get_out_port(datapath, src_ip, dst_ip, in_port)
        actions = [ofp_parser.OFPActionOutput(out_port)]
        out = ofp_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
            actions=actions, data=msg.data
        )
        datapath.send_msg(out)
def parse_network_CF(resources_data):
    """Parse the 'Resources' section of a CloudFormation template for
    network-related policy violations.

    Each supported resource Type is checked against a set of criteria;
    every check records a human-readable entry in data_response, and every
    failed check increments error_counter and remembers the offending
    resource key. At the end the CodePipeline job is marked as success or
    failure accordingly.

    Args:
        resources_data: dict — the value of the template's 'Resources' key.

    Returns:
        data_response: dict mapping "<key>-<check>" to either a status
        string or an {"Error": ...} dict, plus a final 'Errors' summary.

    NOTE(review): 'job', 'pipeline_job_success' and 'pipeline_job_fail'
    are module-level names defined elsewhere (the live Lambda handler).
    """
    data_response = {}
    error_counter = 0   # total number of failed checks
    error_keys = []     # resource keys that failed at least one check
    for key in resources_data.keys():
        res = resources_data[key]
        if 'AWS::EC2::VPC' == res['Type']:
            data_response['{}-VPC'.format(key)] = "Present"
            try:
                vpc_properties = res['Properties']
                data_response['{}-VPC Properties'.format(
                    key)] = "Properties exist"
                if 'CidrBlock' in vpc_properties:
                    if IP(vpc_properties['CidrBlock']).iptype() != "PRIVATE":
                        error_counter += 1
                        data_response['{}-VPC-CidrBlock'.format(key)] = {
                            "Error":
                            "IP Addresses should only be RFC1918 compliant.",
                            # BUGFIX: was vpc_properties['Cidr_block'] (wrong
                            # key) — raised KeyError while reporting the error.
                            "Address Type":
                            IP(vpc_properties['CidrBlock']).iptype()
                        }
                        error_keys.append(key)
                    else:
                        data_response['{}-VPC-CidrBlock'.format(key)] = \
                            {vpc_properties['CidrBlock']:
                             IP(vpc_properties['CidrBlock']).iptype()}
            except KeyError:
                data_response['{}-VPC'.format(key)] = {
                    "Error": "VPC Property was not present."
                }
                error_counter += 1
                error_keys.append(key)
        if 'AWS::EC2::Route' == res['Type']:
            # A route must at least reference a route table.
            if 'RouteTableId' in res['Properties'] and \
                    'VpcPeeringConnectionId' in res['Properties']:
                data_response['{}-RouteTableId-VpcPeeringConnection'.format(
                    key)] = "Valid Route"
            elif 'RouteTableId' in res['Properties'] and \
                    'VpcId' in res['Properties']:
                data_response['{}-RouteTableId-VpcId'.format(
                    key)] = "Valid Route"
            elif 'RouteTableId' in res['Properties']:
                # NOTE(review): reuses the '-VpcId' response key of the branch
                # above, so the two cases are indistinguishable in the log.
                data_response['{}-RouteTableId-VpcId'.format(
                    key)] = "Valid Route"
            else:
                data_response['{}-RouteTableId'.format(key)] = \
                    {"Error": "Either property was not present in the EC2::Route."}
                error_counter += 1
                error_keys.append(key)
        if 'EC2::InternetGateway' in res['Type']:
            try:
                # InternetGateway must have a Properties key, even if it is
                # blank with an empty dict {}.
                should_exist = res['Properties']
                data_response['{}-InternetGateway'.format(
                    key)] = "Properties exist"
            except KeyError:
                data_response['{}-InternetGateway'.format(key)] = {
                    # BUGFIX: the message previously said "VpnGatewayId or
                    # InternetGatewayId not present." (copy-paste from the
                    # VPCGatewayAttachment check) although this branch fires
                    # when the Properties key itself is missing.
                    "Error": "InternetGateway Property was not present."
                }
                error_counter += 1
                error_keys.append(key)
        if 'EC2::VPCGatewayAttachment' in res['Type']:
            if 'VpcId' in res['Properties']:
                data_response['VPCGWAttach-VpcId'] = "VpcId referenced"
            if 'VpnGatewayId' in res['Properties'] or \
                    'InternetGatewayId' in res['Properties']:
                data_response[
                    'VPCGWAttach-VPN/IGW'] = "VpnGatewayId or InternetGatewayId present"
            else:
                data_response['{}-VPCGWAttach'.format(key)] = {
                    "Error": "VpnGatewayId or InternetGatewayId not present."
                }
                error_keys.append(key)
                error_counter += 1
        if 'AWS::EC2::VPNGateway' == res['Type']:
            if 'Type' in res['Properties']:
                if res['Properties']['Type'] != "ipsec.1":
                    data_response['{}-VPNGateway'.format(key)] = {
                        "Error": "The only allowed type is 'ipsec.1'."
                    }
                    error_keys.append(key)
                    error_counter += 1
                else:
                    data_response['{}-VPNGateway'.format(
                        key)] = "Valid VPN type present."
        if 'AWS::EC2::VPNGatewayRoutePropagation' == res['Type']:
            if 'RouteTableIds' in res['Properties']:
                if isinstance(res['Properties']['RouteTableIds'], list):
                    data_response['{}-VPNGatewayRoutePropagation'.format(
                        key)] = "Value is list of strings."
                else:
                    data_response['{}-VPNGatewayRoutePropagation'.format(
                        key
                    )] = {
                        "Error":
                        "Value must be list of string corresponding to RouteTableIds."
                    }
                    error_counter += 1
                    error_keys.append(key)
            # NOTE(review): this check overwrites the RouteTableIds entry
            # above because it writes to the same response key.
            if 'VpnGatewayId' in res['Properties']:
                data_response['{}-VPNGatewayRoutePropagation'.format(
                    key)] = "VpnGatewayId present."
            else:
                data_response['{}-VPNGatewayRoutePropagation'.format(key)] = {
                    "Error": "VpnGatewayId is required."
                }
                error_counter += 1
                error_keys.append(key)
        if 'AWS::EC2::VPNConnection' == res['Type']:
            if 'CustomerGatewayId' in res['Properties']:
                data_response['{}-VPNConnection'.format(
                    key)] = "CustomerGatewayId present."
            if 'TransitGatewayId' in res['Properties'] and \
                    'VpnGatewayId' in res['Properties']:
                data_response['{}-Transit_AND_VPNGW'.format(key)] = {
                    "Error":
                    "Both TransitGatewayId AND VpnGatewayId cannot be used within the VPNConnection Type."
                }
                error_keys.append(key)
                error_counter += 1
            if 'TransitGatewayId' in res['Properties'] or \
                    'VpnGatewayId' in res['Properties']:
                data_response['{}-Transit_OR_VPNGW'.format(
                    key
                )] = "Either TransitGatewayId or VpnGatewayId present, but not both."
    # The summary entry is identical in both branches; only the pipeline
    # outcome differs.
    data_response['Errors'] = {
        "Count": "{}".format(error_counter),
        "Keys with errors": error_keys
    }
    if error_counter == 0:
        pipeline_job_success(job)
    else:
        pipeline_job_fail(job)
    print("End of function")
    print(data_response)
    return data_response
def cook_response(self, d, qname, fake_records):
    """Build a spoofed DNS response packet for one query.

    Picks the fake record for the current query type, parses it into the
    record-type-specific fields, and returns the packed DNS answer.

    NOTE(review): 'qtype' is neither a parameter nor a local — it must come
    from an enclosing/global scope; confirm against the full file.
    NOTE(review): the original line structure was collapsed; indentation
    below is a reconstruction. Uses Python 2 (`xrange`).
    """
    fake_record = fake_records[qtype]
    # Create a custom response to the query
    response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap,
                                   qr=1, aa=1, ra=1),
                         q=d.q)
    self.log("cooking the response of type '%s' for %s to %s" %
             (qtype, qname, fake_record))
    # IPv6 needs additional work before inclusion:
    if qtype == "AAAA":
        ipv6 = IP(fake_record)
        ipv6_bin = ipv6.strBin()
        # 128-bit binary string -> list of 16 byte values.
        ipv6_hex_tuple = [
            int(ipv6_bin[i:i + 8], 2) for i in xrange(0, len(ipv6_bin), 8)
        ]
        response.add_answer(
            RR(qname, getattr(QTYPE, qtype),
               rdata=RDMAP[qtype](ipv6_hex_tuple)))
    elif qtype == "SOA":
        # "mname rname serial refresh retry expire minimum"
        mname, rname, t1, t2, t3, t4, t5 = fake_record.split(" ")
        times = tuple([int(t) for t in [t1, t2, t3, t4, t5]])
        mname, rname = rmdots(mname), rmdots(rname)
        response.add_answer(
            RR(qname, getattr(QTYPE, qtype),
               rdata=RDMAP[qtype](mname, rname, times)))
    elif qtype == "NAPTR":
        order, preference, flags, service, regexp, replacement = fake_record.split(
            " ")
        order = int(order)
        preference = int(preference)
        replacement = rmdots(replacement)
        response.add_answer(
            RR(qname, getattr(QTYPE, qtype),
               rdata=RDMAP[qtype](order, preference, flags, service, regexp,
                                  DNSLabel(replacement))))
    elif qtype == "SRV":
        priority, weight, port, target = fake_record.split(" ")
        priority = int(priority)
        weight = int(weight)
        port = int(port)
        target = rmdots(target)
        response.add_answer(
            RR(qname, getattr(QTYPE, qtype),
               rdata=RDMAP[qtype](priority, weight, port, target)))
    elif qtype == "DNSKEY":
        flags, protocol, algorithm, key = fake_record.split(" ")
        flags = int(flags)
        protocol = int(protocol)
        algorithm = int(algorithm)
        key = b64decode(("".join(key)).encode('ascii'))
        response.add_answer(
            RR(qname, getattr(QTYPE, qtype),
               rdata=RDMAP[qtype](flags, protocol, algorithm, key)))
    elif qtype == "RRSIG":
        covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(
            " ")
        covered = getattr(QTYPE, covered)  # NOTE: Covered QTYPE
        algorithm = int(algorithm)
        labels = int(labels)
        # Signature timestamps are YYYYMMDDHHMMSS in GMT.
        orig_ttl = int(orig_ttl)
        sig_exp = int(mktime(strptime(sig_exp + 'GMT', "%Y%m%d%H%M%S%Z")))
        sig_inc = int(mktime(strptime(sig_inc + 'GMT', "%Y%m%d%H%M%S%Z")))
        key_tag = int(key_tag)
        name = rmdots(name)
        sig = b64decode(("".join(sig)).encode('ascii'))
        response.add_answer(
            RR(qname, getattr(QTYPE, qtype),
               rdata=RDMAP[qtype](covered, algorithm, labels, orig_ttl,
                                  sig_exp, sig_inc, key_tag, name, sig)))
    else:
        # Simple record types (A, CNAME, ...): the fake record is the value.
        fake_record = rmdots(fake_record)
        response.add_answer(
            RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](fake_record)))
    response = response.pack()
    return response
def set_ipv4_address(self, address):
    """Parse *address* into an IPy network and cache the derived bounds.

    The cached start is the midpoint of the block and the cached end is the
    second-to-last address (presumably to skip the broadcast address —
    TODO confirm against callers).
    """
    network = IP(address)
    self.__ipv4_address = network
    size = network.len()
    self.__ipv4_start = str(network[int(size / 2)])
    self.__ipv4_end = str(network[size - 2])
def ver_address(self, version=4):
    """Return True when self.address parses as an IP of the given version."""
    try:
        parsed = IP(self.address)
    except ValueError:
        # Not a valid address at all.
        return False
    return parsed.version() == version
def getAddress(self):
    """Like the base implementation, but wraps IPv6 literals in brackets."""
    ip, port = AbstractChecker.getAddress(self)
    if IP(ip).version() == 6:
        # e.g. "::1" -> "[::1]" so it can be combined with a port.
        ip = '[%s]' % ip
    return ip, port
def get_ip(self):
    """Expand the target address range into individual IPs to be scanned."""
    for address in IP(self.target):
        self.ips.append(str(address))
def IPauto(ipl):
    """Expand a network spec and append every address to the global ipList."""
    ipList.extend(str(address) for address in IP(ipl))
def check_IP(self):
    """Return the target if it is a literal IP, otherwise resolve it via DNS."""
    try:
        IP(self.target)
    except ValueError:
        # Not an address: treat it as a hostname.
        return socket.gethostbyname(self.target)
    return self.target
def test_assembled_ipv4_range_should_parse(self):
    """'a.b.c.x-y' shorthand expands to the full start/end addresses."""
    i = IPRange.from_string('10.0.42.0-127')
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(i[0], IP('10.0.42.0'))
    self.assertEqual(i[-1], IP('10.0.42.127'))
def test_simple_ipv6_range_should_parse(self):
    """An explicit IPv6 start-end range yields the given endpoints."""
    i = IPRange.from_string('fe80:700:1::-fe80:700:1::f')
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(i[0], IP('fe80:700:1::'))
    self.assertEqual(i[-1], IP('fe80:700:1::f'))
def test_simple_ipv4_range_should_parse(self):
    """An explicit IPv4 start-end range yields the given endpoints."""
    i = IPRange.from_string('10.0.42.0-10.0.42.63')
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(i[0], IP('10.0.42.0'))
    self.assertEqual(i[-1], IP('10.0.42.63'))
# NOTE(review): Python 2 fragment — these statements belong to an enclosing
# DNS-handler method that is not visible here; 'qtype', 'qname', 'd' and
# 'fake_records' come from that scope, and the excerpt is truncated after
# the SOA branch.
fake_record = fake_records[qtype]
# Echo the query id and build an authoritative answer to the original question.
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap,
                               qr=1, aa=1, ra=1),
                     q=d.q)
print "[%s] %s: cooking the response of type '%s' for %s to %s" % (
    time.strftime("%H:%M:%S"), self.client_address[0], qtype, qname,
    fake_record)
# IPv6 needs additional work before inclusion:
if qtype == "AAAA":
    ipv6 = IP(fake_record)
    ipv6_bin = ipv6.strBin()
    # 128-bit binary string -> list of 16 byte values.
    ipv6_hex_tuple = [
        int(ipv6_bin[i:i + 8], 2) for i in xrange(0, len(ipv6_bin), 8)
    ]
    response.add_answer(
        RR(qname, QTYPE[qtype], rdata=RDMAP[qtype](ipv6_hex_tuple)))
elif qtype == "SOA":
    # "mname rname serial refresh retry expire minimum"
    mname, rname, t1, t2, t3, t4, t5 = fake_record.split(" ")
    times = tuple([int(t) for t in [t1, t2, t3, t4, t5]])
# NOTE(review): Python 2 script fragment — nearly the whole original workflow
# (nmap scan, result parsing, upload) is commented out; only the hard-coded
# range expansion below still executes.  The trailing triple-quote opens a
# string literal that continues beyond this excerpt.
print "\===============Auto Upload Shell=================="
print "\n[1] Check the Vulnerability"
#IP=raw_input("enter your target ip like 1.1.1.0/24:")
#inp = raw_input("enter your choice:")
#if inp == '1':
#    options()
#    if options() is None:
#        put()
#        move()
#IP=raw_input("enter your txt position:")
#f=open(IP,'r')
#lines=f.readlines()
#print "input info" + IP
# Expand every address of the hard-coded /24 network.
lines = IP("61.139.94.0/24")
#b = 'nmap'+' '+'--open '+''+'-p 80 ' + '' + IP
#print b +'\r\n'+'Now go for find the dork server'
#a = str(os.popen(b).readlines())
#r1 = re.compile("\d*\.\d*\.\d*\.\d*")
#ip = r1.findall(a)
ips = []
#if ip==[]:
#    sys.exit()
for x in lines:
    #x=x.strip()
    ips.append(x)
"""
for y in ips:
def isIP(str):
    """Return True when *str* parses as an IP address or network (via IPy)."""
    # NOTE: the parameter name shadows the builtin 'str'; kept as-is because
    # it is part of the function's public signature.
    try:
        IP(str)
        return True
    except ValueError:
        return False
def from_db_value(self, value, expression, connection, context):
    """Convert a raw database value into an IPy.IP, passing NULL through."""
    return None if value is None else IP(value)
#_*_ coding:utf-8_*_ from IPy import IP import xlrd,xlwt from xlutils.copy import copy # 向sheet页中写入数据,copy模版样式 oldWb = xlrd.open_workbook('input.xls',formatting_info=True) wb = copy(oldWb) oldsheet = oldWb.sheets()[0] sheet1 = wb.get_sheet(0) nrows = oldsheet.nrows for i in range(1, nrows): ip_net = oldsheet.cell_value(i, 0) try: ip = IP(ip_net) except: continue ips = '' for x in ip: ips = ips+str(x)+'\n' sheet1.write(i, 1, ips.strip()) wb.save('output.xls') print("It is done!") '''ip = IP('36.111.89.0/24') f = open('1','w') for x in ip: #print(x) f.write(str(x)+'\n') print('over')'''
def Ip_address():
    """Interactively expand an IP range: either print every host address to
    the console or export the range to a dated CSV file and move it into a
    user-chosen folder.

    NOTE(review): the original arrived with its line structure collapsed;
    indentation below is a reconstruction.  'menu1' is defined elsewhere,
    and invalid input recurses back into Ip_address().
    """
    try:
        print('\n')
        print('===' * 10)
        iprange = input(
            "Enter the IP range(either format: x.x.x.x/24 or x.x.x.x/255.255.255.0): "
        )
        print('===' * 10)
        user_input = input(
            "Enter '1' to display the file or '2' to print to csv file: ")
        while user_input != ' ':
            if user_input == '1':
                # Display mode: print network, hosts, broadcast and mask.
                try:
                    print('\n')
                    if '/' in iprange:
                        n = ipaddress.ip_network(iprange.strip())
                        first, last = n[0], n[-1]
                        print('===' * 5 + ' ' + "Network Address: " +
                              str(first) + ' ' + '===' * 5)
                        print("Available adresses:")
                        for x in n.hosts():
                            print(x)
                        print('===' * 5 + ' ' + "Broadcast Address: " +
                              str(last) + ' ' + '===' * 5)
                        print('===' * 5 + ' ' + 'Mask: ' +
                              str(IP(iprange).netmask()) + ' ' + '===' * 5)
                        menu1()
                    else:
                        print('===' * 10)
                        print("\nIvalid Ip and Mask!. Try again...")
                        print('===' * 10)
                        Ip_address()
                except ValueError:
                    print('===' * 10)
                    print("\nIvalid Ip and Mask!. Try again...")
                    print('===' * 10)
                    Ip_address()
            elif user_input == '2':
                # CSV mode: write the range to IP_range_<date>.csv, then move
                # it into the destination folder the user provides.
                try:
                    if '/' in iprange:
                        print('===' * 10)
                        path = input(
                            'Provide the path to the "IPranges" folder in the main folder(copy/paste from the folder) or any folder you want:\n'
                        )
                        path = path.split(',')
                        dst = os.path.join(*path)
                        outputfile = str(date.today())
                        file1 = ('IP_range_' + outputfile + ".csv")
                        f = open(file1, 'w')
                        writer = csv.DictWriter(f,
                                                fieldnames=[
                                                    " IP ADDRESSES ",
                                                    " NETWORK", " BROADCAST",
                                                    " MASK"
                                                ],
                                                lineterminator='\n',
                                                delimiter=',')
                        writer.writeheader()
                        network = IP(iprange)
                        n = ipaddress.IPv4Network(iprange)
                        first, last = n[0], n[-1]
                        mask = str(IP(iprange).netmask())
                        f.write('{0},{1},{2},{3}\n'.format(
                            'Usable Addresses', first, last, mask))
                        f.flush()
                        # Skip network and broadcast addresses ([1:-1]).
                        for ip in list(network)[1:-1]:
                            f.write('{0}\n'.format(ip))
                            f.flush()
                        f.close()
                        print(' ' * 30)
                        print('===' * 30)
                        print('***' * 8 + ' Completed! The file: ' + file1 +
                              ' has been created!' + '***' * 8)
                        print('===' * 30)
                        print(' ' * 30)
                        for filecsv in os.listdir():
                            #if filecsv.endswith(".csv"):
                            if 'IP_range_' in filecsv:
                                shutil.move(filecsv,
                                            os.path.join(dst, filecsv))
                        menu1()
                    else:
                        print('===' * 10)
                        print("\nIvalid Ip and Mask!. Try again...")
                        print('===' * 10)
                        Ip_address()
                except ValueError:
                    print("\n\nIvalid Ip and Mask!. Try again...\n")
                    Ip_address()
            else:
                print('===' * 10)
                print("Ivalid Entry!. Try again...")
                print('===' * 10)
                user_input = input(
                    "Enter '1' to display the file or '2' to print to csv file: "
                )
    except KeyboardInterrupt:
        print("\n\nProgram aborted by user. Exiting...\n")
        sys.exit()
def taskfunc(scannode, printed):
    '''Task-thread function: periodically reads an unfinished task from the
    database, loads the task's scan plugin, dispatches each IP of the task's
    ranges to a worker pool, and periodically checkpoints progress.

    Parameters:
        scannode: the scanNode instance that scheduled this thread.
        printed:  the last message printed, used to suppress duplicates.

    This function lives outside the class because the exec statement failed
    when placed inside it, so the instance is passed in as a parameter.

    NOTE(review): Python 2 code (print statement); indentation below is
    reconstructed from a collapsed source.  The dynamic `exec` import and
    `eval` of ipranges_str are trusted-input assumptions — the database
    contents must be controlled.
    '''
    tag = 'task线程->'
    dbo = dboperator(scannode.dbpath)
    # Tasks flagged as deleted have already been removed at this point.
    task = dbo.getOneTaskForExecute()
    r = '目前无可执行任务。'
    # No runnable task: log once, then re-arm the timer for the next poll.
    if task == None:
        if printed != r:
            logging.info(tag + r)
            printed = r
        timer = threading.Timer(
            scannode.do_task_inteval, taskfunc, (scannode, printed))
        timer.start()
        return
    # Unpack the task row (a tuple).
    nodeTaskId, taskName, plugin, ipranges_str, iptotal = task
    # Strip the trailing ".py" suffix from the plugin file name.
    plugin = plugin[0:len(plugin) - 3]
    r = '未找到插件:'
    try:
        exec("from plugin import " + plugin + " as scanning_plugin")
    # Plugin not found (exec failed): log once and retry on the next poll.
    except:
        if printed != r:
            logging.info(tag + r + plugin)
            printed = r
        timer = threading.Timer(
            scannode.do_task_inteval, taskfunc, (scannode, printed))
        timer.start()
        return
    r = "开始任务%s" % (nodeTaskId)
    logging.info(tag + r)
    printed = r
    # Recover the IP ranges and the saved execution progress.
    ipranges = eval(ipranges_str)
    lastip = dbo.getLastIpById(nodeTaskId)
    # Compute the remaining ranges and how many IPs are already done.
    ipRangesForScan, ipOkCount = caculateScaningIpRange(
        ipranges, lastip)
    stepcounter = 0  # checkpoint progress every `step` dispatched IPs
    # Worker pool: scannode.thread_count threads running the plugin's scan,
    # results funneled through recordResult (see multiThread docs).
    dp=multiThread(scannode.thread_count,scanning_plugin.scan,recordResult)
    f = open('result/' + taskName + '_' + nodeTaskId + '.txt', 'a')
    for iprange in ipRangesForScan:
        start, end = iprange
        for i in range(start, end + 1):
            stepcounter = stepcounter + 1
            ipOkCount = ipOkCount + 1
            p=str(IP(i))
            dp.dispatch((p,),(f,))
            # Once `step` IPs have been dispatched, persist progress.
            if stepcounter == scannode.step_recoard_progress:
                r=dp.getAllThreadProcess()  # where each thread currently is
                # Find the smallest in-flight IP: that is the safe resume point.
                p,=r[0]
                least=IP(p).int()
                for item in r:
                    p,=item
                    x=IP(p).int()
                    if x<least:
                        least=x
                str_least=str(IP(least))
                dbo.updateLastIpById(nodeTaskId, str_least)  # save resume point
                dbo.updateIpFinishedById(nodeTaskId, ipOkCount)
                stepcounter = 0
                print '任务%s扫描完成进度:' % (nodeTaskId) + str(ipOkCount) + '/' + str(iptotal) + '\r',
                sys.stdout.flush()
                # Check whether the task's instruction has changed.
                newInstruction = dbo.getInstructionById(nodeTaskId)
                # If the instruction is no longer "execute":
                if newInstruction != instructionOptions['执行']:
                    # Re-arm the poll timer after 5 seconds.
                    timer = threading.Timer(
                        5, taskfunc, (scannode, printed))
                    timer.start()
                    # Abandon this task; wait 5s so worker threads finish
                    # before closing the shared result file.
                    sleep(5)
                    f.close()
                    return
    f.close()
    dbo.updateStatusById(nodeTaskId, statusOptions['完成'])
    # todo: delete this id's row from the lastip table, since it is finished.
    logging.info(tag + "完成任务%s" % (nodeTaskId))
    # Task done: immediately move on to the next one.
    timer = threading.Timer(
        0, taskfunc, (scannode, printed))
    timer.start()
import datetime import socket try: from IPy import IP except ImportError: try: command_to_execute = "pip install IPy || easy_install IPy" os.system(command_to_execute) except OSError: print "Can NOT install IPy, Aborted!" sys.exit(1) except Exception as e: print "Uncaught exception, %s" % e.message sys.exit(1) from IPy import IP i = datetime.datetime.now() print i.strftime('%Y/%m/%d %H:%M:%S') hostname = socket.getfqdn(socket.gethostname()) print hostname # TODO(Guodong Ding) Get A Wrong IP Address When There Are More Than One NIC Adapter Or IP Address!!! ip = socket.gethostbyname(hostname) print ip print IP(ip).version() print IP(ip).reverseName()
def cook_response_any(self, d, qname, fake_records):
    """Build a spoofed response to an ANY query containing every configured
    fake record type.

    NOTE(review): indentation below is reconstructed from a collapsed
    source (Python 2, `xrange`).  As written, the AAAA branch converts the
    record but never calls add_answer, so AAAA data appears to be dropped
    from ANY responses — confirm against the original file.
    """
    self.log(
        "cooking the response of type ANY for %s with all known fake records."
        % qname)
    response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap,
                                   qr=1, aa=1, ra=1),
                         q=d.q)
    for qtype, fake_record in fake_records.items():
        if fake_record:
            # NOTE: RDMAP is a dictionary map of qtype strings to handling classses
            # IPv6 needs additional work before inclusion:
            if qtype == "AAAA":
                ipv6 = IP(fake_record)
                ipv6_bin = ipv6.strBin()
                # 128-bit binary string -> list of 16 byte values.
                fake_record = [
                    int(ipv6_bin[i:i + 8], 2)
                    for i in xrange(0, len(ipv6_bin), 8)
                ]
            elif qtype == "SOA":
                # "mname rname serial refresh retry expire minimum"
                mname, rname, t1, t2, t3, t4, t5 = fake_record.split(" ")
                times = tuple([int(t) for t in [t1, t2, t3, t4, t5]])
                mname, rname = rmdots(mname), rmdots(rname)
                response.add_answer(
                    RR(qname, getattr(QTYPE, qtype),
                       rdata=RDMAP[qtype](mname, rname, times)))
            elif qtype == "NAPTR":
                order, preference, flags, service, regexp, replacement = fake_record.split(
                    " ")
                order = int(order)
                preference = int(preference)
                replacement = rmdots(replacement)
                # if replacement and replacement[-1] == ".": replacement = replacement[:-1]
                response.add_answer(
                    RR(qname, getattr(QTYPE, qtype),
                       rdata=RDMAP[qtype](order, preference, flags, service,
                                          regexp, replacement)))
            elif qtype == "SRV":
                priority, weight, port, target = fake_record.split(" ")
                priority = int(priority)
                weight = int(weight)
                port = int(port)
                target = rmdots(target)
                response.add_answer(
                    RR(qname, getattr(QTYPE, qtype),
                       rdata=RDMAP[qtype](priority, weight, port, target)))
            elif qtype == "DNSKEY":
                flags, protocol, algorithm, key = fake_record.split(" ")
                flags = int(flags)
                protocol = int(protocol)
                algorithm = int(algorithm)
                key = b64decode(("".join(key)).encode('ascii'))
                response.add_answer(
                    RR(qname, getattr(QTYPE, qtype),
                       rdata=RDMAP[qtype](flags, protocol, algorithm, key)))
            elif qtype == "RRSIG":
                covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(
                    " ")
                covered = getattr(QTYPE, covered)  # NOTE: Covered QTYPE
                algorithm = int(algorithm)
                labels = int(labels)
                orig_ttl = int(orig_ttl)
                # Signature timestamps are YYYYMMDDHHMMSS in GMT.
                sig_exp = int(
                    mktime(strptime(sig_exp + 'GMT', "%Y%m%d%H%M%S%Z")))
                sig_inc = int(
                    mktime(strptime(sig_inc + 'GMT', "%Y%m%d%H%M%S%Z")))
                key_tag = int(key_tag)
                name = rmdots(name)
                sig = b64decode(("".join(sig)).encode('ascii'))
                response.add_answer(
                    RR(qname, getattr(QTYPE, qtype),
                       rdata=RDMAP[qtype](covered, algorithm, labels,
                                          orig_ttl, sig_exp, sig_inc,
                                          key_tag, name, sig)))
            else:
                # Simple record types: the fake record is the value itself.
                fake_record = rmdots(fake_record)
                response.add_answer(
                    RR(qname, getattr(QTYPE, qtype),
                       rdata=RDMAP[qtype](fake_record)))
    response = response.pack()
    return response
# Script: read one network spec per row from ip.xlsx and repeatedly merge
# adjacent networks (IPy's IP.__add__) until no further merge is possible.
from IPy import IP
import xlrd

filename = 'ip.xlsx'
f = xlrd.open_workbook(filename)  # open the workbook
table = f.sheet_by_name("Sheet1")  # open the sheet
nrows = table.nrows  # number of rows in the sheet
print(nrows)
net_list = list()
for i in range(nrows):
    # print(table.row_values(i)[0])
    net_list.append(IP(table.row_values(i)[0]))
# Merge pass: keep sweeping until a full pass produces no merge.
# NOTE(review): the inner loop pops from net_list while iterating over a
# range computed up-front, and the bare except absorbs both "not adjacent"
# ValueErrors and the resulting IndexErrors — fragile but apparently
# intentional; the `count == len(net_list)` test ends the sweep when no
# element could be merged.
while True:
    count = 0
    for i in range(len(net_list)):
        try:
            net_list[i] += net_list[i + 1]
        except:
            count += 1
            continue
        net_list.pop(i + 1)
    if count == len(net_list):
        break
print(net_list)
def local_audit(audit_ip, user_name, pem_file, password):
    """Audit one EC2 instance over the network.

    Looks the instance up via the AWS CLI by public or private IP
    (depending on IPy's classification of audit_ip), determines its
    platform, then dispatches either the Windows audit script or lynis.
    Whichever of user_name/pem_file/password were supplied are forwarded
    as positional script arguments, with "" placeholders keeping the
    positions aligned.

    NOTE(review): 'account_name' is a module-level name defined elsewhere.
    NOTE(review): if audit_ip is neither PUBLIC nor PRIVATE per IPy,
    operating_sys/private_ip/public_ip stay unbound (unchanged from the
    original behavior).
    """
    win_path = 'tools/Windows-Workstation-and-Server-Audit'
    lyn_path = 'tools/lynis'
    ip = IP(audit_ip)
    # Renamed from 'type' to avoid shadowing the builtin.
    ip_type = ip.iptype()
    default_region = subprocess.check_output(
        ['aws', 'configure', 'get', 'region']).strip()
    if ip_type == 'PUBLIC':
        operating_sys = subprocess.check_output([
            'aws', 'ec2', 'describe-instances', '--region',
            '%s' % default_region, '--filters',
            'Name=ip-address,Values=%s' % (audit_ip), '--query',
            'Reservations[*].Instances[*].[Platform]', '--output', 'text'
        ]).strip()
        private_ip = subprocess.check_output([
            'aws', 'ec2', 'describe-instances', '--region',
            '%s' % default_region, '--filters',
            'Name=ip-address,Values=%s' % (audit_ip), '--query',
            'Reservations[*].Instances[*].[PrivateIpAddress]', '--output',
            'text'
        ]).strip()
        public_ip = audit_ip
    elif ip_type == 'PRIVATE':
        operating_sys = subprocess.check_output([
            'aws', 'ec2', 'describe-instances', '--region',
            '%s' % default_region, '--filters',
            'Name=network-interface.addresses.private-ip-address,Values=%s' %
            (audit_ip), '--query',
            'Reservations[*].Instances[*].[Platform]', '--output', 'text'
        ]).strip()
        public_ip = subprocess.check_output([
            'aws', 'ec2', 'describe-instances', '--region',
            '%s' % default_region, '--filters',
            'Name=network-interface.addresses.private-ip-address,Values=%s' %
            (audit_ip), '--query',
            'Reservations[*].Instances[*].[PublicIpAddress]', '--output',
            'text'
        ]).strip()
        private_ip = audit_ip
    # BUGFIX: dns_name was unbound when the instance has no public IP,
    # crashing the subprocess calls below with NameError.
    dns_name = ""
    if public_ip == 'None':
        public_ip = ""
    else:
        dns_name = subprocess.check_output(['host',
                                            public_ip]).strip().split(' ')[4]
    if operating_sys == 'windows':
        print("WINDOWS BOX FOUND!!!")
        if (audit_ip and not (user_name or pem_file or password)):
            subprocess.call([
                './windows_remote.sh', account_name, dns_name, private_ip,
                public_ip
            ], cwd=win_path)
        elif audit_ip and user_name and not (pem_file or password):
            subprocess.call([
                './windows_remote.sh', account_name, dns_name, private_ip,
                public_ip, user_name
            ], cwd=win_path)
        elif audit_ip and pem_file and not (user_name or password):
            subprocess.call([
                './windows_remote.sh', account_name, dns_name, private_ip,
                public_ip, "", pem_file
            ], cwd=win_path)
        elif audit_ip and password and not (user_name or pem_file):
            subprocess.call([
                './windows_remote.sh', account_name, dns_name, private_ip,
                public_ip, "", "", password
            ], cwd=win_path)
        elif audit_ip and user_name and password and not (pem_file):
            subprocess.call([
                './windows_remote.sh', account_name, dns_name, private_ip,
                public_ip, user_name, "", password
            ], cwd=win_path)
        elif audit_ip and user_name and pem_file and not (password):
            subprocess.call([
                './windows_remote.sh', account_name, dns_name, private_ip,
                public_ip, user_name, pem_file
            ], cwd=win_path)
        # BUGFIX: was 'not (password)', which contradicts the 'password and'
        # term and made this branch unreachable; the intent (per the args
        # passed) is "password and pem_file but no user_name".
        elif audit_ip and password and pem_file and not (user_name):
            subprocess.call([
                './windows_remote.sh', account_name, dns_name, private_ip,
                public_ip, "", pem_file, password
            ], cwd=win_path)
        else:
            subprocess.call([
                './windows_remote.sh', account_name, dns_name, private_ip,
                public_ip, user_name, pem_file, password
            ], cwd=win_path)
    else:
        print("LINUX BOX FOUND!!!")
        if (audit_ip and not (user_name or pem_file or password)):
            subprocess.call([
                './lynis_remote.sh', account_name, dns_name, private_ip,
                public_ip
            ], cwd=lyn_path)
        elif audit_ip and user_name and not (pem_file or password):
            subprocess.call([
                './lynis_remote.sh', account_name, dns_name, private_ip,
                public_ip, user_name
            ], cwd=lyn_path)
        elif audit_ip and pem_file and not (user_name or password):
            subprocess.call([
                './lynis_remote.sh', account_name, dns_name, private_ip,
                public_ip, "", pem_file
            ], cwd=lyn_path)
        elif audit_ip and password and not (user_name or pem_file):
            subprocess.call([
                './lynis_remote.sh', account_name, dns_name, private_ip,
                public_ip, "", "", password
            ], cwd=lyn_path)
        elif audit_ip and user_name and password and not (pem_file):
            subprocess.call([
                './lynis_remote.sh', account_name, dns_name, private_ip,
                public_ip, user_name, "", password
            ], cwd=lyn_path)
        elif audit_ip and user_name and pem_file and not (password):
            subprocess.call([
                './lynis_remote.sh', account_name, dns_name, private_ip,
                public_ip, user_name, pem_file, ""
            ], cwd=lyn_path)
        # BUGFIX: same always-False 'not (password)' condition as above.
        elif audit_ip and password and pem_file and not (user_name):
            subprocess.call([
                './lynis_remote.sh', account_name, dns_name, private_ip,
                public_ip, "", pem_file, password
            ], cwd=lyn_path)
        else:
            subprocess.call([
                './lynis_remote.sh', account_name, dns_name, private_ip,
                public_ip, user_name, pem_file, password
            ], cwd=lyn_path)
def run(self, conf, args, plugins):
    """Entry point for the `ip` command.

    Dispatches on ``args.subcommand``:

    * ``info``  -- print geolocation / ASN / quick-pivot links for one IP.
    * ``intel`` -- aggregate passive DNS, URLs and malware samples for the
      IP from every configured plugin and print an intelligence report.

    Args:
        conf: parsed configuration mapping (API keys per service).
        args: argparse namespace; ``args.IP`` holds the target address.
        plugins: mapping of plugin name -> plugin instance, each exposing
            ``test_config(conf)``.

    Fixes vs. the previous revision:
    * ``"Private IP"`` was a bare-string no-op statement -- now printed.
    * the ASN-name file handle was never closed -- now a ``with`` block.
    * ``pt_osint`` could be read before assignment when the first
      PassiveTotal request timed out -- now pre-initialised.
    """
    if "subcommand" in args:
        if args.subcommand == "info":
            if not is_ip(unbracket(args.IP)):
                print("Invalid IP address")
                sys.exit(1)
            # FIXME: move code here in a library
            ip = unbracket(args.IP)
            try:
                ipy = IP(ip)
            except ValueError:
                print("Invalid IP format, quitting...")
                return
            # MaxMind geolocation + ASN lookup
            ipinfo = self.ipinfo(ip)
            print("MaxMind: Located in %s, %s" % (ipinfo["city"],
                                                  ipinfo["country"]))
            if ipinfo["asn"] == 0:
                print("MaxMind: IP not found in the ASN database")
            else:
                print("MaxMind: ASN%i, %s" % (ipinfo["asn"],
                                              ipinfo["asn_name"]))
                print("CAIDA Type: %s" % ipinfo["asn_type"])
            # Offline ASN database lookup (requires `harpoon update`)
            try:
                asndb2 = pyasn.pyasn(self.asncidr)
                res = asndb2.lookup(ip)
            except OSError:
                print("Configuration files are not available")
                print("Please run harpoon update before using harpoon")
                sys.exit(1)
            if res[1] is None:
                print("IP not found in ASN database")
            else:
                # Search for name. The ASN-name file is pipe-delimited:
                # "<asn>|<name>". Use a context manager so the handle is
                # closed (the original leaked the file descriptor).
                with open(self.asnname, "r") as f:
                    found = False
                    line = f.readline()
                    name = ""
                    while not found and line != "":
                        s = line.split("|")
                        if s[0] == str(res[0]):
                            name = s[1].strip()
                            found = True
                        line = f.readline()
                print("ASN %i - %s (range %s)" % (res[0], name, res[1]))
            if ipinfo["hostname"] != "":
                print("Hostname: %s" % ipinfo["hostname"])
            if ipinfo["specific"] != "":
                print("Specific: %s" % ipinfo["specific"])
            if ipy.iptype() == "PRIVATE":
                # Was a bare string literal (no-op); the message was never
                # shown to the user.
                print("Private IP")
            print("")
            if ipy.version() == 4:
                # Quick pivot links for IPv4 only
                print("Censys:\t\thttps://censys.io/ipv4/%s" % ip)
                print("Shodan:\t\thttps://www.shodan.io/host/%s" % ip)
                print("IP Info:\thttp://ipinfo.io/%s" % ip)
                print("BGP HE:\t\thttps://bgp.he.net/ip/%s" % ip)
                print(
                    "IP Location:\thttps://www.iplocation.net/?query=%s" % ip)
        elif args.subcommand == "intel":
            if not is_ip(unbracket(args.IP)):
                print("Invalid IP address")
                sys.exit(1)
            # Start with MISP and OTX to get Intelligence Reports
            print("###################### %s ###################" %
                  unbracket(args.IP))
            # Accumulators filled by each enabled plugin below.
            passive_dns = []
            urls = []
            malware = []
            files = []
            # MISP
            misp_e = plugins["misp"].test_config(conf)
            if misp_e:
                print("[+] Downloading MISP information...")
                server = ExpandedPyMISP(conf["Misp"]["url"],
                                        conf["Misp"]["key"])
                misp_results = server.search("attributes",
                                             value=unbracket(args.IP))
            # Binary Edge
            be_e = plugins["binaryedge"].test_config(conf)
            if be_e:
                try:
                    print("[+] Downloading BinaryEdge information...")
                    be = BinaryEdge(conf["BinaryEdge"]["key"])
                    # FIXME: this only get the first page
                    res = be.domain_ip(unbracket(args.IP))
                    for d in res["events"]:
                        passive_dns.append({
                            "domain": d["domain"],
                            "first":
                            parse(d["updated_at"]).astimezone(pytz.utc),
                            "last":
                            parse(d["updated_at"]).astimezone(pytz.utc),
                            "source": "BinaryEdge",
                        })
                except BinaryEdgeException:
                    print(
                        "BinaryEdge request failed, you need a paid subscription"
                    )
            # OTX
            otx_e = plugins["otx"].test_config(conf)
            if otx_e:
                print("[+] Downloading OTX information....")
                otx = OTXv2(conf["AlienVaultOtx"]["key"])
                res = otx.get_indicator_details_full(
                    IndicatorTypes.IPv4, unbracket(args.IP))
                otx_pulses = res["general"]["pulse_info"]["pulses"]
                # Get Passive DNS
                if "passive_dns" in res:
                    for r in res["passive_dns"]["passive_dns"]:
                        passive_dns.append({
                            "domain": r["hostname"],
                            "first": parse(r["first"]).astimezone(pytz.utc),
                            "last": parse(r["last"]).astimezone(pytz.utc),
                            "source": "OTX",
                        })
                if "url_list" in res:
                    for r in res["url_list"]["url_list"]:
                        if "result" in r:
                            urls.append({
                                "date": parse(r["date"]).astimezone(pytz.utc),
                                "url": r["url"],
                                "ip": r["result"]["urlworker"]["ip"]
                                if "ip" in r["result"]["urlworker"] else "",
                                "source": "OTX",
                            })
                        else:
                            urls.append({
                                "date": parse(r["date"]).astimezone(pytz.utc),
                                "url": r["url"],
                                "ip": "",
                                "source": "OTX",
                            })
            # RobTex (no API key required, so not gated on a plugin check)
            print("[+] Downloading Robtex information....")
            rob = Robtex()
            try:
                res = rob.get_ip_info(unbracket(args.IP))
            except RobtexError:
                print("Error with Robtex")
            else:
                # "pas"/"pash": passive DNS, "act"/"acth": active DNS entries
                for d in ["pas", "pash", "act", "acth"]:
                    if d in res:
                        for a in res[d]:
                            passive_dns.append({
                                "first": a["date"].astimezone(pytz.utc),
                                "last": a["date"].astimezone(pytz.utc),
                                "domain": a["o"],
                                "source": "Robtex",
                            })
            # PT
            pt_e = plugins["pt"].test_config(conf)
            if pt_e:
                out_pt = False
                # Pre-initialise: pt_osint is read in the report section
                # below even when the first request times out.
                pt_osint = {}
                print("[+] Downloading Passive Total information....")
                client = DnsRequest(conf["PassiveTotal"]["username"],
                                    conf["PassiveTotal"]["key"])
                try:
                    raw_results = client.get_passive_dns(
                        query=unbracket(args.IP))
                    if "results" in raw_results:
                        for res in raw_results["results"]:
                            passive_dns.append({
                                "first": parse(res["firstSeen"]).astimezone(
                                    pytz.utc),
                                "last": parse(res["lastSeen"]).astimezone(
                                    pytz.utc),
                                "domain": res["resolve"],
                                "source": "PT",
                            })
                    if "message" in raw_results:
                        if "quota_exceeded" in raw_results["message"]:
                            print("Quota exceeded for Passive Total")
                            # Skip the enrichment requests below.
                            out_pt = True
                except requests.exceptions.ReadTimeout:
                    print("Timeout on Passive Total requests")
                if not out_pt:
                    try:
                        client2 = EnrichmentRequest(
                            conf["PassiveTotal"]["username"],
                            conf["PassiveTotal"]["key"],
                        )
                        # Get OSINT
                        # TODO: add PT projects here
                        pt_osint = client2.get_osint(
                            query=unbracket(args.IP))
                        # Get malware
                        raw_results = client2.get_malware(
                            query=unbracket(args.IP))
                        if "results" in raw_results:
                            for r in raw_results["results"]:
                                malware.append({
                                    "hash": r["sample"],
                                    "date": parse(r["collectionDate"]),
                                    "source": "PT (%s)" % r["source"],
                                })
                    except requests.exceptions.ReadTimeout:
                        print("Timeout on Passive Total requests")
            # Urlhaus
            uh_e = plugins["urlhaus"].test_config(conf)
            if uh_e:
                print("[+] Checking urlhaus data...")
                try:
                    urlhaus = UrlHaus(conf["UrlHaus"]["key"])
                    res = urlhaus.get_host(unbracket(args.IP))
                except UrlHausError:
                    print("Error with the query")
                else:
                    if "urls" in res:
                        for r in res["urls"]:
                            urls.append({
                                "date": parse(r["date_added"]).astimezone(
                                    pytz.utc),
                                "url": r["url"],
                                "source": "UrlHaus",
                            })
            # VT (private API only; the public API lacks these reports)
            vt_e = plugins["vt"].test_config(conf)
            if vt_e:
                if conf["VirusTotal"]["type"] != "public":
                    print("[+] Downloading VT information....")
                    vt = PrivateApi(conf["VirusTotal"]["key"])
                    res = vt.get_ip_report(unbracket(args.IP))
                    if "results" in res:
                        if "resolutions" in res["results"]:
                            for r in res["results"]["resolutions"]:
                                passive_dns.append({
                                    "first":
                                    parse(r["last_resolved"]).astimezone(
                                        pytz.utc),
                                    "last":
                                    parse(r["last_resolved"]).astimezone(
                                        pytz.utc),
                                    "domain": r["hostname"],
                                    "source": "VT",
                                })
                        if "undetected_downloaded_samples" in res[
                                "results"]:
                            for r in res["results"][
                                    "undetected_downloaded_samples"]:
                                files.append({
                                    "hash": r["sha256"],
                                    "date": parse(r["date"]),
                                    "source": "VT",
                                })
                        if "undetected_referrer_samples" in res["results"]:
                            for r in res["results"][
                                    "undetected_referrer_samples"]:
                                if "date" in r:
                                    files.append({
                                        "hash": r["sha256"],
                                        "date": parse(r["date"]),
                                        "source": "VT",
                                    })
                                else:
                                    # FIXME : should consider data without dates
                                    files.append({
                                        "hash": r["sha256"],
                                        "date": datetime.datetime(1970, 1, 1),
                                        "source": "VT",
                                    })
                        if "detected_downloaded_samples" in res["results"]:
                            for r in res["results"][
                                    "detected_downloaded_samples"]:
                                malware.append({
                                    "hash": r["sha256"],
                                    "date": parse(r["date"]),
                                    "source": "VT",
                                })
                        if "detected_referrer_samples" in res["results"]:
                            for r in res["results"][
                                    "detected_referrer_samples"]:
                                if "date" in r:
                                    malware.append({
                                        "hash": r["sha256"],
                                        "date": parse(r["date"]),
                                        "source": "VT",
                                    })
                else:
                    # Public key: treat VT as disabled for the report.
                    vt_e = False
            # GreyNoise
            print("[+] Downloading GreyNoise information....")
            gn = GreyNoise(conf["GreyNoise"]["key"])
            if gn == "":
                print("Greynoise API key is not set up.")
            greynoise = gn.ip(unbracket(args.IP))
            # Threat Grid
            tg_e = plugins["threatgrid"].test_config(conf)
            if tg_e:
                print("[+] Downloading Threat Grid....")
                try:
                    tg = ThreatGrid(conf["ThreatGrid"]["key"])
                    res = tg.search_samples(unbracket(args.IP), type="ip")
                    # De-duplicate samples by sha256.
                    already = []
                    if "items" in res:
                        for r in res["items"]:
                            if r["sample_sha256"] not in already:
                                d = parse(r["ts"])
                                d = d.replace(tzinfo=None)
                                malware.append({
                                    "hash": r["sample_sha256"],
                                    "date": d,
                                    "source": "TG",
                                })
                                already.append(r["sample_sha256"])
                except ThreatGridError as e:
                    print("Error with threat grid: {}".format(e.message))
            # ThreatMiner
            print("[+] Downloading ThreatMiner....")
            tm = ThreatMiner()
            response = tm.get_report(unbracket(args.IP))
            if response["status_code"] == "200":
                tmm = response["results"]
            else:
                tmm = []
                # 404 simply means "no data"; anything else is an error.
                if response["status_code"] != "404":
                    print("Request to ThreatMiner failed: {}".format(
                        response["status_message"]))
            response = tm.get_related_samples(unbracket(args.IP))
            if response["status_code"] == "200":
                for r in response["results"]:
                    malware.append({
                        "hash": r,
                        "date": None,
                        "source": "ThreatMiner"
                    })
            # ---- Render the aggregated report ----
            print("----------------- Intelligence Report")
            ctor = CommandTor()
            tor_list = ctor.get_list()
            if tor_list:
                if unbracket(args.IP) in tor_list:
                    print("{} is a Tor Exit node".format(unbracket(
                        args.IP)))
            else:
                print("Impossible to reach the Tor Exit Node list")
            if otx_e:
                if len(otx_pulses):
                    print("OTX:")
                    for p in otx_pulses:
                        print("- %s (%s - %s)" % (
                            p["name"],
                            p["created"][:10],
                            "https://otx.alienvault.com/pulse/" + p["id"],
                        ))
                else:
                    print("OTX: Not found in any pulse")
            if misp_e:
                if len(misp_results["Attribute"]) > 0:
                    print("MISP:")
                    for event in misp_results["Attribute"]:
                        print("- {} - {}".format(event["Event"]["id"],
                                                 event["Event"]["info"]))
            if len(greynoise) > 0:
                print("GreyNoise: IP identified as")
                for key, value in greynoise.items():
                    print(key, "->", value)
            else:
                print("GreyNoise: Not found")
            if pt_e:
                if "results" in pt_osint:
                    if len(pt_osint["results"]):
                        if len(pt_osint["results"]) == 1:
                            if "name" in pt_osint["results"][0]:
                                print("PT: %s %s" % (
                                    pt_osint["results"][0]["name"],
                                    pt_osint["results"][0]["sourceUrl"],
                                ))
                            else:
                                print("PT: %s" %
                                      pt_osint["results"][0]["sourceUrl"])
                        else:
                            print("PT:")
                            for r in pt_osint["results"]:
                                if "name" in r:
                                    print("-%s %s" % (r["name"],
                                                      r["sourceUrl"]))
                                else:
                                    print("-%s" % r["sourceUrl"])
                    else:
                        print("PT: Nothing found!")
                else:
                    print("PT: Nothing found!")
            # ThreatMiner
            if len(tmm) > 0:
                print("ThreatMiner:")
                for r in tmm:
                    print("- {} {} - {}".format(r["year"], r["filename"],
                                                r["URL"]))
            if len(malware) > 0:
                print("----------------- Malware")
                for r in malware:
                    print("[%s] %s %s" % (
                        r["source"],
                        r["hash"],
                        r["date"].strftime("%Y-%m-%d") if r["date"] else "",
                    ))
            if len(files) > 0:
                print("----------------- Files")
                for r in sorted(files, key=lambda x: x["date"]):
                    print("[%s] %s %s" % (r["source"], r["hash"],
                                          r["date"].strftime("%Y-%m-%d")))
            if len(passive_dns) > 0:
                print("----------------- Passive DNS")
                for r in sorted(passive_dns,
                                key=lambda x: x["first"],
                                reverse=True):
                    print("[+] %-40s (%s -> %s)(%s)" % (
                        r["domain"],
                        r["first"].strftime("%Y-%m-%d"),
                        r["last"].strftime("%Y-%m-%d"),
                        r["source"],
                    ))
            if len(urls) > 0:
                print("----------------- Urls")
                for r in sorted(urls, key=lambda x: x["date"],
                                reverse=True):
                    print("[%s] %s - %s" % (r["source"], r["url"],
                                            r["date"].strftime("%Y-%m-%d")))
        else:
            self.parser.print_help()
    else:
        self.parser.print_help()
def make_prefix_heap(
    prefixes,
    initial_children=None,
    family=None,
    sort_fn=None,
    show_available=False,
    show_unused=False,
):
    """Return a prefix heap of all prefixes. Might optionally filter out
    IPv4 and IPv6 as needed.

    Args:
        prefixes: a queryset for or list of nav.models.manage.Prefix
        initial_children: a list of IPNode to initialize the root with
        family: a list of address types to allow (of "ipv4", "ipv6",
            "rfc1918"). None means "allow all three".
        sort_fn: function used to sort the children upon serialization
        show_available: whether or not to create "fake" children for all
            ranges not spanned by the node's children or found otherwhere
            in NAV
        show_unused: like above, but only creates such nodes for prefixes
            that are in fact found within NAV

    Returns:
        A prefix heap (tree)
    """
    # Treat a missing family filter as "accept everything". The previous
    # revision raised TypeError ('in' on None) when `family` was omitted.
    if family is None:
        family = ("ipv4", "ipv6", "rfc1918")

    rfc1918 = IPSet(
        [IP("10.0.0.0/8"), IP("172.16.0.0/12"), IP("192.168.0.0/16")])

    def accept(prefix):
        "Helper function for filtering prefixes by IP family"
        ip_addr = IP(prefix.net_address)
        # "ipv4" deliberately excludes RFC1918 space: private ranges are
        # selected by the separate "rfc1918" family below.
        if "ipv4" in family and ip_addr.version(
        ) == 4 and ip_addr not in rfc1918:
            return True
        if "ipv6" in family and ip_addr.version() == 6:
            return True
        if "rfc1918" in family and ip_addr in rfc1918:
            return True
        return False

    heap = PrefixHeap(initial_children)
    filtered = (prefix for prefix in prefixes if accept(prefix))
    nodes = (PrefixNode(prefix, sort_fn=sort_fn) for prefix in filtered)
    # Insert smallest-first so parents exist before their children.
    for node in sorted(nodes, reverse=False):
        heap.add(node)

    # Add marker nodes for available ranges/prefixes
    if show_available:
        scopes = (child for child in heap.walk_roots()
                  if child.net_type in ["scope"])
        subnets = (get_available_nodes([scope.ip]) for scope in scopes)
        for subnet in subnets:
            heap.add_many(subnet)

    # Add marker nodes for empty ranges, e.g. ranges not spanned by the
    # children of a node. This is useful for aligning visualizations and so on.
    if show_unused:
        unused_prefixes = (child.not_in_use() for child in heap.walk())
        for unused_prefix in unused_prefixes:
            nodes = nodes_from_ips(unused_prefix, klass="empty")
            heap.add_many(nodes)
    return heap
from multiprocessing import Process, Queue, Pool import time import subprocess from IPy import IP import sys q = Queue() ips = IP('10.0.1.0/24') def f(i, q): while (True): if q.empty(): sys.exit() ip = q.get() ret = subprocess.call(['ping', f'-c 1 , {ip}'], shell=True, stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT) if ret == 0: print(f"{ip} is alive") else: print(f"Process Number: {i} didn't find a response for {ip} ") for i in ips:
def usage():
    """Print a usage message to stderr and exit with status 1."""
    # Ported from the Python 2 `print >> sys.stderr` statement, which is a
    # syntax error on Python 3.
    sys.stderr.write('Usage: %s cidr domain type ("a" or "ptr")\n' %
                     sys.argv[0])
    sys.exit(1)


#####

if len(sys.argv) < 4:
    usage()
cidr = sys.argv[1]
domain = sys.argv[2]
# Renamed from `type`, which shadowed the builtin.
rec_type = sys.argv[3]
if rec_type != 'a' and rec_type != 'ptr':
    usage()
# Force domain to end with a dot
if not domain.endswith('.'):
    domain += '.'
iplist = IP(cidr)
for ip in iplist:
    if rec_type == 'a':
        # e.g. "0a000001.example.com. IN A 10.0.0.1"
        print('%s.%s IN A %s' % (ip_to_hex(ip.strNormal()), domain, ip))
    elif rec_type == 'ptr':
        print('%s IN PTR %s.%s' % (ip.reverseName(),
                                   ip_to_hex(ip.strNormal()), domain))
def test_indexed_access_should_work(self):
    """Indexing an IPRange should return the IP at that zero-based offset."""
    i = IPRange(IP('10.0.42.0'), IP('10.0.42.127'))
    # assertEqual: `assertEquals` is a deprecated alias removed in
    # Python 3.12's unittest.
    self.assertEqual(i[5], IP('10.0.42.5'))