def index(request):
    """Render the search / recent-results listing page.

    GET params:
        p: 1-based page number; non-numeric or <= 0 values fall back to 1.
        q: optional keyword string, translated into an Elasticsearch query
           by k2e_search(); when absent, the 20 newest documents are shown.
    """
    page = request.GET.get("p", "1")
    q = request.GET.get("q", None)
    try:
        page = int(page)
    except (TypeError, ValueError):
        # Non-numeric page parameter -> first page.
        page = 1
    if page <= 0:
        page = 1
    es = Elasticsearch(ELASTICSEARCH_HOSTS)
    start_time = datetime.now()
    keywords = None
    if q is None:
        # No keyword: newest 20 documents for the requested page.
        _search = {
            "from": (page - 1) * 20,
            "size": 20,
            "sort": {"published_from": {"order": "desc"}}
        }
    else:
        _search, keywords = k2e_search(q, page)
    s = Search(using=es, index='w12scan').from_dict(_search)
    count = s.execute().hits.total

    # Pagination: show a sliding window of page links around the current page.
    max_page = math.ceil(count / 20)
    if page <= 5:
        paginations = range(1, 10)
    elif page + 5 > max_page:
        paginations = range(max_page - 5, max_page + 5)
    else:
        paginations = range(page - 5, page + 5)
    # Drop links past the last page.
    paginations = [i for i in paginations if i <= max_page]
    pagination = {
        "max_page": str(max_page),
        "current": page,
        "pre": str(page - 1) if page - 1 > 0 else "1",
        "next": str(page + 1) if page + 1 <= max_page else str(max_page),
        "paginations": paginations,
        "keyword": ""
    }
    if q is not None:
        # Preserved in page links so paging keeps the current search.
        pagination["keyword"] = "&q=" + q
    # Pagination done.

    datas = []
    for hit in s:
        doc_type = hit.meta.doc_type
        doc_id = hit.meta.id  # renamed from `id` to avoid shadowing the builtin
        d = {}
        if doc_type == "ips":
            d.update(hit.to_dict())
            if d.get("infos"):
                # Render each open service as "port/name" tags.
                d["info_tags"] = []
                for info in d["infos"]:
                    d["info_tags"].append("{}/{}".format(info["port"], info.get("name", "unknown")))
                d["infos"] = json.dumps(d["infos"], indent=2)
            # Asset association flag.
            d["proper"] = is_proper(d["target"], "ip")
        elif doc_type == "domains":
            d.update(hit.to_dict())
            d["target"] = d.get("title") or d.get("url")
            if d.get("ip"):
                ip = d.get("ip")
                ip_info = es_search_ip(ip, True)
                if ip_info:
                    d["location"] = ip_info.location
            d["proper"] = is_proper(d["url"], "domain")
        d["doc_type"] = doc_type
        d["id"] = doc_id
        d["published_from"] = datetime_string_format(d["published_from"])
        datas.append(d)

    # Left-hand sidebar statistics.
    statistics = {}
    # 1. Component statistics.
    apps = count_app()
    countrys = count_country()
    names = count_name()
    ports = count_port()
    statistics["apps"] = apps
    statistics["countrys"] = countrys
    statistics["names"] = names
    statistics["ports"] = ports

    # Total elapsed time for the request.
    end_time = (datetime.now() - start_time).total_seconds()
    return render(request, "frontend/recent.html",
                  {"datas": datas, "count": count, "second": end_time,
                   "pagination": pagination, "statistics": statistics,
                   "keyword": keywords})
def detail(request, id):
    """Render the detail page for an IP or domain document (v1).

    Looks the document up by Elasticsearch id; 404s when missing.
    For an "ips" doc: related domains plus the class-C neighbourhood.
    For a "domains" doc: hosting IPs plus sibling subdomains.
    """
    data = es_search_ip_by_id(id)
    if not data:
        raise Http404
    data = data[0]
    doc_type = data["_type"]
    data = data["_source"]
    data["published_from"] = datetime_string_format(data["published_from"])
    if doc_type == "ips":
        target = data["target"]
        data["proper"] = is_proper(target, "ip")
        # Domains resolving to this IP.
        union_domains = es_search_domain_by_ip(target)
        # Related class-C (/24) IPs.
        c_data = []
        temp_ips = target.split(".")
        if len(temp_ips) == 4:
            del temp_ips[-1]
            query_ip = '.'.join(temp_ips) + ".*"
            payload = {
                "query": {
                    "wildcard": {"target": query_ip}
                }
            }
            s = Search(using=es, index='w12scan', doc_type='ips').from_dict(payload)
            res = s.execute()
            for hit in res:
                cid = hit.meta.id
                d = hit.to_dict()
                if d["target"] != target:
                    # Domains hosted on the neighbouring class-C IP.
                    sub_data = []
                    sub_domain = es_search_domain_by_ip(d["target"])
                    for sub in sub_domain:
                        dd = {}
                        dd.update(sub)
                        sub_data.append(dd)
                    c_data.append({"id": cid, "ip": d["target"], "data": sub_data})
        return render(request, "frontend/ip_detail.html",
                      {"data": data, "union": union_domains, "c_data": c_data,
                       "third_infomation": third_info(target)})
    elif doc_type == "domains":
        ip = data["ip"]
        target = data["url"]
        data["proper"] = is_proper(target, "domain")
        payload = {
            "query": {
                "match": {
                    "target": ip
                }
            }
        }
        s = Search(using=es, index='w12scan', doc_type='ips').from_dict(payload)
        ip_data = []
        for hit in s:
            ip_data.append({"id": hit.meta.id, "ip": hit.to_dict()["target"]})
        # Subdomain lookup: derive the registered domain first.
        try:
            sub_domain = get_fld(target, fix_protocol=True)
        except Exception:
            # tld extraction failed (e.g. bare IP or unknown suffix) -> skip listing.
            sub_domain = None
        sub_domain_data = []
        if sub_domain:
            payload = {
                "query": {
                    "wildcard": {"url": "*." + sub_domain}
                },
                "size": 1000
            }
            s = Search(using=es, index='w12scan', doc_type='domains').from_dict(payload)
            for hit in s:
                dd = {}
                dd.update(hit.to_dict())
                dd["id"] = hit.meta.id
                dd["published_from"] = datetime_string_format(dd["published_from"])
                sub_domain_data.append(dd)
        return render(request, "frontend/domain_detail.html",
                      {"data": data, "ip_data": ip_data, "sub_domain": sub_domain_data,
                       "third_infomation": third_info(ip)})
def detail(request, id):
    '''
    IP / domain detail page (v2: adds history records and summary fields).
    :param request: Django request object.
    :param id: Elasticsearch document id.
    :return: rendered ip_detail.html or domain_detail.html response.
    '''
    data = es_search_ip_by_id(id)
    if not data:
        raise Http404
    data = data[0]
    doc_type = data["_type"]
    data = data["_source"]
    data["published_from"] = datetime_string_format(data["published_from"])
    if doc_type == "ips":
        target = data["target"]
        data["proper"] = is_proper(target, "ip")
        # Domains resolving to this IP (deduplicated).
        union_domains = es_search_domain_by_ip(target, True)
        # Historical records for this IP.
        historys = es_search_ip(target)
        for h in historys:
            h["published_from"] = datetime_string_format(h["published_from"])
        # Related class-C (/24) IPs.
        c_data = []
        temp_ips = target.split(".")
        if len(temp_ips) == 4:
            del temp_ips[-1]
            query_ip = '.'.join(temp_ips) + ".*"
            payload = {
                "query": {
                    "wildcard": {"target": query_ip}
                },
                # Collapse on target so each IP appears once, newest first.
                "collapse": {
                    "field": "target"
                },
                "sort": {
                    "published_from": {"order": "desc"}
                },
                "from": 0,
                "size": 10000
            }
            s = Search(using=es, index='w12scan', doc_type='ips').from_dict(payload)
            res = s.execute()
            for hit in res:
                cid = hit.meta.id
                d = hit.to_dict()
                if d["target"] != target:
                    if isinstance(d["target"], list):
                        d["target"] = d["target"][0]
                    # Domains hosted on the neighbouring class-C IP.
                    sub_data = []
                    sub_domain = es_search_domain_by_ip(d["target"], True)
                    for sub in sub_domain:
                        dd = {}
                        dd.update(sub)
                        sub_data.append(dd)
                    # "port/name" summary of open services.
                    extrainfo = ""
                    for k in d.get("infos", []):
                        extrainfo += "{0}/{1} ".format(k.get("port", ""), k.get("name", "unknown"))
                    c_data.append({"id": cid, "ip": d["target"], "data": sub_data,
                                   "extrainfo": extrainfo})
        # Sort c_data by the last octet of the IP.
        # Fixed: the previous default of 0 (an int) would crash on .split();
        # use a string fallback so the key is always computable.
        c_data.sort(key=lambda a: int(a.get("ip", "0.0.0.0").split(".")[-1]))
        return render(request, "frontend/ip_detail.html",
                      {"data": data, "union": union_domains, "c_data": c_data,
                       "third_infomation": third_info(target), "historys": historys})
    elif doc_type == "domains":
        ip = data["ip"]
        target = data["url"]
        data["proper"] = is_proper(target, "domain")
        # Summary fields shown in the header list.
        field = ["title", "status_code", "X-Powered-By", "Server"]
        uldata = []
        for f in field:
            if f in data:
                uldata.append((f, data[f]))
        hit = es_search_ip(ip, deduplicat=True)
        # Historical records for this URL.
        historys = es_search_domain_by_url(target)
        for h in historys:
            h["published_from"] = datetime_string_format(h["published_from"])
        ip_data = {}
        if hit:
            ip_data["id"] = hit.meta.id
            ip_data["ip"] = list(hit.target)[0]
        # Subdomain lookup: derive the registered domain first.
        try:
            sub_domain = get_fld(target, fix_protocol=True)
        except Exception:
            # tld extraction failed (e.g. bare IP or unknown suffix) -> skip listing.
            sub_domain = None
        sub_domain_data = []
        if sub_domain:
            payload = {
                "query": {
                    "wildcard": {"url": "*." + sub_domain}
                },
                # Collapse on url so each subdomain appears once, newest first.
                "collapse": {
                    "field": "url"
                },
                "sort": {
                    "published_from": {"order": "desc"}
                },
                "from": 0,
                "size": 10000
            }
            s = Search(using=es, index='w12scan', doc_type='domains').from_dict(payload)
            for hit in s:
                dd = {}
                dd.update(hit.to_dict())
                if isinstance(dd["url"], list):
                    dd["url"] = dd["url"][0]
                dd["id"] = hit.meta.id
                dd["published_from"] = datetime_string_format(dd["published_from"])
                sub_domain_data.append(dd)
        return render(request, "frontend/domain_detail.html",
                      {"data": data, "ip_data": ip_data, "sub_domain": sub_domain_data,
                       "third_infomation": third_info(ip), "historys": historys,
                       "uldata": uldata})
def detail(request, id):
    """Render the detail page for an IP or domain document (v3).

    Looks the document up by Elasticsearch id; 404s when missing.
    For an "ips" doc: related domains plus the class-C neighbourhood.
    For a "domains" doc: hosting IPs plus sibling subdomains.
    """
    data = es_search_ip_by_id(id)
    if not data:
        raise Http404
    data = data[0]
    doc_type = data["_type"]
    data = data["_source"]
    data["published_from"] = datetime_string_format(data["published_from"])
    if doc_type == "ips":
        target = data["target"]
        data["proper"] = is_proper(target, "ip")
        # Domains resolving to this IP (deduplicated).
        union_domains = es_search_domain_by_ip(target, True)
        # Related class-C (/24) IPs.
        c_data = []
        temp_ips = target.split(".")
        if len(temp_ips) == 4:
            del temp_ips[-1]
            query_ip = '.'.join(temp_ips) + ".*"
            payload = {
                "query": {
                    "wildcard": {"target": query_ip}
                },
                # Collapse on target so each IP appears once, newest first.
                "collapse": {
                    "field": "target"
                },
                "sort": {
                    "published_from": {"order": "desc"}
                },
                "from": 0,
                "size": 10000
            }
            s = Search(using=es, index='w12scan', doc_type='ips').from_dict(payload)
            res = s.execute()
            for hit in res:
                cid = hit.meta.id
                d = hit.to_dict()
                if d["target"] != target:
                    if isinstance(d["target"], list):
                        d["target"] = d["target"][0]
                    # Domains hosted on the neighbouring class-C IP.
                    sub_data = []
                    sub_domain = es_search_domain_by_ip(d["target"], True)
                    for sub in sub_domain:
                        dd = {}
                        dd.update(sub)
                        sub_data.append(dd)
                    # "port/name" summary of open services.
                    # Fixed: a trailing space is now appended for every entry,
                    # not only named ones, so entries no longer run together.
                    extrainfo = ""
                    for k in d.get("infos", []):
                        extrainfo += "{}".format(k.get("port", ""))
                        if k.get("name"):
                            extrainfo += "/{}".format(k["name"])
                        extrainfo += " "
                    c_data.append({"id": cid, "ip": d["target"], "data": sub_data,
                                   "extrainfo": extrainfo})
        # Sort c_data by the last octet of the IP.
        # Fixed: the previous default of 0 (an int) would crash on .split();
        # use a string fallback so the key is always computable.
        c_data.sort(key=lambda a: int(a.get("ip", "0.0.0.0").split(".")[-1]))
        return render(request, "frontend/ip_detail.html",
                      {"data": data, "union": union_domains, "c_data": c_data,
                       "third_infomation": third_info(target)})
    elif doc_type == "domains":
        ip = data["ip"]
        target = data["url"]
        data["proper"] = is_proper(target, "domain")
        payload = {
            "query": {
                "match": {
                    "target": ip
                }
            },
            # Collapse on target so each IP appears once, newest first.
            "collapse": {
                "field": "target"
            },
            "sort": {
                "published_from": {"order": "desc"}
            }
        }
        s = Search(using=es, index='w12scan', doc_type='ips').from_dict(payload)
        ip_data = []
        for hit in s:
            ip_data.append({"id": hit.meta.id, "ip": hit.to_dict()["target"]})
        # Subdomain lookup: derive the registered domain first.
        try:
            sub_domain = get_fld(target, fix_protocol=True)
        except Exception:
            # tld extraction failed (e.g. bare IP or unknown suffix) -> skip listing.
            sub_domain = None
        sub_domain_data = []
        if sub_domain:
            payload = {
                "query": {
                    "wildcard": {"url": "*." + sub_domain}
                },
                # Collapse on url so each subdomain appears once, newest first.
                "collapse": {
                    "field": "url"
                },
                "sort": {
                    "published_from": {"order": "desc"}
                },
                "from": 0,
                "size": 10000
            }
            s = Search(using=es, index='w12scan', doc_type='domains').from_dict(payload)
            for hit in s:
                dd = {}
                dd.update(hit.to_dict())
                if isinstance(dd["url"], list):
                    dd["url"] = dd["url"][0]
                dd["id"] = hit.meta.id
                dd["published_from"] = datetime_string_format(dd["published_from"])
                sub_domain_data.append(dd)
        return render(request, "frontend/domain_detail.html",
                      {"data": data, "ip_data": ip_data, "sub_domain": sub_domain_data,
                       "third_infomation": third_info(ip)})