# Assumed imports for the snippets in this section (they come from several
# modules, so the exact set per file differs):
#   import json, time
#   from time import time                      (the Django views call time() directly)
#   from aliyun.log import LogClient, GetLogsRequest, GetHistogramsRequest, \
#       ListLogstoresRequest, PutLogsRequest, LogItem, ExternalStoreConfig
#   from django.http import JsonResponse       (for the view functions)

def icgoo_visit_detail():
    """Periodically pull client IPs from the Aliyun log service and flag IPs
    that hit the data API directly. Runs every 5 minutes."""
    endpoint = ''      # Endpoint matching the region of the Project created above
    accessKeyId = ''   # your Aliyun AccessKeyId
    accessKey = ''     # your Aliyun AccessKeySecret
    project = ''       # name of the project created above
    logstore = ''      # name of the logstore created above
    client = LogClient(endpoint, accessKeyId, accessKey)
    topic = ""
    To = int(time.time()) + 100
    From1 = To - 500
    From2 = To - 600
    sql1 = "* and request_uri: search/getdata|SELECT DISTINCT client_ip limit 0,2000"
    sql2 = "* and request_uri:accounts/is_login |SELECT DISTINCT client_ip limit 0,2000"
    res1 = GetLogsRequest(project, logstore, From1, To, topic, sql1, 2000, 0, False)
    last_res1 = client.get_logs(res1).get_logs()
    # Widen the whitelist search window to avoid false bans caused by long
    # gaps between a client's login request and its data request.
    time.sleep(50)
    res2 = GetLogsRequest(project, logstore, From2, To, topic, sql2, 2000, 0, False)
    last_res2 = client.get_logs(res2).get_logs()
    all_ip = [i.contents['client_ip'] for i in last_res1]
    white_ip = [i.contents['client_ip'] for i in last_res2]
    # IPs that queried the data API but never hit the login check are ban candidates.
    ip_ban = list(set(all_ip).difference(set(white_ip)))
    # mail_ipban('log', str(To), json.dumps(ip_ban))
    for each_ip in ip_ban:
        # IPs that are not allowed to be unlocked
        ip_str = Webip(each_ip, 'Anonymous')
        ip_obj = IPINFO.query.filter(IPINFO.ip_log == ip_str.md5_ip).first()
        if ip_obj:
            if not ip_obj.ip_white():
                (ip_obj.lock_1m_times,
                 ip_obj.lock_30m_times,
                 ip_obj.lock_status,
                 ip_obj.white_list_status,
                 ip_obj.unlock_after_lockd) = sql_results['2']
                db_session.commit()
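# --- Added sketch (not part of the original code): the docstring above says
# this job runs every 5 minutes; a minimal stdlib-only driver could look like
# the hypothetical run_every() below.
import time
import traceback

def run_every(interval_seconds, job):
    """Naive fixed-interval scheduler: run `job`, log errors, sleep, repeat."""
    while True:
        started = time.time()
        try:
            job()
        except Exception:
            traceback.print_exc()  # keep the loop alive on transient SDK errors
        # Sleep whatever is left of the interval (never a negative amount).
        time.sleep(max(0, interval_seconds - (time.time() - started)))

# run_every(300, icgoo_visit_detail)  # every 5 minutes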
def flow_chart_get_totle_count(request):
    return_result = {}
    data = []
    try:
        user_id = request.session['user_id']
        json_data = json.loads(request.body)
        try:
            domain = json_data['domain']
            req_sql = DOMAIN_REQUEST_TOTLE_COUNT % (domain)
            req_sql2 = DOMAIN_UPSTREAM_TOTLE_COUNT % (domain)
        except KeyError:
            # No domain supplied: fall back to the site-wide totals.
            req_sql = REQUEST_TOTLE_COUNT
            req_sql2 = UPSTREAM_TOTLE_COUNT
        time_zone = json_data['time_zone']
        from_time = int(time() - 86400)
        if time_zone == "7day":
            from_time = int(time() - 604800)
        elif time_zone == "24hour":
            from_time = int(time() - 86400)
        elif time_zone == "1hour":
            from_time = int(time() - 3600)
        global_result = waf_global.objects.get(user_id=user_id)
        endpoint = global_result.aliyun_log_endpoint.replace('https://', '').replace('http://', '')
        accessKeyId = global_result.aliyun_access_id
        accessKey = global_result.aliyun_access_secret
        project = global_result.aliyun_project
        logstore = global_result.aliyun_logstore
        client = LogClient(endpoint, accessKeyId, accessKey)
        req = GetLogsRequest(project=project, logstore=logstore, fromTime=from_time,
                             toTime=int(time()), topic='', query=req_sql)
        res = client.get_logs(req)
        request_count = ""
        for log_result in res.get_logs():
            try:
                request_count = log_result.get_contents()['count']
            except KeyError:
                pass
        # Reuse the same client for the upstream totals query.
        req2 = GetLogsRequest(project=project, logstore=logstore, fromTime=from_time,
                              toTime=int(time()), topic='', query=req_sql2)
        res2 = client.get_logs(req2)
        upstream_count = ""
        for log_result2 in res2.get_logs():
            try:
                upstream_count = log_result2.get_contents()['count']
            except KeyError:
                pass
        return_result['result'] = True
        return_result['request_count'] = request_count
        return_result['upstream_count'] = upstream_count
        return JsonResponse(return_result, safe=False)
    except Exception as e:
        return_result['result'] = False
        return_result['message'] = str(e)
        return_result['errCode'] = 400
        return JsonResponse(return_result, safe=False)
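# --- Added sketch (assumption, not in the original): every view in this file
# repeats the same time_zone-to-start-time mapping; a helper like the
# hypothetical resolve_from_time below would remove that duplication.
# Assumes the views' `from time import time` import.
_WINDOWS = {"7day": 604800, "24hour": 86400, "1hour": 3600}

def resolve_from_time(time_zone):
    """Map a frontend time_zone label to a Unix start timestamp (default: 24h)."""
    return int(time() - _WINDOWS.get(time_zone, 86400))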
def sample_get_logs(client, project, logstore):
    # Query the last hour of logs under a fixed test topic and print them.
    topic = 'TestTopic_2'
    From = int(time.time()) - 3600
    To = int(time.time())
    request = GetLogsRequest(project, logstore, From, To, topic)
    response = client.get_logs(request)
    response.log_print()
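# --- Added usage example (the values are placeholders, mirroring main() below):
# client = LogClient('cn-hangzhou.sls.aliyuncs.com', 'your_access_key_id',
#                    'your_access_key')
# sample_get_logs(client, 'your_project', 'your_logstore')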
def cc_chart_get_geoip(request):
    return_result = {}
    data = []
    try:
        user_id = request.session['user_id']
        json_data = json.loads(request.body)
        time_zone = json_data['time_zone']
        from_time = int(time() - 86400)
        try:
            domain = json_data['domain']
            req_sql = DOMAIN_GEO_SQL % (domain)
        except KeyError:
            req_sql = GEO_SQL
        if time_zone == "7day":
            from_time = int(time() - 604800)
        elif time_zone == "24hour":
            from_time = int(time() - 86400)
        elif time_zone == "1hour":
            from_time = int(time() - 3600)
        global_result = waf_global.objects.get(user_id=user_id)
        endpoint = global_result.aliyun_log_endpoint.replace('https://', '').replace('http://', '')
        accessKeyId = global_result.aliyun_access_id
        accessKey = global_result.aliyun_access_secret
        project = global_result.aliyun_project
        logstore = global_result.aliyun_logstore
        client = LogClient(endpoint, accessKeyId, accessKey)
        req = GetLogsRequest(project=project, logstore=logstore, fromTime=from_time,
                             toTime=int(time()), topic='', query=req_sql)
        res = client.get_logs(req)
        for log_result in res.get_logs():
            # 'geo' is a "lat,lon" string; the chart expects [lon, lat], hence the swap.
            geo_info = log_result.get_contents()['geo'].split(",")
            try:
                data.append({
                    'name': log_result.get_contents()['city'],
                    'ip_count': log_result.get_contents()['ip_count'],
                    'geo': [geo_info[1], geo_info[0]],
                    'count': log_result.get_contents()['count'],
                })
            except KeyError:
                pass
        return_result['result'] = True
        return_result['message'] = data
        return JsonResponse(return_result, safe=False)
    except Exception as e:
        return_result['result'] = False
        return_result['message'] = str(e)
        return_result['errCode'] = 400
        return JsonResponse(return_result, safe=False)
def attack_chart_get_type_top10(request):
    return_result = {}
    data = []
    try:
        user_id = request.session['user_id']
        json_data = json.loads(request.body)
        time_zone = json_data['time_zone']
        from_time = int(time() - 86400)
        try:
            domain = json_data['domain']
            req_sql = DOMAIN_ATT_TYPE_TOP10 % (domain)
        except KeyError:
            req_sql = ATT_TYPE_TOP10
        if time_zone == "7day":
            from_time = int(time() - 604800)
        elif time_zone == "24hour":
            from_time = int(time() - 86400)
        elif time_zone == "1hour":
            from_time = int(time() - 3600)
        global_result = waf_global.objects.get(user_id=user_id)
        endpoint = global_result.aliyun_log_endpoint.replace('https://', '').replace('http://', '')
        accessKeyId = global_result.aliyun_access_id
        accessKey = global_result.aliyun_access_secret
        project = global_result.aliyun_project
        logstore = global_result.aliyun_logstore
        client = LogClient(endpoint, accessKeyId, accessKey)
        req = GetLogsRequest(project=project, logstore=logstore, fromTime=from_time,
                             toTime=int(time()), topic='', query=req_sql)
        res = client.get_logs(req)
        for log_result in res.get_logs():
            try:
                data.append({
                    'protection_type': log_result.get_contents()['protection_type'],
                    'count': log_result.get_contents()['count'],
                })
            except KeyError:
                pass
        return_result['result'] = True
        return_result['message'] = data
        return JsonResponse(return_result, safe=False)
    except Exception as e:
        return_result['result'] = False
        return_result['message'] = str(e)
        return_result['errCode'] = 103
        return JsonResponse(return_result, safe=False)
def cc_chart_get_black_ip_count(request):
    return_result = {}
    data = []
    try:
        user_id = request.session['user_id']
        json_data = json.loads(request.body)
        time_zone = json_data['time_zone']
        from_time = int(time() - 86400)
        try:
            domain = json_data['domain']
            req_sql = DOMAIN_CC_BLACK_IP_COUNT % (domain)
        except KeyError:
            req_sql = CC_BLACK_IP_COUNT
        if time_zone == "7day":
            from_time = int(time() - 604800)
        elif time_zone == "24hour":
            from_time = int(time() - 86400)
        elif time_zone == "1hour":
            from_time = int(time() - 3600)
        global_result = waf_global.objects.get(user_id=user_id)
        endpoint = global_result.aliyun_log_endpoint.replace('https://', '').replace('http://', '')
        accessKeyId = global_result.aliyun_access_id
        accessKey = global_result.aliyun_access_secret
        project = global_result.aliyun_project
        logstore = global_result.aliyun_logstore
        client = LogClient(endpoint, accessKeyId, accessKey)
        req = GetLogsRequest(project=project, logstore=logstore, fromTime=from_time,
                             toTime=int(time()), topic='', query=req_sql)
        res = client.get_logs(req)
        black_ip_count = ''
        for log_result in res.get_logs():
            try:
                black_ip_count = log_result.get_contents()['black_ip_count']
            except KeyError:
                pass
        return_result['result'] = True
        return_result['black_ip_count'] = black_ip_count
        return JsonResponse(return_result, safe=False)
    except Exception as e:
        return_result['result'] = False
        return_result['message'] = str(e)
        return_result['errCode'] = 400
        return JsonResponse(return_result, safe=False)
def flow_chart_get_bad_upstream_count_trend(request):
    return_result = {}
    data = []
    try:
        user_id = request.session['user_id']
        json_data = json.loads(request.body)
        time_zone = json_data['time_zone']
        from_time = int(time() - 86400)
        # The SQL constant differs per window here, so the time mapping cannot
        # be hoisted the way it is in the simpler views above.
        try:
            domain = json_data['domain']
            if time_zone == "7day":
                from_time = int(time() - 604800)
                req_sql = DOMAIN_BAD_UPSTREAM_COUNT_TREND_7D % (domain)
            elif time_zone == "24hour":
                from_time = int(time() - 86400)
                req_sql = DOMAIN_BAD_UPSTREAM_COUNT_TREND_24H % (domain)
            elif time_zone == "1hour":
                from_time = int(time() - 3600)
                req_sql = DOMAIN_BAD_UPSTREAM_COUNT_TREND_1H % (domain)
        except KeyError:
            if time_zone == "7day":
                from_time = int(time() - 604800)
                req_sql = UPSTREAM_BAD_COUNT_TREND_7D
            elif time_zone == "24hour":
                from_time = int(time() - 86400)
                req_sql = UPSTREAM_BAD_COUNT_TREND_24H
            elif time_zone == "1hour":
                from_time = int(time() - 3600)
                req_sql = UPSTREAM_BAD_COUNT_TREND_1H
        global_result = waf_global.objects.get(user_id=user_id)
        endpoint = global_result.aliyun_log_endpoint.replace('https://', '').replace('http://', '')
        accessKeyId = global_result.aliyun_access_id
        accessKey = global_result.aliyun_access_secret
        project = global_result.aliyun_project
        logstore = global_result.aliyun_logstore
        client = LogClient(endpoint, accessKeyId, accessKey)
        req = GetLogsRequest(project=project, logstore=logstore, fromTime=from_time,
                             toTime=int(time()), topic='', query=req_sql)
        res = client.get_logs(req)
        for log_result in res.get_logs():
            try:
                data.append({
                    'time': log_result.get_contents()['time'],
                    'count': log_result.get_contents()['count'],
                })
            except KeyError:
                pass
        return_result['result'] = True
        return_result['message'] = data
        return JsonResponse(return_result, safe=False)
    except Exception as e:
        return_result['result'] = False
        return_result['message'] = str(e)
        return_result['errCode'] = 400
        return JsonResponse(return_result, safe=False)
def attack_chart_get_type_trend(request):
    return_result = {}
    data = []
    try:
        user_id = request.session['user_id']
        json_data = json.loads(request.body)
        time_zone = json_data['time_zone']
        from_time = int(time() - 86400)
        try:
            domain = json_data['domain']
            if time_zone == "7day":
                from_time = int(time() - 604800)
                req_sql = DOMAIN_ATT_TYPE_7D % (domain)
            elif time_zone == "24hour":
                from_time = int(time() - 86400)
                req_sql = DOMAIN_ATT_TYPE_24H % (domain)
            elif time_zone == "1hour":
                from_time = int(time() - 3600)
                req_sql = DOMAIN_ATT_TYPE_1H % (domain)
        except KeyError:
            if time_zone == "7day":
                from_time = int(time() - 604800)
                req_sql = ATT_TYPE_7D
            elif time_zone == "24hour":
                from_time = int(time() - 86400)
                req_sql = ATT_TYPE_24H
            elif time_zone == "1hour":
                from_time = int(time() - 3600)
                req_sql = ATT_TYPE_1H
        global_result = waf_global.objects.get(user_id=user_id)
        endpoint = global_result.aliyun_log_endpoint.replace('https://', '').replace('http://', '')
        accessKeyId = global_result.aliyun_access_id
        accessKey = global_result.aliyun_access_secret
        project = global_result.aliyun_project
        logstore = global_result.aliyun_logstore
        client = LogClient(endpoint, accessKeyId, accessKey)
        req = GetLogsRequest(project=project, logstore=logstore, fromTime=from_time,
                             toTime=int(time()), topic='', query=req_sql)
        res = client.get_logs(req)
        # First pass: collect the x axis (time buckets) and y axis (attack types).
        x = []
        x_exist = {}
        y = []
        y_exist = {}
        for log_result in res.get_logs():
            contents = log_result.get_contents()
            if contents['time'] not in x_exist:
                x.append(contents['time'])
                x_exist[contents['time']] = len(x) - 1
            if contents['protection_type'] not in y_exist:
                if contents['protection_type'] != 'null':
                    y.append(contents['protection_type'])
                    y_exist[contents['protection_type']] = True
        # Second pass: pivot counts into one zero-filled series per attack type.
        result = {}
        for tmp in y:
            result[tmp] = [0] * len(x)
        for log_result in res.get_logs():
            contents = log_result.get_contents()
            if contents['protection_type'] in result:
                result[contents['protection_type']][x_exist[contents['time']]] = contents['count']
        return_result['result'] = True
        return_result['message'] = result
        return_result['x'] = x
        return_result['y'] = y
        return JsonResponse(return_result, safe=False)
    except Exception as e:
        return_result['result'] = False
        return_result['message'] = str(e)
        return_result['errCode'] = 103
        return JsonResponse(return_result, safe=False)
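# --- Added illustration (the rows are invented, not real log output): a
# self-contained toy run of the same two-pass pivot used in
# attack_chart_get_type_trend above.
def _pivot_demo():
    rows = [
        {'time': '10:00', 'protection_type': 'sql_injection', 'count': 3},
        {'time': '10:00', 'protection_type': 'xss', 'count': 1},
        {'time': '10:05', 'protection_type': 'sql_injection', 'count': 2},
    ]
    x = sorted({r['time'] for r in rows})             # time buckets (x axis)
    x_index = {t: i for i, t in enumerate(x)}
    y = sorted({r['protection_type'] for r in rows})  # attack types (y axis)
    series = {t: [0] * len(x) for t in y}             # one zero-filled row per type
    for r in rows:
        series[r['protection_type']][x_index[r['time']]] = r['count']
    assert series == {'sql_injection': [3, 2], 'xss': [1, 0]}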
def main():
    endpoint = ''      # Endpoint matching the region of the Project created above
    accessKeyId = ''   # your Aliyun AccessKeyId
    accessKey = ''     # your Aliyun AccessKeySecret
    project = ''       # name of the project created above
    logstore = ''      # name of the logstore created above
    # Important: configure the logstore with 4 shards so the tests below pass.

    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)

    # List all logstores
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()

    topic = ""
    source = ""

    # Send 10 batches, 10 log lines per batch
    for i in range(10):
        logitemList = []  # LogItem list
        for j in range(10):
            contents = [('index', str(i * 10 + j))]
            logItem = LogItem()
            logItem.set_time(int(time.time()))
            logItem.set_contents(contents)
            logitemList.append(logItem)
        req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
        res2 = client.put_logs(req2)
        res2.log_print()

    # List all shards and read back everything written in the last minute
    listShardRes = client.list_shards(project, logstore)
    for shard in listShardRes.get_shards_info():
        shard_id = shard["shardID"]
        start_time = int(time.time() - 60)
        end_time = start_time + 60
        res = client.get_cursor(project, logstore, shard_id, start_time)
        res.log_print()
        start_cursor = res.get_cursor()
        res = client.get_cursor(project, logstore, shard_id, end_time)
        end_cursor = res.get_cursor()
        while True:
            loggroup_count = 100  # read up to 100 log groups per call
            res = client.pull_logs(project, logstore, shard_id, start_cursor,
                                   loggroup_count, end_cursor)
            res.log_print()
            next_cursor = res.get_next_cursor()
            if next_cursor == start_cursor:
                break
            start_cursor = next_cursor

    # Important: the query interfaces below only work once indexing is enabled.
    time.sleep(60)

    topic = ""
    query = "index"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None
    # Count the logs matching `query` over the last 10 minutes; retry until
    # the result is complete.
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()

    # Total number of logs matching the query
    total_log_count = res3.get_total_count()
    log_line = 10

    # Page through the matching logs 10 at a time; retry each page up to
    # 3 times if the result is incomplete.
    for offset in range(0, total_log_count, log_line):
        res4 = None
        for retry_time in range(0, 3):
            req4 = GetLogsRequest(project, logstore, From, To, topic, query,
                                  log_line, offset, False)
            res4 = client.get_logs(req4)
            if res4 is not None and res4.is_completed():
                break
            time.sleep(1)
        if res4 is not None:
            res4.log_print()

    listShardRes = client.list_shards(project, logstore)
    shard = listShardRes.get_shards_info()[0]
    # Split a shard
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        inclusiveBeginKey = shard["inclusiveBeginKey"]
        midKey = inclusiveBeginKey[:-1] + str(int(inclusiveBeginKey[-1:]) + 1)
        client.split_shard(project, logstore, shard_id, midKey)

    # Merge shards
    shard = listShardRes.get_shards_info()[1]
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        client.merge_shard(project, logstore, shard_id)

    # Delete a shard
    shard = listShardRes.get_shards_info()[-1]
    if shard["status"] == "readonly":
        shard_id = shard["shardID"]
        client.delete_shard(project, logstore, shard_id)

    # Create an external data source (masked values kept from the original)
    res = client.create_external_store(
        project,
        ExternalStoreConfig("rds_store", "cn-qingdao", "rds-vpc",
                            "vpc-************", "i***********", "*.*.*.*",
                            "3306", "root", "sfdsfldsfksflsdfs", "meta",
                            "join_meta"))
    res.log_print()
    res = client.update_external_store(
        project,
        ExternalStoreConfig("rds_store", "cn-qingdao", "rds-vpc",
                            "vpc-************", "i************", "*.*.*.*",
                            "3306", "root", "sfdsfldsfksflsdfs", "meta",
                            "join_meta"))
    res.log_print()
    res = client.get_external_store(project, "rds_store")
    res.log_print()
    res = client.list_external_store(project, "")
    res.log_print()
    res = client.delete_external_store(project, "rds_store")
    res.log_print()

    # Run a query/analysis through the Python SDK
    req4 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | select count(1)", 10, 0, False)
    res4 = client.get_logs(req4)

    # Join against the RDS external store through the Python SDK
    req4 = GetLogsRequest(
        project, logstore, From, To, topic,
        "* | select count(1) from " + logstore + " l join rds_store r on l.ikey = r.ekey",
        10, 0, False)
    res4 = client.get_logs(req4)

    # Write a query result back into RDS through the Python SDK
    req4 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | insert into rds_store select count(1)", 10, 0, False)
    res4 = client.get_logs(req4)
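    # --- Added sketch (assumption, not in the original): the three queries
    # above discard their results. Reading the aggregate back out of a
    # completed response could look like this; the exact content key depends
    # on how the server labels the projection.
    req5 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | select count(1)", 10, 0, False)
    res5 = client.get_logs(req5)
    if res5.is_completed():
        for log in res5.get_logs():
            print(log.get_contents())  # a dict containing the count column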
# A second, filled-in variant of the sample above (if both live in one file,
# this definition shadows the first).
def main():
    endpoint = 'cn-hangzhou.sls.aliyuncs.com'  # Endpoint matching the region of the Project created above
    accessKeyId = 'your_access_key_id'         # your Aliyun AccessKeyId
    accessKey = 'your_access_key'              # your Aliyun AccessKeySecret
    project = 'your_project'                   # name of the project created above
    logstore = 'your_logstore'                 # name of the logstore created above

    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)

    # List all logstores
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()

    topic = ""
    source = ""

    # Send 10 batches, 10 log lines per batch
    for i in range(10):
        logitemList = []  # LogItem list
        for j in range(10):
            contents = [('index', str(i * 10 + j))]
            logItem = LogItem()
            logItem.set_time(int(time.time()))
            logItem.set_contents(contents)
            logitemList.append(logItem)
        req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
        res2 = client.put_logs(req2)
        res2.log_print()

    # List all shards and read back everything written in the last minute
    listShardRes = client.list_shards(project, logstore)
    for shard in listShardRes.get_shards_info():
        shard_id = shard["shardID"]
        start_time = int(time.time() - 60)
        end_time = start_time + 60
        res = client.get_cursor(project, logstore, shard_id, start_time)
        res.log_print()
        start_cursor = res.get_cursor()
        res = client.get_cursor(project, logstore, shard_id, end_time)
        end_cursor = res.get_cursor()
        while True:
            loggroup_count = 100  # read up to 100 log groups per call
            res = client.pull_logs(project, logstore, shard_id, start_cursor,
                                   loggroup_count, end_cursor)
            res.log_print()
            next_cursor = res.get_next_cursor()
            if next_cursor == start_cursor:
                break
            start_cursor = next_cursor

    # Important: the query interfaces below only work once indexing is enabled.
    time.sleep(60)

    topic = ""
    query = "index"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None
    # Count the logs matching `query` over the last 10 minutes; retry until
    # the result is complete.
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()

    # Total number of logs matching the query
    total_log_count = res3.get_total_count()
    log_line = 10

    # Page through the matching logs 10 at a time; retry each page up to
    # 3 times if the result is incomplete.
    for offset in range(0, total_log_count, log_line):
        res4 = None
        for retry_time in range(0, 3):
            req4 = GetLogsRequest(project, logstore, From, To, topic, query,
                                  log_line, offset, False)
            res4 = client.get_logs(req4)
            if res4 is not None and res4.is_completed():
                break
            time.sleep(1)
        if res4 is not None:
            res4.log_print()

    listShardRes = client.list_shards(project, logstore)
    shard = listShardRes.get_shards_info()[0]
    # Split a shard
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        inclusiveBeginKey = shard["inclusiveBeginKey"]
        midKey = inclusiveBeginKey[:-1] + str(int(inclusiveBeginKey[-1:]) + 1)
        client.split_shard(project, logstore, shard_id, midKey)

    # Merge shards
    shard = listShardRes.get_shards_info()[1]
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        client.merge_shard(project, logstore, shard_id)

    # Delete a shard
    shard = listShardRes.get_shards_info()[-1]
    if shard["status"] == "readonly":
        shard_id = shard["shardID"]
        client.delete_shard(project, logstore, shard_id)
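# --- Added entry-point guard (not in the original): both samples define
# main() but never invoke it; this makes the file runnable directly.
if __name__ == '__main__':
    main()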