def __init__(self, config: MigrationConfig):
    self._config = config
    _uuid = str(uuid.uuid4())
    self._id = ''.join(_uuid.split('-'))
    self._es = Elasticsearch(
        hosts=self._config.hosts,
        timeout=60,
        max_retries=30,
        retry_on_timeout=True,
        verify_certs=False,
    )
    self._log_client = LogClient(
        endpoint=self._config.get('endpoint'),
        accessKeyId=self._config.access_key_id,
        accessKey=self._config.access_key,
    )
    setup_logging(
        self._id,
        self._config.get('endpoint'),
        self._config.get('project_name'),
        self._config.access_key_id,
        self._config.access_key,
    )
    self._logger = logging.getLogger(__name__)
    self._shutdown_flag = op.join(self._config.cache_path, 'shutdown.lock')
    print('#migration: {}'.format(self._id))
def icgoo_visit_detail():
    """Periodically pull request IPs from the Aliyun access log to find clients
    that call the API directly. Runs every 5 minutes."""
    endpoint = ''      # Endpoint matching the region of the project created above
    accessKeyId = ''   # your Aliyun AccessKeyId
    accessKey = ''     # your Aliyun AccessKeySecret
    project = ''       # project name created above
    logstore = ''      # logstore name created above
    client = LogClient(endpoint, accessKeyId, accessKey)
    topic = ""
    To = int(time.time()) + 100
    From1 = To - 500
    From2 = To - 600
    sql1 = "* and request_uri: search/getdata|SELECT DISTINCT client_ip limit 0,2000"
    sql2 = "* and request_uri:accounts/is_login |SELECT DISTINCT client_ip limit 0,2000"
    res1 = GetLogsRequest(project, logstore, From1, To, topic, sql1, 2000, 0, False)
    last_res1 = client.get_logs(res1).get_logs()
    # Query the whitelist over a wider time range to avoid banning clients whose
    # requests are simply spaced far apart.
    time.sleep(50)
    res2 = GetLogsRequest(project, logstore, From2, To, topic, sql2, 2000, 0, False)
    last_res2 = client.get_logs(res2).get_logs()
    all_ip = [i.contents['client_ip'] for i in last_res1]
    white_ip = [i.contents['client_ip'] for i in last_res2]
    ip_ban = list(set(all_ip).difference(set(white_ip)))
    # mail_ipban('log', str(To), json.dumps(ip_ban))
    for each_ip in ip_ban:
        # IPs that should not be unlocked automatically
        ip_str = Webip(each_ip, 'Anonymous')
        ip_obj = IPINFO.query.filter(IPINFO.ip_log == ip_str.md5_ip).first()
        if ip_obj:
            if not ip_obj.ip_white():
                (ip_obj.lock_1m_times,
                 ip_obj.lock_30m_times,
                 ip_obj.lock_status,
                 ip_obj.white_list_status,
                 ip_obj.unlock_after_lockd) = sql_results['2']
                db_session.commit()
def get_loghub_client():
    if is_aliyun():
        endpoint = loghub_endpoint_aliyun
    else:
        endpoint = loghub_endpoint
    access_key_id = loghub_accessKeyId
    access_key = loghub_accessKey
    return LogClient(endpoint, access_key_id, access_key)
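# A minimal usage sketch for get_loghub_client(). The project/logstore names are
# hypothetical placeholders, and the same imports (time, LogItem, PutLogsRequest)
# used by the other snippets in this file are assumed.
def write_heartbeat(project='my-project', logstore='my-logstore'):
    client = get_loghub_client()
    item = LogItem()
    item.set_time(int(time.time()))
    item.set_contents([('event', 'heartbeat')])
    req = PutLogsRequest(project, logstore, '', '', [item])
    client.put_logs(req).log_print()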
def __init__(self, access_key, access_key_secret, region='cn-hangzhou',
             project='log-project-001', logstore='logstore-backup'):
    self._access_key = access_key
    self._access_key_secret = access_key_secret
    self._region = region
    self._project = project
    self._logstore = logstore
    # Derive the service endpoint from the region, e.g. cn-hangzhou.log.aliyuncs.com
    self._endpoint = '%s.log.aliyuncs.com' % self._region
    self._client = LogClient(self._endpoint, self._access_key, self._access_key_secret)
    self._topic = 'BackupLogs'
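# The constructor above only sets up the client; a write method is not shown.
# Below is a hedged sketch of what one could look like: the method name and
# record fields are assumptions, while LogItem/PutLogsRequest/put_logs follow
# the SDK usage shown elsewhere in this file.
def put_backup_record(self, status, detail):
    item = LogItem()
    item.set_time(int(time.time()))
    item.set_contents([('status', status), ('detail', detail)])
    req = PutLogsRequest(self._project, self._logstore, self._topic, '', [item])
    return self._client.put_logs(req)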
def __init__(self, endpoint, access_id, access_key, project, logstore,
             max_buffer_trace=10000, batch_size=100, buffer_interval=10):
    self.trace_queue = Queue.Queue()
    self.semaphore = threading.Semaphore(0)
    self.batch_size = batch_size
    self.max_buffer_trace = max_buffer_trace
    self.buffer_interval = buffer_interval
    self.running = True
    self.logClient = LogClient(endpoint, access_id, access_key, "")
    self.project = project
    self.logstore = logstore
    self.hostname = socket.gethostname()
    self.ip = socket.gethostbyname(self.hostname)
    self.last_send_time = time.time()
    # Background daemon thread that drains the trace queue and ships batches to Log Service.
    self.send_thread = threading.Thread(target=self.sed_trace_thread)
    self.send_thread.setDaemon(True)
    self.send_thread.start()
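# The thread target referenced above (self.sed_trace_thread) is not included in
# this snippet. The following is only a hedged sketch of what such a
# drain-and-flush loop could look like, reusing the fields set in the
# constructor and the SDK calls (LogItem, PutLogsRequest, put_logs) that appear
# elsewhere in this file; field names inside the log record are assumptions.
def sed_trace_thread(self):
    while self.running:
        batch = []
        try:
            # Block briefly for the first trace, then opportunistically fill the batch.
            batch.append(self.trace_queue.get(timeout=self.buffer_interval))
            while len(batch) < self.batch_size:
                batch.append(self.trace_queue.get_nowait())
        except Queue.Empty:
            pass
        if not batch:
            continue
        items = []
        for trace in batch:
            item = LogItem()
            item.set_time(int(time.time()))
            item.set_contents([('host', self.hostname), ('ip', self.ip),
                               ('trace', str(trace))])
            items.append(item)
        req = PutLogsRequest(self.project, self.logstore, '', '', items)
        self.logClient.put_logs(req)
        self.last_send_time = time.time()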
def handler(event, context):
    evt = json.loads(event)
    # Read the request fields from the API Gateway query parameters
    uid = evt['queryParameters']['uid']
    ip = evt['queryParameters']['ip']
    device = evt['queryParameters']['device']
    # print("test-api-gateway output: The uid is %s, the ip address is %s and the device type is %s. " % (uid, ip, device))
    endpoint = 'cn-XXXX.log.aliyuncs.com'     # Endpoint matching the region of the project created above
    accessKeyId = 'XXXX'                      # your Aliyun AccessKeyId
    accessKey = 'XXXX'                        # your Aliyun AccessKeySecret
    project = 'eric-nginx-logstore'           # project name created above
    logstore = 'eric-nginx-access-log-store'  # logstore name created above
    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)
    topic = ""
    source = ""
    # Write the request fields to the logstore
    logitemList = []  # LogItem list
    contents = [('ip', ip), ('uid', uid), ('device', device)]
    print("FunctionCompute --> LogService test output: " + ip + " - " + uid + " - " + device)
    logItem = LogItem()
    logItem.set_time(int(time.time()))
    logItem.set_contents(contents)
    logitemList.append(logItem)
    req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
    res2 = client.put_logs(req2)
    res2.log_print()
    response_content = "you can return any string"
    api_rep = {
        "isBase64Encoded": "false",
        "statusCode": "200",
        "headers": {"x-custom-header": "your header"},
        "body": response_content,
    }
    return json.dumps(api_rep)
def handler(event, context):
    evt = json.loads(event)
    print("LogService trigger and send data to FunctionCompute test output, "
          "The content of event is : %s" % (evt))
    # AK/SK
    accessKeyId = '**********'
    accessKey = '**********'
    # Extract the trigger parameters from the event
    endpoint = evt['source']['endpoint']
    project = evt['source']['projectName']
    logstore = evt['source']['logstoreName']
    shard_id = evt['source']['shardId']
    start_cursor = evt['source']['beginCursor']
    end_cursor = evt['source']['endCursor']
    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)
    # Count how many log events sit between beginCursor and endCursor on this shard
    counter = 0
    while True:
        loggroup_count = 100  # read up to 100 log groups per call
        res = client.pull_logs(project, logstore, shard_id, start_cursor,
                               loggroup_count, end_cursor)
        # res.log_print()
        counter = counter + res.get_log_count()
        next_cursor = res.get_next_cursor()
        if next_cursor == start_cursor:
            break
        start_cursor = next_cursor
    print("LogService trigger and send data to FunctionCompute test output: "
          "The shard_id is: %d, the start_cursor is: %s, the end_cursor is: %s, "
          "the count of log events is : %d"
          % (shard_id, start_cursor, end_cursor, counter))
    return counter
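# A minimal local driver for the trigger handler above. The key names are exactly
# the ones the handler reads from the event; the values are placeholders.
if __name__ == '__main__':
    sample_event = json.dumps({
        "source": {
            "endpoint": "cn-hangzhou.log.aliyuncs.com",
            "projectName": "my-project",
            "logstoreName": "my-logstore",
            "shardId": 0,
            "beginCursor": "placeholder-begin-cursor",
            "endCursor": "placeholder-end-cursor",
        }
    })
    print(handler(sample_event, None))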
def main():
    # Endpoint matching the region of the log project
    endpoint = 'cn-beijing-intranet.log.aliyuncs.com'
    # Aliyun AccessKeyId
    accessKeyId = '**********'
    # Aliyun AccessKeySecret
    accessKey = '****************************'
    # Log project name
    project = 'java-applications'
    # Logstore name
    logstore = 'mop_log'
    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)
    # List all logstores
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()
    topic = "mop"
    query = "/ask/article/list or /ask/article/get"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None
    # Count the logs matching the query over the last 10 minutes; retry until the
    # result is complete
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()
    print(type(res3.log_print()))
    res = client.list_logstore(project, logstore)
    res.log_print()
    res = client.delete_logstore(project, logstore)
    res.log_print()


if __name__ == '__main__':
    endpoint = ''
    accessKeyId = ''
    accessKey = ''
    project = ''
    logstore = ''
    token = None

    client = LogClient(endpoint, accessKeyId, accessKey, token)
    sample_logstore(client, project, logstore)

    res = client.create_logstore(project, logstore, 1, 1)
    time.sleep(0.1)
    try:
        sample_list_logstores(client, project)
        sample_logtail_config(client, project, logstore)
        sample_machine_group(client, project)
        sample_apply_config(client, project)
        sample_index(client, project, logstore)
        sample_list_topics(client, project, logstore)
        sample_put_logs(client, project, logstore)
        sample_pull_logs(client, project, logstore)
def __init__(self, name):
    self.topic = name
    self.client = LogClient(endpoint, keyid, key)
    # List all logstores in the project to verify connectivity
    req1 = ListLogstoresRequest(project)
    res = self.client.list_logstores(req1)
    res.log_print()
def main():
    endpoint = ''     # Endpoint matching the region of the project created above
    accessKeyId = ''  # your Aliyun AccessKeyId
    accessKey = ''    # your Aliyun AccessKeySecret
    project = ''      # project name created above
    logstore = ''     # logstore name created above
    # Important: configure the logstore with 4 shards so the later tests pass

    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)

    # List all logstores
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()

    topic = ""
    source = ""

    # Send 10 batches, each containing 10 log entries
    for i in range(10):
        logitemList = []  # LogItem list
        for j in range(10):
            contents = [('index', str(i * 10 + j))]
            logItem = LogItem()
            logItem.set_time(int(time.time()))
            logItem.set_contents(contents)
            logitemList.append(logItem)
        req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
        res2 = client.put_logs(req2)
        res2.log_print()

    # List all shards and read back everything written in the last minute
    listShardRes = client.list_shards(project, logstore)
    for shard in listShardRes.get_shards_info():
        shard_id = shard["shardID"]
        start_time = int(time.time() - 60)
        end_time = start_time + 60
        res = client.get_cursor(project, logstore, shard_id, start_time)
        res.log_print()
        start_cursor = res.get_cursor()
        res = client.get_cursor(project, logstore, shard_id, end_time)
        end_cursor = res.get_cursor()
        while True:
            loggroup_count = 100  # read up to 100 log groups per call
            res = client.pull_logs(project, logstore, shard_id, start_cursor,
                                   loggroup_count, end_cursor)
            res.log_print()
            next_cursor = res.get_next_cursor()
            if next_cursor == start_cursor:
                break
            start_cursor = next_cursor

    # Important: the query interfaces below only work after indexing is enabled
    time.sleep(60)

    topic = ""
    query = "index"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None
    # Count the logs matching the query over the last 10 minutes; retry until the
    # result is complete
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()

    # Total number of logs matching the query
    total_log_count = res3.get_total_count()
    log_line = 10
    # Page through the logs 10 at a time; retry each page up to 3 times if the
    # result is not complete
    for offset in range(0, total_log_count, log_line):
        res4 = None
        for retry_time in range(0, 3):
            req4 = GetLogsRequest(project, logstore, From, To, topic, query,
                                  log_line, offset, False)
            res4 = client.get_logs(req4)
            if res4 is not None and res4.is_completed():
                break
            time.sleep(1)
        if res4 is not None:
            res4.log_print()

    listShardRes = client.list_shards(project, logstore)
    shard = listShardRes.get_shards_info()[0]
    # Split a shard
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        inclusiveBeginKey = shard["inclusiveBeginKey"]
        midKey = inclusiveBeginKey[:-1] + str(int(inclusiveBeginKey[-1:]) + 1)
        client.split_shard(project, logstore, shard_id, midKey)

    # Merge a shard
    shard = listShardRes.get_shards_info()[1]
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        client.merge_shard(project, logstore, shard_id)

    # Delete a shard
    shard = listShardRes.get_shards_info()[-1]
    if shard["status"] == "readonly":
        shard_id = shard["shardID"]
        client.delete_shard(project, logstore, shard_id)

    # Create an external data source (RDS)
    res = client.create_external_store(
        project,
        ExternalStoreConfig("rds_store", "cn-qingdao", "rds-vpc", "vpc-************",
                            "i***********", "*.*.*.*", "3306", "root",
                            "sfdsfldsfksflsdfs", "meta", "join_meta"))
    res.log_print()
    res = client.update_external_store(
        project,
        ExternalStoreConfig("rds_store", "cn-qingdao", "rds-vp", "rds-vpc",
                            "vpc-************", "i************", "*.*.*.*", "3306",
                            "root", "sfdsfldsfksflsdfs", "meta", "join_meta"))
    res.log_print()
    res = client.get_external_store(project, "rds_store")
    res.log_print()
    res = client.list_external_store(project, "")
    res.log_print()
    res = client.delete_external_store(project, "rds_store")
    res.log_print()

    # Run a query/analysis with the Python SDK
    req4 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | select count(1)", 10, 0, False)
    res4 = client.get_logs(req4)

    # Join against the RDS external store with the Python SDK
    req4 = GetLogsRequest(
        project, logstore, From, To, topic,
        "* | select count(1) from " + logstore + " l join rds_store r on l.ikey = r.ekey",
        10, 0, False)
    res4 = client.get_logs(req4)

    # Write the query result into RDS with the Python SDK
    req4 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | insert into rds_store select count(1) ", 10, 0, False)
    res4 = client.get_logs(req4)
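    # The analytic responses above are never printed. A small sketch of how the
    # result rows could be inspected, using GetLogsResponse.get_logs() and the
    # .contents mapping the same way as the other snippets in this file:
    for row in res4.get_logs():
        print(row.contents)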
from aliyun.log.logclient import LogClient

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/..")
from common import Common
from log import Log

# Load config values
accessKeyId, accessKeySecret, logEndPoint, logProject, logStore, ossEndPoint, \
    ossBucketName, securityToken = Common.LoadConfig()

# Initialise OSS authentication
auth = oss2.Auth(accessKeyId, accessKeySecret)

# Initialise Log Service authentication client
client = LogClient(logProject + '.' + logEndPoint, accessKeyId, accessKeySecret)

# Instantiate OSS bucket
bucket = oss2.Bucket(auth, ossEndPoint, ossBucketName)


@app.route('/')
@app.route('/index')
def index():
    return render_template('base.html')


@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    print(accessKeyId)
    print(accessKeySecret)
environment = sys.argv[4]
print("Update the Logtail configuration on the log project (environment = "
      + environment + ", region = " + regionId + ")")

endpoint = regionId + ".log.aliyuncs.com"
logProjectName = "sample-app-log-project-" + environment
logStoreName = "sample-app-log-store-" + environment
logtailConfigName = "sample-app-logtail-config-" + environment
logMachineGroupName = "sample-app-log-machine-group-" + environment

# Load the existing Logtail configuration
print("Loading existing Logtail configuration (endpoint = " + endpoint
      + ", logProjectName = " + logProjectName
      + ", logtailConfigName = " + logtailConfigName + ")...")
client = LogClient(endpoint, accessKeyId, accessKeySecret)
existingConfig = None
try:
    response = client.get_logtail_config(logProjectName, logtailConfigName)
    existingConfig = response.logtail_config
    print("Existing logtail configuration found: ", existingConfig.to_json())
except LogException:
    print("No existing logtail configuration found.")

# Create or update the logtail configuration
configDetail = SyslogConfigDetail(logstoreName=logStoreName,
                                  configName=logtailConfigName,
                                  tag="sys_tag")
if existingConfig is None:
    print("Create the logtail configuration:", configDetail.to_json())
    client.create_logtail_config(logProjectName, configDetail)
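# The snippet ends before the update branch. A hedged sketch of the likely
# counterpart, assuming the SDK's update_logtail_config and
# apply_config_to_machine_group calls and reusing logMachineGroupName from above:
else:
    print("Update the logtail configuration:", configDetail.to_json())
    client.update_logtail_config(logProjectName, configDetail)

# Attach the configuration to the machine group created for this environment.
client.apply_config_to_machine_group(logProjectName, logtailConfigName,
                                     logMachineGroupName)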
def main():
    endpoint = 'cn-hangzhou.sls.aliyuncs.com'  # Endpoint matching the region of the project created above
    accessKeyId = 'your_access_key_id'         # your Aliyun AccessKeyId
    accessKey = 'your_access_key'              # your Aliyun AccessKeySecret
    project = 'your_project'                   # project name created above
    logstore = 'your_logstore'                 # logstore name created above

    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)

    # List all logstores
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()

    topic = ""
    source = ""

    # Send 10 batches, each containing 10 log entries
    for i in range(10):
        logitemList = []  # LogItem list
        for j in range(10):
            contents = [('index', str(i * 10 + j))]
            logItem = LogItem()
            logItem.set_time(int(time.time()))
            logItem.set_contents(contents)
            logitemList.append(logItem)
        req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
        res2 = client.put_logs(req2)
        res2.log_print()

    # List all shards and read back everything written in the last minute
    listShardRes = client.list_shards(project, logstore)
    for shard in listShardRes.get_shards_info():
        shard_id = shard["shardID"]
        start_time = int(time.time() - 60)
        end_time = start_time + 60
        res = client.get_cursor(project, logstore, shard_id, start_time)
        res.log_print()
        start_cursor = res.get_cursor()
        res = client.get_cursor(project, logstore, shard_id, end_time)
        end_cursor = res.get_cursor()
        while True:
            loggroup_count = 100  # read up to 100 log groups per call
            res = client.pull_logs(project, logstore, shard_id, start_cursor,
                                   loggroup_count, end_cursor)
            res.log_print()
            next_cursor = res.get_next_cursor()
            if next_cursor == start_cursor:
                break
            start_cursor = next_cursor

    # Important: the query interfaces below only work after indexing is enabled
    time.sleep(60)

    topic = ""
    query = "index"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None
    # Count the logs matching the query over the last 10 minutes; retry until the
    # result is complete
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()

    # Total number of logs matching the query
    total_log_count = res3.get_total_count()
    log_line = 10
    # Page through the logs 10 at a time; retry each page up to 3 times if the
    # result is not complete
    for offset in range(0, total_log_count, log_line):
        res4 = None
        for retry_time in range(0, 3):
            req4 = GetLogsRequest(project, logstore, From, To, topic, query,
                                  log_line, offset, False)
            res4 = client.get_logs(req4)
            if res4 is not None and res4.is_completed():
                break
            time.sleep(1)
        if res4 is not None:
            res4.log_print()

    listShardRes = client.list_shards(project, logstore)
    shard = listShardRes.get_shards_info()[0]
    # Split a shard
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        inclusiveBeginKey = shard["inclusiveBeginKey"]
        midKey = inclusiveBeginKey[:-1] + str(int(inclusiveBeginKey[-1:]) + 1)
        client.split_shard(project, logstore, shard_id, midKey)

    # Merge a shard
    shard = listShardRes.get_shards_info()[1]
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        client.merge_shard(project, logstore, shard_id)

    # Delete a shard
    shard = listShardRes.get_shards_info()[-1]
    if shard["status"] == "readonly":
        shard_id = shard["shardID"]
        client.delete_shard(project, logstore, shard_id)