def __init__(self, config: MigrationConfig):
     self._config = config
     _uuid = str(uuid.uuid4())
     self._id = ''.join(_uuid.split('-'))
     self._es = Elasticsearch(
         hosts=self._config.hosts,
         timeout=60,
         max_retries=30,
         retry_on_timeout=True,
         verify_certs=False,
     )
     self._log_client = LogClient(
         endpoint=self._config.get('endpoint'),
         accessKeyId=self._config.access_key_id,
         accessKey=self._config.access_key,
     )
     setup_logging(
         self._id,
         self._config.get('endpoint'),
         self._config.get('project_name'),
         self._config.access_key_id,
         self._config.access_key,
     )
     self._logger = logging.getLogger(__name__)
     self._shutdown_flag = op.join(self._config.cache_path, 'shutdown.lock')
     print('#migration: {}'.format(self._id))
Example #2
def icgoo_visit_detail():
    """定期获取ali日志的请求ip,排查直接请求接口的ip 5分钟执行一次"""
    endpoint = ''  # 选择与上面步骤创建Project所属区域匹配的Endpoint
    accessKeyId = ''  # 使用您的阿里云访问密钥AccessKeyId
    accessKey = ''  # 使用您的阿里云访问密钥AccessKeySecret
    project = ''  # 上面步骤创建的项目名称
    logstore = ''  # 上面步骤创建的日志库名称
    client = LogClient(endpoint, accessKeyId, accessKey)
    topic = ""
    To = int(time.time()) + 100
    From1 = To - 500
    From2 = To - 600
    sql1 = "* and request_uri: search/getdata|SELECT   DISTINCT  client_ip limit 0,2000"
    sql2 = "* and request_uri:accounts/is_login |SELECT   DISTINCT  client_ip  limit 0,2000"
    res1 = GetLogsRequest(project, logstore, From1, To, topic, sql1, 2000, 0, False)
    last_res1 = client.get_logs(res1).get_logs()
    time.sleep(50)  # widen the whitelist search window to avoid false bans caused by long gaps between requests
    res2 = GetLogsRequest(project, logstore, From2, To, topic, sql2, 2000, 0, False)
    last_res2 = client.get_logs(res2).get_logs()
    all_ip = [i.contents['client_ip'] for i in last_res1]
    white_ip = [i.contents['client_ip'] for i in last_res2]
    ip_ban = list(set(all_ip).difference(set(white_ip)))
    # mail_ipban('log',str(To),json.dumps(ip_ban))
    for each_ip in ip_ban:
        # IPs that are not allowed to be unlocked
        ip_str = Webip(each_ip, 'Anonymous')
        ip_obj = IPINFO.query.filter(IPINFO.ip_log == ip_str.md5_ip).first()
        if ip_obj:
            if not ip_obj.ip_white():
                (ip_obj.lock_1m_times, ip_obj.lock_30m_times, ip_obj.lock_status,
                 ip_obj.white_list_status, ip_obj.unlock_after_lockd) = sql_results['2']
    db_session.commit()
Example #3
    def __init__(self, access_key, access_key_secret, region='cn-hangzhou',
                 project='log-project-001', logstore='logstore-backup'):
        self._access_key = access_key
        self._access_key_secret = access_key_secret
        self._region = region
        self._project = project
        self._logstore = logstore
        self._endpoint = '%s.log.aliyuncs.com' % self._region
        self._client = LogClient(self._endpoint, self._access_key, self._access_key_secret)

        self._topic = 'BackupLogs'
Example #4
 def __init__(self, endpoint, access_id, access_key, project, logstore, max_buffer_trace = 10000, batch_size = 100, buffer_interval = 10) : 
     self.trace_queue = queue.Queue()
     self.semaphore = threading.Semaphore(0)
     self.batch_size = batch_size
     self.max_buffer_trace = max_buffer_trace
     self.buffer_interval = buffer_interval
     self.running = True
     self.logClient = LogClient(endpoint, access_id, access_key, "")
     self.project = project
     self.logstore = logstore
     self.hostname = socket.gethostname()  
     self.ip = socket.gethostbyname(self.hostname)
     self.last_send_time = time.time()
     self.send_thread = threading.Thread(target = self.sed_trace_thread)
     self.send_thread.daemon = True
     self.send_thread.start()
Example #5
def get_loghub_client():
    if is_aliyun():
        endpoint = loghub_endpoint_aliyun
    else:
        endpoint = loghub_endpoint
    access_key_id = loghub_accessKeyId
    access_key = loghub_accessKey
    return LogClient(endpoint, access_key_id, access_key)
Example #6
def handler(event, context):
  evt = json.loads(event)

  #input your app code
  uid = evt['queryParameters']['uid']
  ip = evt['queryParameters']['ip']
  device = evt['queryParameters']['device']
  
  # print("test-api-gateway output: The uid is %s, the ip address is %s and the device type is %s. " % (uid, ip, device))
  
  endpoint = 'cn-XXXX.log.aliyuncs.com'       # Endpoint matching the region of the Project created above
  accessKeyId = 'XXXX'    # your Alibaba Cloud AccessKeyId
  accessKey = 'XXXX'      # your Alibaba Cloud AccessKeySecret
  project = 'eric-nginx-logstore'        # name of the project created above
  logstore = 'eric-nginx-access-log-store'       # name of the logstore created above
  
  # Build a client
  client = LogClient(endpoint, accessKeyId, accessKey)
  topic = ""
  source = ""
  
  # Write data to the logstore
  logitemList = []  # LogItem list
  contents = [('ip',ip), ('uid',uid), ('device',device)]
  print("FunctionCompute --> LogService test output: " + ip + " - " + uid + " - " + device)
  logItem = LogItem()
  logItem.set_time(int(time.time()))
  logItem.set_contents(contents)
  logitemList.append(logItem)
  req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
  res2 = client.put_logs(req2)
  res2.log_print()


  response_content = "you can return any string"
  api_rep = {
    "isBase64Encoded":"false",
    "statusCode":"200",
    "headers":{"x-custom-header":"your header"},
    "body":response_content
  }
  return json.dumps(api_rep)
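For clarity, a hypothetical local invocation of the handler above, showing the event shape it reads (queryParameters with uid, ip and device); the values are placeholders, and actually shipping the log still requires real endpoint and credentials inside the handler:

# Hypothetical local test; the payload keys mirror what handler() reads above.
if __name__ == '__main__':
    sample_event = json.dumps({
        "queryParameters": {"uid": "u-0001", "ip": "203.0.113.10", "device": "ios"}
    })
    print(handler(sample_event, None))  # writes one log item, then returns the API Gateway response JSON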
Example #7
    def __init__(self):
        ali_sls_config = getattr(settings, 'ALI_SLS', None)
        if ali_sls_config:
            self.endpoint    = ali_sls_config['endpoint']
            self.accessKeyId = ali_sls_config['accessKeyId']
            self.accessKey   = ali_sls_config['accessKey']
            self.project     = ali_sls_config['project']
            self.logstore    = ali_sls_config['logstore']
        else:
            raise ValueError('ALI_SLS setting does not exist')

        self.client = LogClient(self.endpoint, self.accessKeyId, self.accessKey)
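For reference, a sketch of the Django settings entry this constructor expects; the key names are exactly the ones __init__ reads above, the values are placeholders:

# settings.py (hypothetical values; keys match what the constructor looks up)
ALI_SLS = {
    'endpoint': 'cn-hangzhou.log.aliyuncs.com',
    'accessKeyId': '<your-access-key-id>',
    'accessKey': '<your-access-key-secret>',
    'project': '<your-project>',
    'logstore': '<your-logstore>',
}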
Example #8
def handler(event, context):
    evt = json.loads(event)
    print(
        "LogService trigger and send data to FunctionCompute test output, The content of event is : %s"
        % (evt))

    # AK/SK
    accessKeyId = '**********'
    accessKey = '**********'

    # Extract the data source fields from the event
    endpoint = evt['source']['endpoint']
    project = evt['source']['projectName']
    logstore = evt['source']['logstoreName']
    shard_id = evt['source']['shardId']
    start_cursor = evt['source']['beginCursor']
    end_cursor = evt['source']['endCursor']

    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)

    # Count how many log entries lie between beginCursor and endCursor on this shard
    counter = 0
    while True:
        loggroup_count = 100  # read 100 log groups per request
        res = client.pull_logs(project, logstore, shard_id, start_cursor,
                               loggroup_count, end_cursor)
        #res.log_print()
        counter = counter + res.get_log_count()
        next_cursor = res.get_next_cursor()
        if next_cursor == start_cursor:
            break
        start_cursor = next_cursor

    print("LogService trigger and send data to FunctionCompute test output:\
	 The shard_id is: %d, the start_cursor is: %s, the end_cursor is: %s, the count of log events is : %d"    \
     % (shard_id, start_cursor, end_cursor, counter))

    return counter
Example #9
def main():

    # Endpoint matching the region of the log Project
    endpoint = 'cn-beijing-intranet.log.aliyuncs.com'

    # Alibaba Cloud AccessKeyId
    accessKeyId = '**********'

    # Alibaba Cloud AccessKeySecret
    accessKey = '****************************'

    # Log project name
    project = 'java-applications'

    # Logstore name
    logstore = 'mop_log'

    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)

    # List all logstores
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()
    topic = "mop"
    query = "/ask/article/list or /ask/article/get"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None

    # Query the number of logs matching the query in the last 10 minutes; retry until the result is complete
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()
    print(type(res3.log_print()))

Example #10
class AliYunLogService(object):
    def __init__(self,
                 access_key,
                 access_key_secret,
                 region='cn-hangzhou',
                 project='log-project-001',
                 logstore='logstore-backup'):
        self._access_key = access_key
        self._access_key_secret = access_key_secret
        self._region = region
        self._project = project
        self._logstore = logstore
        self._endpoint = '%s.log.aliyuncs.com' % self._region
        self._client = LogClient(self._endpoint, self._access_key,
                                 self._access_key_secret)

        self._topic = 'BackupLogs'
        self._pylog = logging.getLogger(__name__)

    def log(self, contents, topic, source):
        level = int(getattr(logging, topic))
        self._pylog.log(level=level, msg=contents)

        return  # NOTE: this early return disables the remote put_logs path below

        contents = [
            ('msg', contents),
        ]
        logitemList = []
        logItem = LogItem()
        logItem.set_time(int(time.time()))
        logItem.set_contents(contents)
        logitemList.append(logItem)
        req = PutLogsRequest(self._project, self._logstore, topic, source,
                             logitemList)
        resp = self._client.put_logs(req)
        return resp

    def debug(self, contents, source='BackupLogs'):
        self.log(contents, topic='DEBUG', source=source)

    def info(self, contents, source='BackupLogs'):
        self.log(contents, topic='INFO', source=source)

    def warn(self, contents, source='BackupLogs'):
        self.log(contents, topic='WARN', source=source)

    def error(self, contents, source='BackupLogs'):
        self.log(contents, topic='ERROR', source=source)
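A short usage sketch for the class above; the key values are placeholders, and note that as written log() routes every message to the standard logging module and returns before the put_logs path:

# Hypothetical usage; access key values are placeholders.
service = AliYunLogService('<access-key>', '<access-key-secret>',
                           region='cn-hangzhou', project='log-project-001',
                           logstore='logstore-backup')
service.info('backup finished')   # handled by the local logging module (see the early return in log())
service.error('backup failed')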
Example #11
class AliYunLogService(object):

    def __init__(self, access_key, access_key_secret, region='cn-hangzhou',
                 project='log-project-001', logstore='logstore-backup'):
        self._access_key = access_key
        self._access_key_secret = access_key_secret
        self._region = region
        self._project = project
        self._logstore = logstore
        self._endpoint = '%s.log.aliyuncs.com' % self._region
        self._client = LogClient(self._endpoint, self._access_key, self._access_key_secret)

        self._topic = 'BackupLogs'

    def log(self, topic, source, contents):
        logitemList = []
        logItem = LogItem()
        logItem.set_time(int(time.time()))
        logItem.set_contents(contents)
        logitemList.append(logItem)
        req = PutLogsRequest(self._project, self._logstore, topic, source, logitemList)
        resp = self._client.put_logs(req)
        return resp
Example #12
class AliLogReporter : 
    def __init__(self, endpoint, access_id, access_key, project, logstore, max_buffer_trace = 10000, batch_size = 100, buffer_interval = 10) : 
        self.trace_queue = queue.Queue()
        self.semaphore = threading.Semaphore(0)
        self.batch_size = batch_size
        self.max_buffer_trace = max_buffer_trace
        self.buffer_interval = buffer_interval
        self.running = True
        self.logClient = LogClient(endpoint, access_id, access_key, "")
        self.project = project
        self.logstore = logstore
        self.hostname = socket.gethostname()  
        self.ip = socket.gethostbyname(self.hostname)
        self.last_send_time = time.time()
        self.send_thread = threading.Thread(target = self.sed_trace_thread)
        self.send_thread.daemon = True
        self.send_thread.start()


    def sed_trace_thread(self) : 
        while self.running : 
            if self.semaphore.acquire() :
                self.send_trace(False)
        self.send_trace(True)

    def send_trace(self, send_all = False) : 
        while self.trace_queue.empty() == False and (send_all or self.trace_queue.qsize() > self.batch_size or time.time() - self.last_send_time > self.buffer_interval) : 
            logitemList = []
            while self.trace_queue.empty() == False and len(logitemList) < self.batch_size : 
                logitemList.append(self.trace_queue.get())
            try : 
                request = PutLogsRequest(self.project, self.logstore, "", "", logitemList)
                self.logClient.put_logs(request)
            except LogException as e : 
                print "Send Failed:" + e.__str__()
            self.last_send_time = time.time()
        

    def report_span(self, span) : 
        if self.trace_queue.qsize() > self.max_buffer_trace : 
            print "discard trace as queue full, trace_id:" + str(span.context.trace_id) +"\tspan_id:" + str(span.context.span_id)  \
                + "\tparent_id:" + str(span.context.parent_id) + "\tservice_name:" + span.tracer.service_name  \
                + "\tOperation_name:" + span.operation_name + "\tstart_time:" + str(span.start_time) + "\tend_time:" + str(span.end_time)  \
                + "\ttags:" + str(span.tags) + "\tlogs:" + str(span.logs)

        logItem = LogItem()
        logItem.set_time(int(time.time()))
        logItem.push_back("TraceID", str(span.context.trace_id))
        logItem.push_back("SpanID", str(span.context.span_id))
        logItem.push_back("ParentSpanID", str(span.context.parent_id))
        logItem.push_back("ServiceName", span.tracer.service_name)
        logItem.push_back("OperationName", span.operation_name)
        start_time = int(span.start_time * 1000 * 1000 * 1000)
        end_time = int(span.end_time * 1000 * 1000 * 1000)
        logItem.push_back("StartTime", str(start_time))
        logItem.push_back("Duration", str(end_time - start_time))
        logItem.push_back("process.hostname", self.hostname)
        logItem.push_back("process.ips", self.ip)

        
        tag_map = dict()
        for tag in span.tags: 
            tag_map["tag." + str(tag.key)] = str(tag.value)

        for key,value in tag_map.items() : 
            logItem.push_back(key, value)

        log_list = []
        for log in span.logs : 
            log_list.append(str(log.value))
        if len(log_list) > 0 : 
            logItem.push_back("logs", str(log_list))

        self.trace_queue.put(logItem)

        if self.trace_queue.qsize() > self.max_buffer_trace or time.time() - self.last_send_time > self.buffer_interval : 
            self.semaphore.release()

    def flush(self) : 
        if self.trace_queue.empty() == False : 
            self.send_trace(True)
        

    def close(self):  # once the reporter is closed, it should never be used again
        self.running = False
        self.semaphore.release()
        self.send_thread.join(10)  
        if self.trace_queue.empty() == False : 
            print "Trace exit while there are still " + str(self.trace_queue.qsize()) + " traces not send"
Example #13
 def __init__(self, endpoint, accessKeyId, accessKey, securityToken=None):
     LogClient.__init__(self, endpoint, accessKeyId, accessKey,
                        securityToken)
Example #14
    res = client.list_logstore(project, logstore)
    res.log_print()

    res = client.delete_logstore(project, logstore)
    res.log_print()


if __name__ == '__main__':
    endpoint = ''
    accessKeyId = ''
    accessKey = ''
    project = ''
    logstore = ''
    token = None

    client = LogClient(endpoint, accessKeyId, accessKey, token)

    sample_logstore(client, project, logstore)

    res = client.create_logstore(project, logstore, 1, 1)
    time.sleep(0.1)

    try:
        sample_list_logstores(client, project)
        sample_logtail_config(client, project, logstore)
        sample_machine_group(client, project)
        sample_apply_config(client, project)
        sample_index(client, project, logstore)
        sample_list_topics(client, project, logstore)
        sample_put_logs(client, project, logstore)
        sample_pull_logs(client, project, logstore)
Example #15
def main():
    endpoint = 'cn-hangzhou.sls.aliyuncs.com'  # Endpoint matching the region of the Project created above
    accessKeyId = 'your_access_key_id'  # your Alibaba Cloud AccessKeyId
    accessKey = 'your_access_key'  # your Alibaba Cloud AccessKeySecret
    project = 'your_project'  # name of the project created above
    logstore = 'your_logstore'  # name of the logstore created above
    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)
    # List all logstores
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()
    topic = ""
    source = ""
    # Send 10 batches, each containing 10 log entries
    for i in range(10):
        logitemList = []  # LogItem list
        for j in range(10):
            contents = [('index', str(i * 10 + j))]
            logItem = LogItem()
            logItem.set_time(int(time.time()))
            logItem.set_contents(contents)
            logitemList.append(logItem)
        req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
        res2 = client.put_logs(req2)
        res2.log_print()
    # List all shards and read back everything written in the last minute
    listShardRes = client.list_shards(project, logstore)
    for shard in listShardRes.get_shards_info():
        shard_id = shard["shardID"]
        start_time = int(time.time() - 60)
        end_time = start_time + 60
        res = client.get_cursor(project, logstore, shard_id, start_time)
        res.log_print()
        start_cursor = res.get_cursor()
        res = client.get_cursor(project, logstore, shard_id, end_time)
        end_cursor = res.get_cursor()
        while True:
            loggroup_count = 100  # read 100 log groups per request
            res = client.pull_logs(project, logstore, shard_id, start_cursor,
                                   loggroup_count, end_cursor)
            res.log_print()
            next_cursor = res.get_next_cursor()
            if next_cursor == start_cursor:
                break
            start_cursor = next_cursor
    # Important: the query APIs below can only be used after indexing has been enabled on the logstore
    time.sleep(60)
    topic = ""
    query = "index"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None
    # Query the number of logs matching the query in the last 10 minutes; retry until the result is complete
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()
    # Number of logs matching the query
    total_log_count = res3.get_total_count()
    log_line = 10
    # Read the logs 10 at a time until all are fetched; retry each request up to 3 times if the result is incomplete
    for offset in range(0, total_log_count, log_line):
        res4 = None
        for retry_time in range(0, 3):
            req4 = GetLogsRequest(project, logstore, From, To, topic, query,
                                  log_line, offset, False)
            res4 = client.get_logs(req4)
            if res4 is not None and res4.is_completed():
                break
            time.sleep(1)
        if res4 is not None:
            res4.log_print()
    listShardRes = client.list_shards(project, logstore)
    shard = listShardRes.get_shards_info()[0]
    # Split a shard
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        inclusiveBeginKey = shard["inclusiveBeginKey"]
        midKey = inclusiveBeginKey[:-1] + str((
            (int)(inclusiveBeginKey[-1:])) + 1)
        client.split_shard(project, logstore, shard_id, midKey)
    # Merge shards
    shard = listShardRes.get_shards_info()[1]
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        client.merge_shard(project, logstore, shard_id)
    # Delete a shard
    shard = listShardRes.get_shards_info()[-1]
    if shard["status"] == "readonly":
        shard_id = shard["shardID"]
        client.delete_shard(project, logstore, shard_id)
Example #16
class MigrationManager(object):
    def __init__(self, config: MigrationConfig):
        self._config = config
        _uuid = str(uuid.uuid4())
        self._id = ''.join(_uuid.split('-'))
        self._es = Elasticsearch(
            hosts=self._config.hosts,
            timeout=60,
            max_retries=30,
            retry_on_timeout=True,
            verify_certs=False,
        )
        self._log_client = LogClient(
            endpoint=self._config.get('endpoint'),
            accessKeyId=self._config.access_key_id,
            accessKey=self._config.access_key,
        )
        setup_logging(
            self._id,
            self._config.get('endpoint'),
            self._config.get('project_name'),
            self._config.access_key_id,
            self._config.access_key,
        )
        self._logger = logging.getLogger(__name__)
        self._shutdown_flag = op.join(self._config.cache_path, 'shutdown.lock')
        print('#migration: {}'.format(self._id))

    def migrate(self):
        self._logger.info('Migration starts')
        tasks = self._discover_tasks()
        task_cnt = len(tasks)
        pool_size = max(1, min(self._config.get('pool_size'), task_cnt))
        print('#pool_size: {}'.format(pool_size))
        print('#tasks: {}'.format(task_cnt))

        self._prepare()
        futures = []
        state = {
            'total': task_cnt,
            Checkpoint.finished: 0,
            Checkpoint.dropped: 0,
            Checkpoint.failed: 0,
        }
        with ProcessPoolExecutor(max_workers=pool_size) as pool:
            for task in tasks:
                futures.append(
                    pool.submit(
                        _migration_worker,
                        self._config,
                        task,
                        self._shutdown_flag,
                    ))
            try:
                for future in as_completed(futures):
                    res = future.result()
                    if res in state:
                        state[res] += 1
                    self._logger.info('State', extra=state)
                    print('>> state:', json.dumps(state))
            except BaseException:
                self._logger.error(
                    'Exception',
                    extra={'traceback': traceback.format_exc()},
                )
                for future in futures:
                    if not future.done():
                        future.cancel()
                list(as_completed(futures, timeout=10))

        if state[Checkpoint.finished] + state[Checkpoint.dropped] >= task_cnt:
            self._logger.info('All migration tasks finished')
        self._logger.info('Migration exits')
        print('exit:', json.dumps(state))
        return state

    def _prepare(self):
        if op.exists(self._shutdown_flag):
            os.unlink(self._shutdown_flag)

        def _handle_term_sig(signum, frame):
            # Raise Ctrl+C
            with open(self._shutdown_flag, 'w') as f:
                f.write('')
            raise KeyboardInterrupt()

        signal.signal(signal.SIGINT, _handle_term_sig)
        signal.signal(signal.SIGTERM, _handle_term_sig)

    def _discover_tasks(self):
        indexes = self._config.get('indexes')
        data = self._es.search_shards(indexes)
        tasks = []
        for shard in data['shards']:
            for item in shard:
                # Ignore internal index
                if not indexes and item['index'].startswith('.'):
                    continue
                if item['state'] == 'STARTED' and item['primary']:
                    tasks.append(
                        {
                            'es_index': item['index'],
                            'es_shard': item['shard']
                        }, )
        return self._handle_cache(tasks)

    def _handle_cache(self, tasks):
        file_tasks = op.join(self._config.cache_path, 'tasks.json')
        if op.exists(file_tasks):
            with open(file_tasks) as f:
                cont = f.read()
        else:
            cont = '[]'

        try:
            old_tasks = json.loads(cont)
        except json.JSONDecodeError:
            self._logger.error('Invalid task cache', extra={'cache': cont})
            old_tasks = []

        task_map = {(task['es_index'], task['es_shard']): task['id']
                    for task in old_tasks}
        _mappings = IndexLogstoreMappings(
            list([task['es_index'] for task in tasks]),
            self._config.get('logstore_index_mappings'),
        )
        cnt, new_tasks = len(old_tasks), []
        for task in tasks:
            _task = (task['es_index'], task['es_shard'])
            if _task not in task_map:
                task['id'] = cnt
                task['logstore'] = _mappings.get_logstore(task['es_index'])
                new_tasks.append(task)
                cnt += 1
        tasks = old_tasks + new_tasks

        with open(file_tasks, 'w') as f:
            f.write(json.dumps(tasks, indent=2))

        if self._config.get('auto_creation'):
            self._setup_aliyun_log(_mappings)
        return tasks

    def _setup_aliyun_log(self, index_logstore_mappings):
        print('setup aliyun log service...')
        self._logger.info('Setup AliyunLog start')
        logstores = index_logstore_mappings.get_all_logstores()
        for logstore in logstores:
            self._setup_logstore(index_logstore_mappings, logstore)
        self._logger.info('Setup AliyunLog wait')
        time.sleep(self._config.get('wait_time_in_secs'))
        self._logger.info('Setup AliyunLog finish')

    def _setup_logstore(self, index_logstore_mappings, logstore):
        try:
            self._log_client.create_logstore(
                project_name=self._config.get('project_name'),
                logstore_name=logstore,
                shard_count=8,
                ttl=3650,
            )
        except LogException as exc:
            if exc.get_error_code() == 'LogStoreAlreadyExist':
                self._logger.info(
                    'Logstore already exist, skip creation.',
                    extra={'logstore': logstore},
                )
            else:
                raise
        self._setup_index(index_logstore_mappings, logstore)

    def _setup_index(self, index_logstore_mappings, logstore):
        indexes = index_logstore_mappings.get_indexes(logstore)
        for index in indexes:
            self._logger.info(
                'Setup AliyunLog Logstore',
                extra={
                    'logstore': logstore,
                    'es_index': index
                },
            )
            try:
                resp = self._es.indices.get(index=index)
            except FileNotFoundError:
                self._logger.error('Index not found',
                                   extra={'es_index': index})
                continue
            mappings = resp[index]['mappings']
            index_config = MappingIndexConverter.to_index_config(mappings)
            try:
                self._log_client.create_index(
                    self._config.get('project_name'),
                    logstore,
                    index_config,
                )
            except LogException as exc:
                if exc.get_error_code() == 'IndexAlreadyExist':
                    self._log_client.update_index(
                        self._config.get('project_name'),
                        logstore,
                        index_config,
                    )
                    continue
                raise
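A minimal invocation sketch for the migration manager above; MigrationConfig's constructor is not part of this snippet, so its arguments are hypothetical — it only has to expose the attributes (hosts, access_key_id, access_key, cache_path) and get() keys ('endpoint', 'project_name', 'pool_size', 'indexes', ...) that MigrationManager reads:

# Hypothetical: how MigrationConfig is built is not shown above.
config = MigrationConfig(...)      # must carry the ES hosts, SLS endpoint/keys, project_name, cache_path, ...
manager = MigrationManager(config)
state = manager.migrate()          # runs the shard-level workers and returns the per-checkpoint counters
print(state)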
Example #17
 def __init__(self, name):
     self.topic = name
     self.client = LogClient(endpoint, keyid, key)
     req1 = ListLogstoresRequest(project)
     res = client.list_logstores(req1)
     res.log_print()
Example #18
    res = client.list_logstore(project, logstore)
    res.log_print()
    
    res = client.delete_logstore(project, logstore)
    res.log_print()


if __name__=='__main__':
    endpoint = 'cn-hangzhou-staging-intranet.sls.aliyuncs.com'
    accessKeyId = ''
    accessKey = ''
    project = 'ali-log-service'
    logstore = 'test4'
    token = ""

    client = LogClient(endpoint, accessKeyId, accessKey,token)

    sample_logstore(client, project, logstore) 

    res = client.create_logstore(project, logstore, 1, 1)
    time.sleep(0.1)

    try : 
        sample_list_logstores(client, project)
        sample_logtail_config(client, project, logstore) 
        sample_machine_group(client, project)
        sample_apply_config(client, project)
        sample_index(client, project, logstore)
        sample_list_topics(client, project, logstore)
        sample_put_logs(client, project, logstore)
        sample_pull_logs(client, project, logstore)
Example #19
environment = sys.argv[4]
print("Update the Logtail configuration on the log project (environment = " +
      environment + ", region = " + regionId + ")")

endpoint = regionId + ".log.aliyuncs.com"
logProjectName = "sample-app-log-project-" + environment
logStoreName = "sample-app-log-store-" + environment
logtailConfigName = "sample-app-logtail-config-" + environment
logMachineGroupName = "sample-app-log-machine-group-" + environment

# Load the existing Logtail configuration
print("Loading existing Logtail configuration (endpoint = " + endpoint +
      ", logProjectName = " + logProjectName + ", logtailConfigName = " +
      logtailConfigName + ")...")

client = LogClient(endpoint, accessKeyId, accessKeySecret)
existingConfig = None
try:
    response = client.get_logtail_config(logProjectName, logtailConfigName)
    existingConfig = response.logtail_config
    print("Existing logtail configuration found: ", existingConfig.to_json())
except LogException:
    print("No existing logtail configuration found.")

# Create or update the logtail configuration
configDetail = SyslogConfigDetail(logstoreName=logStoreName,
                                  configName=logtailConfigName,
                                  tag="sys_tag")
if existingConfig is None:
    print("Create the logtail configuration:", configDetail.to_json())
    client.create_logtail_config(logProjectName, configDetail)
Example #20
from aliyun.log.logclient import LogClient

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/..")

from common import Common
from log import Log

#Load config values
accessKeyId, accessKeySecret, logEndPoint, logProject, logStore, ossEndPoint, ossBucketName, securityToken = Common.LoadConfig(
)

#Initialise OSS authentication
auth = oss2.Auth(accessKeyId, accessKeySecret)

# Initialise Log Service authentication client
client = LogClient(logProject + '.' + logEndPoint, accessKeyId,
                   accessKeySecret)

#Instantiate OSS bucket
bucket = oss2.Bucket(auth, ossEndPoint, ossBucketName)


@app.route('/')
@app.route('/index')
def index():
    return render_template('base.html')


@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    print(accessKeyId)
    print(accessKeySecret)
Example #21
class SLSClient(object):

    def __init__(self):
        ali_sls_config = getattr(settings, 'ALI_SLS', None)
        if ali_sls_config:
            self.endpoint    = ali_sls_config['endpoint']
            self.accessKeyId = ali_sls_config['accessKeyId']
            self.accessKey   = ali_sls_config['accessKey']
            self.project     = ali_sls_config['project']
            self.logstore    = ali_sls_config['logstore']
        else:
            raise ValueError('ALI_SLS setting does not exist')

        self.client = LogClient(self.endpoint, self.accessKeyId, self.accessKey)

    def pull_logs(self, shard_id = 0, start_time=60):
        res = self.client.get_cursor(self.project, self.logstore, shard_id, (int)(time.time() - start_time))
        res.log_print()
        cursor = res.get_cursor()

        res = self.client.get_end_cursor(self.project, self.logstore, shard_id)
        end_cursor = res.get_cursor()
        while cursor != end_cursor:
            loggroup_count = 10
            res = self.client.pull_logs(self.project, self.logstore, shard_id, cursor, loggroup_count)
            res.log_print()
            cursor = res.get_next_cursor()


    def get_logs(self, start, length, topic='', query='', start_time=600):
        fromTime = int(time.time() - start_time)
        toTime = int(time.time())

        res = None
        while (res is None) or  (not res.is_completed()):
            req = GetHistogramsRequest(self.project, self.logstore, fromTime, toTime, topic, query)
            res = self.client.get_histograms(req)

        # res.log_print()
        total_log_count = res.get_total_count()
        # total_log_count: number of logs matching the query
        if start is not None and length is not None:
            get_log_count = min(length, total_log_count)
            from_idx = start
            to_idx = start + get_log_count
        else:
            from_idx = 0
            to_idx = total_log_count

        print('total_log_count:', total_log_count)
        print('from_idx:', from_idx)
        print('to_idx:', to_idx)

        log_line = 10
        data = []
        # Read the logs 10 at a time until all are fetched; retry each request up to 3 times if the result is incomplete
        for offset in range(from_idx, to_idx, log_line):
            res = None
            for retry_time in range(0, 3):
                # print(self.project, self.logstore, fromTime, toTime, topic, query, log_line, offset)
                req = GetLogsRequest(self.project, self.logstore, fromTime, toTime, topic, query, log_line, offset, True)
                res = self.client.get_logs(req)
                if res is not None and res.is_completed():
                    break
                time.sleep(1)
            if res is not None:
                data.extend(res.get_logs())

        return data, total_log_count
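A short usage sketch for the class above, assuming settings.ALI_SLS is configured as in the earlier sketch; the query string is a placeholder:

# Hypothetical usage; requires a valid ALI_SLS entry in the Django settings.
sls = SLSClient()
logs, total = sls.get_logs(start=0, length=100, query='error')  # searches the last 10 minutes by default
sls.pull_logs(shard_id=0, start_time=300)                       # print raw logs from the last 5 minutes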
Example #22
def main():
    endpoint = ''  # Endpoint matching the region of the Project created above
    accessKeyId = ''  # your Alibaba Cloud AccessKeyId
    accessKey = ''  # your Alibaba Cloud AccessKeySecret
    project = ''  # name of the project created above
    logstore = ''  # name of the logstore created above
    # Important: create the logstore with 4 shards so the later steps of this test pass
    # Build a client
    client = LogClient(endpoint, accessKeyId, accessKey)
    # List all logstores
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()
    topic = ""
    source = ""
    # Send 10 batches, each containing 10 log entries
    for i in range(10):
        logitemList = []  # LogItem list
        for j in range(10):
            contents = [('index', str(i * 10 + j))]
            logItem = LogItem()
            logItem.set_time(int(time.time()))
            logItem.set_contents(contents)
            logitemList.append(logItem)
        req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
        res2 = client.put_logs(req2)
        res2.log_print()
    # List all shards and read back everything written in the last minute
    listShardRes = client.list_shards(project, logstore)

    for shard in listShardRes.get_shards_info():
        shard_id = shard["shardID"]
        start_time = int(time.time() - 60)
        end_time = start_time + 60
        res = client.get_cursor(project, logstore, shard_id, start_time)
        res.log_print()
        start_cursor = res.get_cursor()
        res = client.get_cursor(project, logstore, shard_id, end_time)
        end_cursor = res.get_cursor()

        while True:
            loggroup_count = 100  # read 100 log groups per request
            res = client.pull_logs(project, logstore, shard_id, start_cursor,
                                   loggroup_count, end_cursor)
            res.log_print()
            next_cursor = res.get_next_cursor()
            if next_cursor == start_cursor:
                break
            start_cursor = next_cursor
    # Important: the query APIs below can only be used after indexing has been enabled on the logstore
    time.sleep(60)
    topic = ""
    query = "index"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None

    # Query the number of logs matching the query in the last 10 minutes; retry until the result is complete
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()
    # Number of logs matching the query
    total_log_count = res3.get_total_count()
    log_line = 10

    # Read the logs 10 at a time until all are fetched; retry each request up to 3 times if the result is incomplete
    for offset in range(0, total_log_count, log_line):
        res4 = None
        for retry_time in range(0, 3):
            req4 = GetLogsRequest(project, logstore, From, To, topic, query,
                                  log_line, offset, False)
            res4 = client.get_logs(req4)
            if res4 is not None and res4.is_completed():
                break
            time.sleep(1)
        if res4 is not None:
            res4.log_print()
    listShardRes = client.list_shards(project, logstore)
    shard = listShardRes.get_shards_info()[0]

    # Split a shard
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        inclusiveBeginKey = shard["inclusiveBeginKey"]
        midKey = inclusiveBeginKey[:-1] + str((int(inclusiveBeginKey[-1:])) +
                                              1)
        client.split_shard(project, logstore, shard_id, midKey)
    # Merge shards
    shard = listShardRes.get_shards_info()[1]
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        client.merge_shard(project, logstore, shard_id)
    # Delete a shard
    shard = listShardRes.get_shards_info()[-1]
    if shard["status"] == "readonly":
        shard_id = shard["shardID"]
        client.delete_shard(project, logstore, shard_id)

    # Create an external store (RDS data source)
    res = client.create_external_store(
        project,
        ExternalStoreConfig("rds_store", "cn-qingdao", "rds-vpc",
                            "vpc-************", "i***********", "*.*.*.*",
                            "3306", "root", "sfdsfldsfksflsdfs", "meta",
                            "join_meta"))
    res.log_print()
    res = client.update_external_store(
        project,
        ExternalStoreConfig("rds_store", "cn-qingdao", "rds-vp", "rds-vpc",
                            "vpc-************", "i************", "*.*.*.*",
                            "3306", "root", "sfdsfldsfksflsdfs", "meta",
                            "join_meta"))
    res.log_print()
    res = client.get_external_store(project, "rds_store")
    res.log_print()
    res = client.list_external_store(project, "")
    res.log_print()
    res = client.delete_external_store(project, "rds_store")
    res.log_print()
    # Run a query/analysis with the Python SDK
    req4 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | select count(1)", 10, 0, False)
    res4 = client.get_logs(req4)
    # Join the logstore with the RDS external store via the Python SDK
    req4 = GetLogsRequest(
        project, logstore, From, To, topic, "* | select count(1) from " +
        logstore + "  l  join  rds_store  r on  l.ikey =r.ekey", 10, 0, False)
    res4 = client.get_logs(req4)
    # Write the query results into RDS via the Python SDK
    req4 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | insert into rds_store select count(1) ", 10, 0,
                          False)
    res4 = client.get_logs(req4)