Code Example #1
    def put_log(self, log_dict, topic):
        if not self._network_available:
            print("network not available...")
            return False

        try:
            if not self._is_token_valid():
                self._activate()

            contents = []
            self._collect_contents(self._collect_basic_logs(), contents)
            self._collect_contents(log_dict, contents)

            log_item = LogItem()
            log_item.set_time(int(time.time()))
            log_item.set_contents(contents)

            req = PutLogsRequest(self._log_project, self._log_store, topic, '',
                                 [
                                     log_item,
                                 ])
            res = self._client.put_logs(req)
            return True
        except Exception:
            return False
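The excerpts on this page omit their imports. As a point of reference, a minimal end-to-end write might look like the sketch below, assuming the usual aliyun-log-python-sdk import path; the endpoint, keys, project, and logstore are placeholders, not values from any of the projects above.

import time

from aliyun.log import LogClient, LogItem, PutLogsRequest

# placeholders only; use your own endpoint, credentials, project and logstore
client = LogClient('cn-hangzhou.log.aliyuncs.com', '<access_key_id>', '<access_key_secret>')

item = LogItem()
item.set_time(int(time.time()))
item.set_contents([('level', 'INFO'), ('msg', 'hello sls')])

req = PutLogsRequest('<project>', '<logstore>', '', '', [item])  # topic='', source=''
res = client.put_logs(req)
res.log_print()  # quick check of the PutLogs response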
Code Example #2
File: message.py  Project: samuelchen/alicloud-demo
 def log(self, topic, source, contents):
     logitemList = []
     logItem = LogItem()
     logItem.set_time(int(time.time()))
     logItem.set_contents(contents)
     logitemList.append(logItem)
     req = PutLogsRequest(self._project, self._logstore, topic, source, logitemList)
     resp = self._client.put_logs(req)
     return resp
Code Example #3
File: Log.py  Project: itexpertshire/simplewebapp
 def write(client, topic, source, contents):
     # contents = [('Data', 'Data1')]
     logitemList = []  # LogItem list
     logItem = LogItem()
     logItem.set_time(int(time.time()))
     logItem.set_contents(contents)
     logitemList.append(logItem)
     req = PutLogsRequest(project, logstore, topic, source, logitemList)  # project / logstore are defined elsewhere in the original file
     res = client.put_logs(req)
     res.log_print()
Code Example #4
File: loghub_helper.py  Project: whoiskx/com_code
    def send(self, topic, value, key=None, source=None):
        log_item = LogItem()
        log_item.set_time(int(time.time()))
        log_item.set_contents([(self._key,
                                Producer._value_to_string(key, value))])
        if source is None:
            log_info = (topic, self.source, [log_item])
        else:
            log_info = (topic, source, [log_item])

        self._put_logs(*log_info)
Code Example #5
def sample_put_logs(client, project, logstore):
    topic = 'TestTopic_2'
    source = ''
    contents = [('key_1', 'key_1'), ('avg', '30')]
    logitemList = []  # LogItem list
    logItem = LogItem()
    logItem.set_time(int(time.time()))
    logItem.set_contents(contents)
    for i in range(0, 1):
        logitemList.append(logItem)
    request = PutLogsRequest(project, logstore, topic, source, logitemList)

    response = client.put_logs(request)
    response.log_print()
Code Example #6
    def insert_one(self, args):
        logitem = LogItem()

        tn = args['timeStamp']
        tmstamp = int(time.mktime(tn.timetuple()))
        del args['timeStamp']

        cts = [(key, str(args[key])) for key in args]

        logitem.set_time(tmstamp)
        logitem.set_contents(cts)

        req2 = PutLogsRequest(project, store, self.topic, socket.gethostname(),
                              [logitem])
        res2 = client.put_logs(req2)
        res2.log_print()
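For context, insert_one expects a dict carrying a datetime under 'timeStamp' plus arbitrary fields that can be stringified; a hypothetical call (the object name and field names are made up for illustration) could look like:

import datetime

writer.insert_one({
    'timeStamp': datetime.datetime.now(),  # converted to epoch seconds via time.mktime above
    'uid': 42,                             # remaining keys become (key, str(value)) contents
    'event': 'login',
})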
Code Example #7
File: sample.py  Project: amorwilliams/gsoops
def sample_put_logs(client, project, logstore):
    topic = 'TestTopic_2'
    source = ''
    contents = [
        ('key_1', 'key_1'),
        ('avg', '30')
    ]
    logitemList = [] # LogItem list
    logItem = LogItem()
    logItem.set_time(int(time.time()))
    logItem.set_contents(contents)
    for i in range(0, 1):
        logitemList.append(logItem)
    request = PutLogsRequest(project, logstore, topic, source, logitemList)
    
    response = client.put_logs(request)
    response.log_print()
Code Example #8
File: index.py  Project: samuelchen/alicloud-demo
    def log(self, contents, topic, source):
        level = int(getattr(logging, topic))
        self._pylog.log(level=level, msg=contents)

        return
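        # NOTE: the early return above makes the SLS upload code below unreachable in this version of the method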

        contents = [
            ('msg', contents),
        ]
        logitemList = []
        logItem = LogItem()
        logItem.set_time(int(time.time()))
        logItem.set_contents(contents)
        logitemList.append(logItem)
        req = PutLogsRequest(self._project, self._logstore, topic, source,
                             logitemList)
        resp = self._client.put_logs(req)
        return resp
Code Example #9
import json
import time

from aliyun.log import LogClient, LogItem, PutLogsRequest


def handler(event, context):
  evt = json.loads(event)

  # input your app code
  uid = evt['queryParameters']['uid']
  ip = evt['queryParameters']['ip']
  device = evt['queryParameters']['device']
  
  # print("test-api-gateway output: The uid is %s, the ip address is %s and the device type is %s. " % (uid, ip, device))
  
  endpoint = 'cn-XXXX.log.aliyuncs.com'       # the Endpoint matching the region of the Project created in the earlier steps
  accessKeyId = 'XXXX'    # your Alibaba Cloud AccessKeyId
  accessKey = 'XXXX'      # your Alibaba Cloud AccessKeySecret
  project = 'eric-nginx-logstore'        # name of the Project created in the earlier steps
  logstore = 'eric-nginx-access-log-store'       # name of the Logstore created in the earlier steps

  # build a client
  client = LogClient(endpoint, accessKeyId, accessKey)
  topic = ""
  source = ""

  # write data to the Logstore
  logitemList = []  # LogItem list
  contents = [('ip',ip), ('uid',uid), ('device',device)]
  print("FunctionCompute --> LogService test output: " + ip + " - " + uid + " - " + device)
  logItem = LogItem()
  logItem.set_time(int(time.time()))
  logItem.set_contents(contents)
  logitemList.append(logItem)
  req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
  res2 = client.put_logs(req2)
  res2.log_print()


  response_content = "you can return any string"
  api_rep = {
    "isBase64Encoded":"false",
    "statusCode":"200",
    "headers":{"x-custom-header":"your header"},
    "body":response_content
  }
  return json.dumps(api_rep)
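To exercise this handler outside Function Compute, it can be called directly with a JSON event of the same shape it reads; the event below is hypothetical, and real endpoint/AccessKey values are still required for the PutLogs call to succeed.

if __name__ == '__main__':
    # hypothetical API Gateway style event; the keys mirror what handler() reads
    fake_event = json.dumps({
        "queryParameters": {"uid": "u-001", "ip": "1.2.3.4", "device": "ios"}
    })
    print(handler(fake_event, None))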
Code Example #10
    def report_span(self, span):
        if self.trace_queue.qsize() > self.max_buffer_trace:
            # NOTE: despite the warning, the span is still built and enqueued below
            print("discard trace as queue full, trace_id:" + str(span.context.trace_id) + "\tspan_id:" + str(span.context.span_id)
                  + "\tparent_id:" + str(span.context.parent_id) + "\tservice_name:" + span.tracer.service_name
                  + "\tOperation_name:" + span.operation_name + "\tstart_time:" + str(span.start_time) + "\tend_time:" + str(span.end_time)
                  + "\ttags:" + str(span.tags) + "\tlogs:" + str(span.logs))

        logItem = LogItem()
        logItem.set_time(int(time.time()))
        logItem.push_back("TraceID", str(span.context.trace_id))
        logItem.push_back("SpanID", str(span.context.span_id))
        logItem.push_back("ParentSpanID", str(span.context.parent_id))
        logItem.push_back("ServiceName", span.tracer.service_name)
        logItem.push_back("OperationName", span.operation_name)
        start_time = int(span.start_time * 1000 * 1000 * 1000)  # seconds -> nanoseconds
        end_time = int(span.end_time * 1000 * 1000 * 1000)
        logItem.push_back("StartTime", str(start_time))
        logItem.push_back("Duration", str(end_time - start_time))
        logItem.push_back("process.hostname", self.hostname)
        logItem.push_back("process.ips", self.ip)

        tag_map = dict()
        for tag in span.tags:
            tag_map["tag." + str(tag.key)] = str(tag.value)

        for key, value in tag_map.items():
            logItem.push_back(key, value)

        log_list = []
        for log in span.logs:
            log_list.append(str(log.value))
        if len(log_list) > 0:
            logItem.push_back("logs", str(log_list))

        self.trace_queue.put(logItem)

        # flush when the buffer is over capacity or the flush interval has elapsed
        if self.trace_queue.qsize() > self.max_buffer_trace or time.time() - self.last_send_time > self.buffer_interval:
            self.semaphore.release()
Code Example #11
File: aliyun.py  Project: perfest/webagent
def main():
    endpoint = ''  # the Endpoint matching the region of the Project created in the earlier steps
    accessKeyId = ''  # your Alibaba Cloud AccessKeyId
    accessKey = ''  # your Alibaba Cloud AccessKeySecret
    project = ''  # name of the Project created in the earlier steps
    logstore = ''  # name of the Logstore created in the earlier steps
    # IMPORTANT: configure the Logstore with 4 shards so the later test steps can pass
    # build a client
    client = LogClient(endpoint, accessKeyId, accessKey)
    # list all Logstores in the project
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()
    topic = ""
    source = ""
    # send 10 batches, each containing 10 log entries
    for i in range(10):
        logitemList = []  # LogItem list
        for j in range(10):
            contents = [('index', str(i * 10 + j))]
            logItem = LogItem()
            logItem.set_time(int(time.time()))
            logItem.set_contents(contents)
            logitemList.append(logItem)
        req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
        res2 = client.put_logs(req2)
        res2.log_print()
    # list all shards and read back all the data written in the last minute
    listShardRes = client.list_shards(project, logstore)

    for shard in listShardRes.get_shards_info():
        shard_id = shard["shardID"]
        start_time = int(time.time() - 60)
        end_time = start_time + 60
        res = client.get_cursor(project, logstore, shard_id, start_time)
        res.log_print()
        start_cursor = res.get_cursor()
        res = client.get_cursor(project, logstore, shard_id, end_time)
        end_cursor = res.get_cursor()

        while True:
            loggroup_count = 100  # read up to 100 log groups per call
            res = client.pull_logs(project, logstore, shard_id, start_cursor,
                                   loggroup_count, end_cursor)
            res.log_print()
            next_cursor = res.get_next_cursor()
            if next_cursor == start_cursor:
                break
            start_cursor = next_cursor
    # IMPORTANT: the query APIs below can only be used after indexing has been enabled on the Logstore
    time.sleep(60)
    topic = ""
    query = "index"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None

    # count the logs matching the query within the last 10 minutes; retry if the result is not yet complete
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()
    # total number of logs matching the query
    total_log_count = res3.get_total_count()
    log_line = 10

    # read 10 logs per request until all matching logs have been fetched; retry each request up to 3 times if the result is incomplete
    for offset in range(0, total_log_count, log_line):
        res4 = None
        for retry_time in range(0, 3):
            req4 = GetLogsRequest(project, logstore, From, To, topic, query,
                                  log_line, offset, False)
            res4 = client.get_logs(req4)
            if res4 is not None and res4.is_completed():
                break
            time.sleep(1)
        if res4 is not None:
            res4.log_print()
    listShardRes = client.list_shards(project, logstore)
    shard = listShardRes.get_shards_info()[0]

    # split a shard
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        inclusiveBeginKey = shard["inclusiveBeginKey"]
        midKey = inclusiveBeginKey[:-1] + str((int(inclusiveBeginKey[-1:])) +
                                              1)
        client.split_shard(project, logstore, shard_id, midKey)
    # merge shards
    shard = listShardRes.get_shards_info()[1]
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        client.merge_shard(project, logstore, shard_id)
    # delete a shard
    shard = listShardRes.get_shards_info()[-1]
    if shard["status"] == "readonly":
        shard_id = shard["shardID"]
        client.delete_shard(project, logstore, shard_id)

    # create an external store (RDS data source)
    res = client.create_external_store(
        project,
        ExternalStoreConfig("rds_store", "cn-qingdao", "rds-vpc",
                            "vpc-************", "i***********", "*.*.*.*",
                            "3306", "root", "sfdsfldsfksflsdfs", "meta",
                            "join_meta"))
    res.log_print()
    res = client.update_external_store(
        project,
        ExternalStoreConfig("rds_store", "cn-qingdao", "rds-vpc",
                            "vpc-************", "i************", "*.*.*.*",
                            "3306", "root", "sfdsfldsfksflsdfs", "meta",
                            "join_meta"))
    res.log_print()
    res = client.get_external_store(project, "rds_store")
    res.log_print()
    res = client.list_external_store(project, "")
    res.log_print()
    res = client.delete_external_store(project, "rds_store")
    res.log_print()
    # run a query/analysis with the Python SDK
    req4 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | select count(1)", 10, 0, False)
    res4 = client.get_logs(req4)
    # run a query joining the Logstore with the RDS external store via the Python SDK
    req4 = GetLogsRequest(
        project, logstore, From, To, topic, "* | select count(1) from " +
        logstore + "  l  join  rds_store  r on  l.ikey =r.ekey", 10, 0, False)
    res4 = client.get_logs(req4)
    # write the query result into RDS via the Python SDK
    req4 = GetLogsRequest(project, logstore, From, To, topic,
                          "* | insert into rds_store select count(1) ", 10, 0,
                          False)
    res4 = client.get_logs(req4)
Code Example #12
File: aliyun_sls.py  Project: yjbq125/blog
def main():
    endpoint = 'cn-hangzhou.sls.aliyuncs.com'  # the Endpoint matching the region of the Project created in the earlier steps
    accessKeyId = 'your_access_key_id'  # your Alibaba Cloud AccessKeyId
    accessKey = 'your_access_key'  # your Alibaba Cloud AccessKeySecret
    project = 'your_project'  # name of the Project created in the earlier steps
    logstore = 'your_logstore'  # name of the Logstore created in the earlier steps
    # build a client
    client = LogClient(endpoint, accessKeyId, accessKey)
    # list all Logstores in the project
    req1 = ListLogstoresRequest(project)
    res1 = client.list_logstores(req1)
    res1.log_print()
    topic = ""
    source = ""
    # send 10 batches, each containing 10 log entries
    for i in range(10):
        logitemList = []  # LogItem list
        for j in range(10):
            contents = [('index', str(i * 10 + j))]
            logItem = LogItem()
            logItem.set_time(int(time.time()))
            logItem.set_contents(contents)
            logitemList.append(logItem)
        req2 = PutLogsRequest(project, logstore, topic, source, logitemList)
        res2 = client.put_logs(req2)
        res2.log_print()
    # list all shards and read back all the data written in the last minute
    listShardRes = client.list_shards(project, logstore)
    for shard in listShardRes.get_shards_info():
        shard_id = shard["shardID"]
        start_time = int(time.time() - 60)
        end_time = start_time + 60
        res = client.get_cursor(project, logstore, shard_id, start_time)
        res.log_print()
        start_cursor = res.get_cursor()
        res = client.get_cursor(project, logstore, shard_id, end_time)
        end_cursor = res.get_cursor()
        while True:
            loggroup_count = 100  # read up to 100 log groups per call
            res = client.pull_logs(project, logstore, shard_id, start_cursor,
                                   loggroup_count, end_cursor)
            res.log_print()
            next_cursor = res.get_next_cursor()
            if next_cursor == start_cursor:
                break
            start_cursor = next_cursor
    # IMPORTANT: the query APIs below can only be used after indexing has been enabled on the Logstore
    time.sleep(60)
    topic = ""
    query = "index"
    From = int(time.time()) - 600
    To = int(time.time())
    res3 = None
    # count the logs matching the query within the last 10 minutes; retry if the result is not yet complete
    while (res3 is None) or (not res3.is_completed()):
        req3 = GetHistogramsRequest(project, logstore, From, To, topic, query)
        res3 = client.get_histograms(req3)
    res3.log_print()
    # total number of logs matching the query
    total_log_count = res3.get_total_count()
    log_line = 10
    # read 10 logs per request until all matching logs have been fetched; retry each request up to 3 times if the result is incomplete
    for offset in range(0, total_log_count, log_line):
        res4 = None
        for retry_time in range(0, 3):
            req4 = GetLogsRequest(project, logstore, From, To, topic, query,
                                  log_line, offset, False)
            res4 = client.get_logs(req4)
            if res4 is not None and res4.is_completed():
                break
            time.sleep(1)
        if res4 is not None:
            res4.log_print()
    listShardRes = client.list_shards(project, logstore)
    shard = listShardRes.get_shards_info()[0]
    # split a shard
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        inclusiveBeginKey = shard["inclusiveBeginKey"]
        midKey = inclusiveBeginKey[:-1] + str(int(inclusiveBeginKey[-1:]) + 1)
        client.split_shard(project, logstore, shard_id, midKey)
    # merge shards
    shard = listShardRes.get_shards_info()[1]
    if shard["status"] == "readwrite":
        shard_id = shard["shardID"]
        client.merge_shard(project, logstore, shard_id)
    # delete a shard
    shard = listShardRes.get_shards_info()[-1]
    if shard["status"] == "readonly":
        shard_id = shard["shardID"]
        client.delete_shard(project, logstore, shard_id)