Code Example #1
def count_hot_uid(uid, start_time, stop_time):
    query_body = {
        "query":{
            "filtered":{
                "filter":{
                    "bool":{
                        "must":[
                            {"range":{
                                "timestamp":{
                                    "gte":start_time,
                                    "lt": stop_time
                                }
                            }},
                            {"term": {"root_uid": uid}}
                        ]
                    }
                }
#                "query":{
#                    "bool":{
#                        "should":[
#                        ]
#                    }
#                }
            }
        }
    }


    count = 0
    datetime = ts2datetime(float(stop_time))
    index_name = flow_text_index_name_pre + datetime
    exist_es = es_text.indices.exists(index_name)
    if exist_es:
        count = es_text.count(index=index_name, doc_type=flow_text_index_type, body=query_body)["count"]
    else:
        count = 0

    datetime_1 = ts2datetime(float(start_time))
    if datetime_1 != datetime:
        ts = float(stop_time)
        while 1:
            ts = ts-day_time
            datetime = ts2datetime(ts)
            index_name = flow_text_index_name_pre + datetime
            exist_es = es_text.indices.exists(index_name)
            if exist_es:
                # accumulate counts across the daily indices (was overwritten before)
                count += es_text.count(index=index_name, doc_type=flow_text_index_type, body=query_body)["count"]
            if datetime_1 == datetime:
                break

    return count
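This and the following examples lean on project helpers that are not shown, most importantly the converters between unix timestamps and the date strings used as daily index suffixes. A minimal sketch of what they presumably look like, assuming the flow_text_YYYY-MM-DD index naming visible in the later examples (the real implementations, as well as the ES clients and index prefixes, may differ):

import time

# Assumed helpers: convert between unix timestamps and the 'YYYY-MM-DD'
# strings used as daily index suffixes (e.g. "flow_text_2016-11-20").
def ts2datetime(ts):
    return time.strftime('%Y-%m-%d', time.localtime(int(ts)))

def datetime2ts(date):
    return int(time.mktime(time.strptime(date, '%Y-%m-%d')))

day_time = 24 * 3600  # assumed: one day in seconds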
Code Example #2
def get_keyword_trend(keyword, ts, interval):
    query_body = {
        "query":{
            "filtered":{
                "filter":{
                    "bool":{
                        "must":[
                            {"range":{
                                "timestamp":{
                                   "gte": ts - interval,
                                   "lt": ts
                                }
                            }},
                            {"term":{"keywords_string":keyword}}
                        ]
                    }
                }
            }
        }
    }

    datetime = ts2datetime(ts - interval)
    index_name = "flow_text_" + datetime
    exist_es = es_flow_text.indices.exists(index=index_name)
    if exist_es:
        label = 1
        count = es_flow_text.count(index=index_name, doc_type="text", body=query_body)['count']
        print count
    else:
        label = 0
        count = 0
    return count, label
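Note that the "filtered" wrapper used in these query bodies is Elasticsearch 1.x syntax; it was deprecated in 2.x and removed in 5.x in favor of filters inside a bool query. A hedged equivalent of the query body above for a newer cluster (the cluster version is an assumption; field names are taken from the example):

def build_keyword_trend_query(keyword, ts, interval):
    # ES 2.x+ form: "filtered" replaced by a bool query with a filter clause
    return {
        "query": {
            "bool": {
                "filter": [
                    {"range": {"timestamp": {"gte": ts - interval, "lt": ts}}},
                    {"term": {"keywords_string": keyword}}
                ]
            }
        }
    }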
Code Example #3
def get_weibo_num(keywords, start_time, end_time):
    keyword_query_list = []
    for keyword in keywords.split():
        keyword_query_list.append({'wildcard':{'text':'*'+keyword+'*'}})
        
    query_body = {
        'query':{
            'bool':{
                'should':keyword_query_list,
                'minimum_should_match':2
            }
        }
    }

    index_list = dateRange(start_time, end_time)
    count = es_flow_text.count(index=index_list, doc_type='text', body=query_body)["count"]

    return count
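dateRange is another helper that is not shown; it is passed directly to es_flow_text.count, so it presumably returns the list of daily index names covering the query window. A minimal sketch under that assumption, reusing the ts2datetime/datetime2ts helpers sketched after Code Example #1 (the prefix and date format are assumptions):

def dateRange(start_time, end_time, prefix='flow_text_'):
    # assumed behavior: one daily index name per day in [start_time, end_time]
    index_list = []
    ts = datetime2ts(ts2datetime(float(start_time)))  # snap to the start of the first day
    while ts <= float(end_time):
        index_list.append(prefix + ts2datetime(ts))
        ts += 24 * 3600
    return index_list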
Code Example #4
def active_time_compute(uids_list, datetime):
    flow_text_index_name = flow_text_index_name_pre + datetime

    start_ts = datetime2ts(datetime)
    day_hour_counts = []
    day_hours = DAY_HOURS
    for i in range(day_hours):

        query_body = {
            'query': {
                'bool': {
                    'must': [{
                        'terms': {
                            'uid': uids_list
                        }
                    }, {
                        'range': {
                            'timestamp': {
                                'gte': start_ts + i * HOUR,
                                'lt': start_ts + (i + 1) * HOUR
                            }
                        }
                    }]
                }
            }
        }

        active_hour_counts = es_flow_text.count(index=flow_text_index_name,
                                                doc_type=flow_text_index_type,
                                                body=query_body)
        if active_hour_counts['_shards']['successful'] != 0:
            hour_counts = active_hour_counts['count']
        else:
            print 'active_hour_counts error'
            hour_counts = 0
        if len(uids_list):
            day_hour_counts.append(hour_counts / float(len(uids_list)))
        else:
            day_hour_counts.append(0)
    # [21, 32, 213, 42, ...]  one-dimensional list with 24 elements (one per hour)
    return day_hour_counts
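active_time_compute assumes a couple of module-level constants and returns one averaged count per hour. Plausible values and a hypothetical call (both are assumptions, not confirmed by the source):

DAY_HOURS = 24   # assumed: hours per day
HOUR = 3600      # assumed: seconds per hour

# hypothetical usage: hourly activity profile of a monitored uid group
# day_hour_counts = active_time_compute(['1234567890', '2345678901'], '2016-11-20')
# peak_hour = day_hour_counts.index(max(day_hour_counts))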
Code Example #5
def day_post_num_compute(uids_list,datetime):
    flow_text_index_name = flow_text_index_name_pre+datetime

    es_weibo_counts = es_flow_text.count(index=flow_text_index_name,doc_type=flow_text_index_type,\
                                body={'query':{'match_all':{}}})
    if es_weibo_counts['_shards']['successful'] != 0:
        weibo_counts = es_weibo_counts['count']
    else:
        print 'es_weibo_counts error'
        weibo_counts = 0
    print weibo_counts

    query_body = {
        'query':{
            'filtered':{
                'filter':{
                    'terms':{
                        'uid':uids_list
                    }
                }
            }
        },
        'aggs':{
            'all_uids':{
                'terms':{
                    'field':'uid',
                    'size':MAX_SEARCH_SIZE
                }
            }
        }
    }
    es_uid_counts = es_flow_text.search(index=flow_text_index_name,doc_type=flow_text_index_type,\
                    body=query_body)['aggregations']['all_uids']['buckets']
    
    print len(es_uid_counts)

    if es_uid_counts:
        # average number of posts per active uid
        print float(weibo_counts)/len(es_uid_counts)
    else:
        print 'no active uid buckets'
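The search in day_post_num_compute is used only for its terms aggregation: each bucket pairs a distinct uid with its document count, so len(es_uid_counts) is the number of distinct active users that day. An illustrative bucket list (values made up):

# Illustrative shape of the buckets returned by the 'all_uids' terms aggregation
es_uid_counts_example = [
    {"key": "1234567890", "doc_count": 17},
    {"key": "2345678901", "doc_count": 9},
]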
Code Example #6
def get_keyword_trend(keyword, ts, interval):
    query_body = {
        "query": {
            "filtered": {
                "filter": {
                    "bool": {
                        "must": [{
                            "range": {
                                "timestamp": {
                                    "gte": ts - interval,
                                    "lt": ts
                                }
                            }
                        }, {
                            "term": {
                                "keywords_string": keyword
                            }
                        }]
                    }
                }
            }
        }
    }

    datetime = ts2datetime(ts - interval)
    index_name = "flow_text_" + datetime
    exist_es = es_flow_text.indices.exists(index=index_name)
    if exist_es:
        label = 1
        count = es_flow_text.count(index=index_name,
                                   doc_type="text",
                                   body=query_body)['count']
        print count
    else:
        label = 0
        count = 0
    return count, label
Code Example #7
def trendline_list(mid, total_value, mid_ts):
    if RUN_TYPE:
        ts = time.time()
    else:
        ts = datetime2ts("2016-11-20")
    """
    index_list = []
    for i in range(diffusion_time):
        index_list.append("flow_text_"+ts2datetime(ts-i*24*3600))

    result = dict()
    for iter_index in index_list:
        if not es.indices.exists(index=iter_index):
            continue
        try:
            result = es.get(index=iter_index, doc_type="text", id=mid)["_source"]
            break
        except:
            pass

    if not result:
        return []
    """

    nn = 24 * 3600 / diffusion_time_interval  # number of diffusion intervals per day
    current_list = []
    rising_list = []
    falling_list = []
    exist_time_list = []
    total_time_list = []

    timestamp = mid_ts
    start_ts = mid_ts
    timestamp = datehour2ts(ts2datehour(timestamp))
    for i in range(diffusion_time * nn):
        total_time_list.append(timestamp + i * diffusion_time_interval)

    # if diffusion runs past the configured window (5 days), return the time list observed so far
    if 1:
        while 1:
            query_body = {
                "query": {
                    "bool": {
                        "must": [{
                            "term": {
                                "root_mid": mid
                            }
                        }, {
                            "range": {
                                "timestamp": {
                                    "gte": timestamp,
                                    "lt": timestamp + diffusion_time_interval
                                }
                            }
                        }]
                    }
                }
            }
            index_name = "flow_text_" + ts2datetime(timestamp)
            count = es.count(index=index_name,
                             doc_type="text",
                             body=query_body)["count"]
            current_list.append(count)
            exist_time_list.append(timestamp)
            timestamp += diffusion_time_interval
            if timestamp >= ts:
                break

    left_set = set(total_time_list) - set(exist_time_list)
    left_list = sorted(list(left_set), reverse=False)

    max_value = max(current_list)
    index_exist = len(current_list)
    value = current_list

    expected_value = total_value * 0.8 / (0.2 * nn * diffusion_time)
    if expected_value <= max_value:
        top_value = (max_value + total_value) / 2
    else:
        top_value = expected_value

    # weibo prediction
    k = 5
    h = 0.5
    peak = spd(value, h, k)
    flag = judge(peak, value)
    if len(flag) == 2:
        paras = getTwoBeauties(value, flag[0], flag[1])
        paras[-1] = diffusion_time * nn
        series = bassTwoPeaks(paras)
    else:
        paras = getSingleBeauty(value)
        paras[-1] = diffusion_time * nn
        series = bassOnePeak(paras)

    # index of the predicted peak in the fitted series
    predict_climax = series.index(max(series))

    if predict_climax > index_exist:
        predict_climax_left = predict_climax - len(current_list)
        rise_trend, fall_trend = get_trend(left_list, predict_climax_left,
                                           value[-1], top_value)
        true_climax = exist_time_list[0] + (
            exist_time_list[1] - exist_time_list[0]) * predict_climax
    else:
        rise_trend, fall_trend = get_trend(left_list, 0, value[-1], 1)
        true_climax = exist_time_list[value.index(max(value))]
        top_value = max(value)  # the observed maximum is the peak

    results = dict()
    results["climax"] = [true_climax, top_value]
    results["rise_trend"] = rise_trend
    results["fall_trend"] = fall_trend
    new_list = []
    for i in range(len(exist_time_list)):
        new_list.append([exist_time_list[i], value[i]])
    results["exist_trend"] = new_list

    return results
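For reference, the dictionary returned by trendline_list has the following shape, reconstructed from the assignments above (values are illustrative; the rise/fall lists come from the project's get_trend helper, which is not shown):

results_example = {
    "climax": [1479643200, 120],           # [predicted peak timestamp, peak value]
    "rise_trend": [],                      # rising segment from get_trend()
    "fall_trend": [],                      # falling segment from get_trend()
    "exist_trend": [[1479600000, 3],       # observed [timestamp, count] pairs,
                    [1479603600, 17]],     # one per diffusion_time_interval
}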
Code Example #8
def social_sensing(task_detail):

    '''
    with open("prediction_uid.pkl", "r") as f:
        uid_model = pickle.load(f)
    with open("prediction_weibo.pkl", "r") as f:
        weibo_model = pickle.load(f)
    '''
    # task name, sensors, end time, previous status, creator, timestamp
    
    task_name = task_detail[0]
    social_sensors = task_detail[1]
    #ts = int(task_detail[2])
    ts = float(task_detail[2])

    #xnr_user_no = task_detail[3]

    print ts2date(ts)
    index_list = []
    important_words = []
    datetime_1 = ts2datetime(ts)
    index_name_1 = flow_text_index_name_pre + datetime_1
    exist_es = es_text.indices.exists(index=index_name_1)
    if exist_es:
        index_list.append(index_name_1)
    datetime_2 = ts2datetime(ts-DAY)
    index_name_2 = flow_text_index_name_pre + datetime_2
    exist_es = es_text.indices.exists(index=index_name_2)
    if exist_es:
        index_list.append(index_name_2)
    if es_text.indices.exists(index=flow_text_index_name_pre+ts2datetime(ts-2*DAY)):
        index_list.append(flow_text_index_name_pre+ts2datetime(ts-2*DAY))

    # PART 1
    
    #forward_result = get_forward_numerical_info(task_name, ts, create_by)
    # original / retweeted weibo mid lists from the previous time range
    forward_origin_weibo_list, forward_1 = query_mid_list(ts-time_interval, social_sensors, forward_time_range)
    forward_retweeted_weibo_list, forward_3 = query_mid_list(ts-time_interval, social_sensors, forward_time_range, 3)
    # original weibo mid list within the current interval
    current_mid_list, current_1 = query_mid_list(ts, social_sensors, time_interval)
    current_retweeted_mid_list, current_3 = query_mid_list(ts, social_sensors, time_interval, 3)
    all_mid_list = []
    all_mid_list.extend(current_mid_list)
    all_mid_list.extend(current_retweeted_mid_list)
    all_mid_list.extend(forward_origin_weibo_list)
    all_mid_list.extend(forward_retweeted_weibo_list)
    all_origin_list = []
    all_origin_list.extend(current_mid_list)
    all_origin_list.extend(forward_origin_weibo_list)
    all_origin_list = list(set(all_origin_list))
    all_retweeted_list = []
    all_retweeted_list.extend(current_retweeted_mid_list)
    all_retweeted_list.extend(forward_retweeted_weibo_list)  # mid/root_mid of retweeted weibos
    all_retweeted_list = list(set(all_retweeted_list))


    all_mid_list = filter_mid(all_mid_list)
    all_origin_list = filter_mid(all_origin_list)
    all_retweeted_list = filter_mid(all_retweeted_list)

    print "all mid list: ", len(all_mid_list)
    print "all_origin_list", len(all_origin_list)
    print "all_retweeted_list", len(all_retweeted_list)


    # query retweet and comment counts for these weibos in the current window, aggregated by message_type
    #statistics_count = query_related_weibo(ts, all_mid_list, time_interval)
    if all_origin_list:
        #origin_weibo_detail = query_hot_weibo(ts, all_origin_list, time_interval) # original weibo details
        origin_weibo_detail = dict()
        for mid in all_origin_list:
            retweet_count = es_text.count(index=index_list, doc_type="text", body={"query":{"bool":{"must":[{"term":{"root_mid": mid}}, {"term":{"message_type":3}}]}}})["count"]
            comment_count = es_text.count(index=index_list, doc_type="text", body={"query":{"bool":{"must":[{"term":{"root_mid": mid}}, {"term":{"message_type":2}}]}}})["count"]
            tmp = dict()
            tmp["retweeted"] = retweet_count
            tmp["comment"] = comment_count
            origin_weibo_detail[mid] = tmp
    else:
        origin_weibo_detail = {}
    print "len(origin_weibo_detail): ", len(origin_weibo_detail)
    if all_retweeted_list:
        retweeted_weibo_detail = dict()
        for mid in all_retweeted_list:
            retweet_count = es_text.count(index=index_list, doc_type="text", body={"query":{"bool":{"must":[{"term":{"root_mid": mid}}, {"term":{"message_type":3}}]}}})["count"]
            comment_count = es_text.count(index=index_list, doc_type="text", body={"query":{"bool":{"must":[{"term":{"root_mid": mid}}, {"term":{"message_type":2}}]}}})["count"]
            tmp = dict()
            tmp["retweeted"] = retweet_count
            tmp["comment"] = comment_count
            retweeted_weibo_detail[mid] = tmp
        #retweeted_weibo_detail = query_hot_weibo(ts, all_retweeted_list, time_interval) # retweeted weibo details
    else:
        retweeted_weibo_detail = {}
    print "len(retweeted_weibo_detail): ", len(retweeted_weibo_detail)
    #current_total_count = statistics_count['total_count']

    # total number of weibos in the current interval
    #current_retweeted_count = statistics_count['retweeted']
    #current_comment_count = statistics_count['comment']

    #all_mid_list = list(set(all_origin_list[:100]) | set(all_retweeted_list[:100]))


    # sensed events: all_mid_list
    sensitive_text_list = []
    tmp_sensitive_warning = ""
    text_dict = dict() # text info
    mid_value = dict() # topic value per text
    duplicate_dict = dict() # duplicate mapping
    portrait_dict = dict() # portrait / background info
    classify_text_dict = dict() # texts for classification
    classify_uid_list = []
    duplicate_text_list = []
    sensitive_words_dict = dict()
    sensitive_weibo_detail = {}
    trendline_dict = dict()
    all_text_dict = dict()

    # start once an event has occurred
    if 1:
        print "index_list:", index_list

        if index_list and all_mid_list:
            query_body = {
                "query":{
                    "filtered":{
                        "filter":{
                            "terms":{"mid": all_mid_list}
                        }
                    }
                },
                "size": 5000
            }
            search_results = es_text.search(index=index_list, doc_type="text", body=query_body)['hits']['hits']
            print "search mid len: ", len(search_results)
            tmp_sensitive_warning = ""
            text_dict = dict() # text info
            mid_value = dict() # topic value per text
            duplicate_dict = dict() # duplicate mapping
            portrait_dict = dict() # portrait / background info
            classify_text_dict = dict() # texts for classification
            #classify_uid_list = []
            classify_mid_list = []
            duplicate_text_list = []
            sensitive_words_dict = dict()
            mid_ts_dict = dict() # text publish timestamps
            uid_prediction_dict = dict()
            weibo_prediction_dict = dict()
            trendline_dict = dict()
            feature_prediction_list = []  # feature
            mid_prediction_list = [] # corresponding mids
            if search_results:
                for item in search_results:
                    iter_uid = item['_source']['uid']
                    iter_mid = item['_source']['mid']
                    mid_ts_dict[iter_mid] = item["_source"]["timestamp"]
                    iter_text = item['_source']['text'].encode('utf-8', 'ignore')
                    iter_sensitive = item['_source'].get('sensitive', 0)
                    tmp_text = get_weibo(item['_source'])
                    all_text_dict[iter_mid] = tmp_text

                    duplicate_text_list.append({"_id":iter_mid, "title": "", "content":iter_text.decode("utf-8",'ignore')})

                    if iter_sensitive:
                        tmp_sensitive_warning = signal_sensitive_variation  # weibo containing sensitive words
                        sensitive_words_dict[iter_mid] = iter_sensitive

                    keywords_dict = json.loads(item['_source']['keywords_dict'])
                    personal_keywords_dict = dict()
                    for k, v in keywords_dict.iteritems():
                        k = k.encode('utf-8', 'ignore')
                        personal_keywords_dict[k] = v
                    classify_text_dict[iter_mid] = personal_keywords_dict
                    #classify_uid_list.append(iter_uid)
                    classify_mid_list.append(iter_mid)

                # deduplicate
                print "start duplicate"
                if duplicate_text_list:
                    dup_results = duplicate(duplicate_text_list)
                    for item in dup_results:
                        if item['duplicate']:
                            duplicate_dict[item['_id']] = item['same_from']

                # classify
                print "start classify"
                mid_value = dict()
                if classify_text_dict:
                    #classify_results = topic_classfiy(classify_uid_list, classify_text_dict)
                    classify_results = topic_classfiy(classify_mid_list, classify_text_dict)
                    
                    #print "classify_results: ", classify_results

                    for k,v in classify_results.iteritems(): # mid:value
                        #mid_value[k] = topic_value_dict[v[0]]
                        mid_value[k]=v[0]
                        #feature_list = organize_feature(k, mid_ts_dict[k])
                        #feature_prediction_list.append(feature_list) # feature list
                        #mid_prediction_list.append(k) # corresponding 
                    
                # prediction
                """
                print "start prediction"
                weibo_prediction_result = weibo_model.predict(feature_prediction_list)
                uid_prediction_result = uid_model.predict(feature_prediction_list)
                for i in range(len(mid_prediction_list)):
                    if  i % 100 == 0:
                        print i
                    uid_prediction_dict[mid_prediction_list[i]] = uid_prediction_result[i]
                    weibo_prediction_dict[mid_prediction_list[i]] = weibo_prediction_result[i]
                    tmp_trendline = trendline_list(mid_prediction_list[i], weibo_prediction_result[i], mid_ts_dict[mid_prediction_list[i]])
                    trendline_dict[mid_prediction_list[i]] = tmp_trendline
                """
    # organize data

    mid_list = all_text_dict.keys()
    print "final mid:", len(mid_list)
    print "intersection: ", len(set(mid_list)&set(all_mid_list))
    bulk_action = []
    count = 0
    for mid in mid_list:
        iter_dict = dict()
        if origin_weibo_detail.has_key(mid):
            iter_dict.update(origin_weibo_detail[mid])
            iter_dict["type"] = 1
        elif retweeted_weibo_detail.has_key(mid):
            iter_dict.update(retweeted_weibo_detail[mid])
            iter_dict["type"] = 3
        else:
            iter_dict["retweeted"] = 0
            iter_dict["comment"] = 0
            print "mid in all_mid_list: ", mid in set(all_mid_list)

        #iter_dict["trendline"] = json.dumps(trendline_dict[mid])
        if duplicate_dict.has_key(mid):
            iter_dict["duplicate"] = duplicate_dict[mid]
        else:
            iter_dict["duplicate"] = ""

        #iter_dict["uid_prediction"] = uid_prediction_dict[mid]
        #iter_dict["weibo_prediction"] = weibo_prediction_dict[mid]
        iter_dict["compute_status"] = 0  # 尚未计算
        iter_dict["topic_field"] = mid_value[mid]
        iter_dict["detect_ts"] = ts
        #iter_dict["xnr_user_no"] = xnr_user_no

        iter_dict.update(all_text_dict[mid])
        count += 1
        #print 'iter_dict:::',iter_dict
        # _id = xnr_user_no + '_' + mid
        _id = mid
        bulk_action.extend([{"index":{"_id": _id}}, iter_dict])
        if count % 500 == 0:
            es_xnr.bulk(bulk_action, index="social_sensing_text", doc_type="text", timeout=600)
            bulk_action = []


    if bulk_action:
        es_xnr.bulk(bulk_action, index="social_sensing_text", doc_type="text", timeout=600)


    return "1"
Code Example #9
def social_sensing(task_detail):
    # task name, sensors, task creation time (start of the sensing window)
    
    task_name = task_detail[0]
    social_sensors = task_detail[1]
    ts = float(task_detail[2])

    print 'sensing_start_time:',ts2date(ts)
    index_list = ["flow_text_gangdu"]   # 被感知的数据库,后期根据情况修改
 

    # original / retweeted weibo lists from the previous two days (excluding the current hour)
    forward_origin_weibo_list, forward_1 = query_mid_list(ts-time_interval, social_sensors, forward_time_range)
    forward_retweeted_weibo_list, forward_3 = query_mid_list(ts-time_interval, social_sensors, forward_time_range, 3)
    
    # original / retweeted weibo lists within the past hour
    current_origin_weibo_list, current_1 = query_mid_list(ts, social_sensors, time_interval)
    current_retweeted_weibo_list, current_3 = query_mid_list(ts, social_sensors, time_interval, 3)

    all_mid_list = []
    all_mid_list.extend(current_origin_weibo_list)
    all_mid_list.extend(current_retweeted_weibo_list)
    all_mid_list.extend(forward_origin_weibo_list)
    all_mid_list.extend(forward_retweeted_weibo_list)

    all_origin_list = []
    all_origin_list.extend(current_origin_weibo_list)
    all_origin_list.extend(forward_origin_weibo_list)
    all_origin_list = list(set(all_origin_list))

    all_retweeted_list = []
    all_retweeted_list.extend(current_retweeted_weibo_list)
    all_retweeted_list.extend(forward_retweeted_weibo_list)   # mid/root_mid of retweeted weibos
    all_retweeted_list = list(set(all_retweeted_list))


    all_mid_list = filter_mid(all_mid_list)
    all_origin_list = filter_mid(all_origin_list)
    all_retweeted_list = filter_mid(all_retweeted_list)

    print "all mid list: ", len(all_mid_list)
    print "all_origin_list", len(all_origin_list)
    print "all_retweeted_list", len(all_retweeted_list)


    # query retweet and comment counts for these weibos in the current window, aggregated by message_type
    if all_origin_list:
        origin_weibo_detail = dict()
        for mid in all_origin_list:
            retweet_count = es_flow_text.count(index=index_list, doc_type="text", body={"query":{"bool":{"must":[{"term":{"root_mid": mid}}, {"term":{"message_type":3}}]}}})["count"]
            comment_count = es_flow_text.count(index=index_list, doc_type="text", body={"query":{"bool":{"must":[{"term":{"root_mid": mid}}, {"term":{"message_type":2}}]}}})["count"]
            tmp = dict()
            tmp["retweeted_stat"] = retweet_count
            tmp["comment_stat"] = comment_count
            origin_weibo_detail[mid] = tmp
    else:
        origin_weibo_detail = {}
    print "len(origin_weibo_detail): ", len(origin_weibo_detail)

    if all_retweeted_list:
        retweeted_weibo_detail = dict()
        for mid in all_retweeted_list:
            retweet_count = es_flow_text.count(index=index_list, doc_type="text", body={"query":{"bool":{"must":[{"term":{"root_mid": mid}}, {"term":{"message_type":3}}]}}})["count"]
            comment_count = es_flow_text.count(index=index_list, doc_type="text", body={"query":{"bool":{"must":[{"term":{"root_mid": mid}}, {"term":{"message_type":2}}]}}})["count"]
            tmp = dict()
            tmp["retweeted_stat"] = retweet_count
            tmp["comment_stat"] = comment_count
            retweeted_weibo_detail[mid] = tmp
    else:
        retweeted_weibo_detail = {}
    print "len(retweeted_weibo_detail): ", len(retweeted_weibo_detail)


    # once an event occurs, query everything in all_mid_list over one hour + two days
    if index_list and all_mid_list:
        query_body = {
            "query":{
                "filtered":{
                    "filter":{
                        "terms":{"mid": all_mid_list}
                    }
                }
            },
            "size": 5000
        }
        search_results = es_flow_text.search(index=index_list, doc_type="text", body=query_body)['hits']['hits']
        print "search mid len: ", len(search_results)


        all_text_dict = dict()          # sensed events: all_mid_list
        mid_value = dict()              # topic value per text
        duplicate_dict = dict()         # duplicate mapping
        classify_text_dict = dict()     # texts for classification
        sensitive_words_dict = dict()    

        duplicate_text_list = []
        classify_mid_list = []
        
        if search_results:
            for item in search_results:
                iter_mid = item['_source']['mid']
                iter_text = item['_source']['text'].encode('utf-8', 'ignore')
                iter_sensitive = item['_source'].get('sensitive', 0)
                tmp_text = get_weibo(item['_source'])

                all_text_dict[iter_mid] = tmp_text
                duplicate_text_list.append({"_id":iter_mid, "title": "", "content":iter_text.decode("utf-8",'ignore')})

                if iter_sensitive:
                    sensitive_words_dict[iter_mid] = iter_sensitive

                keywords_dict = json.loads(item['_source']['keywords_dict'])
                personal_keywords_dict = dict()
                for k, v in keywords_dict.iteritems():
                    k = k.encode('utf-8', 'ignore')
                    personal_keywords_dict[k] = v

                classify_text_dict[iter_mid] = personal_keywords_dict
                classify_mid_list.append(iter_mid)

            # deduplicate
            print "start duplicate:",'----'
            if duplicate_text_list:
                dup_results = duplicate(duplicate_text_list)
                for item in dup_results:
                    if item['duplicate']:
                        duplicate_dict[item['_id']] = item['same_from']
            print '----', "duplicate finished:"

            # classify
            print "start classify:",'----'
            mid_value = dict()
            if classify_text_dict:
                classify_results = topic_classfiy(classify_mid_list, classify_text_dict)

                for k,v in classify_results.iteritems(): # mid:value
                    mid_value[k]=v[0]
            print '----', "classify finished:"
                    
        mid_list = all_text_dict.keys()
        mid_duplicate_list = set(duplicate_dict.keys())|set(duplicate_dict.values())
        intersection_list = set(mid_list)-(set(duplicate_dict.keys())|set(duplicate_dict.values()))
        print "final mid:", len(mid_list)
        print "duplicate mid:", len(mid_duplicate_list)
        print "duplicate:", len(set(duplicate_dict.values()))
        print "single: ", len(intersection_list)

        # invert the dict: duplicate target -> list of its duplicates
        reverse_duplicate_dict = defaultdict(list)
        for k,v in duplicate_dict.iteritems():
            reverse_duplicate_dict[v].append(k)

        for term in intersection_list:
            reverse_duplicate_dict[term] = [term]

        bulk_action = []
        count = 0
        for id in reverse_duplicate_dict.keys():    
            iter_dict = dict()

            inter_mid_list = []
            inter_mid_list.append(id)
            inter_mid_list.extend(reverse_duplicate_dict[id])


            # determine the initiator (earliest post in the cluster)
            timestamp_list = []
            for mid in inter_mid_list:
                timestamp_list.append(all_text_dict[mid]['timestamp'])

            mid_initial = inter_mid_list[timestamp_list.index(min(timestamp_list))]


            # determine the pusher (most-retweeted post in the cluster)
            push_list = []
            for mid in inter_mid_list:
                if origin_weibo_detail.has_key(mid):
                    retweeted_stat = origin_weibo_detail[mid]['retweeted_stat']
                elif retweeted_weibo_detail.has_key(mid):
                    retweeted_stat = retweeted_weibo_detail[mid]['retweeted_stat']
                else:
                    retweeted_stat = 0
                push_list.append(retweeted_stat)

            mid_push = inter_mid_list[push_list.index(max(push_list))]
            mid = mid_push

            if origin_weibo_detail.has_key(mid):
                iter_dict.update(origin_weibo_detail[mid])   # update() merges the detail dict's key/value pairs into iter_dict
                iter_dict["type"] = 1
            elif retweeted_weibo_detail.has_key(mid):
                iter_dict.update(retweeted_weibo_detail[mid])
                iter_dict["type"] = 0
            else:
                iter_dict["retweeted_stat"] = 0
                iter_dict["comment_stat"] = 0
                iter_dict["type"] = -1


            # iter_dict["name"] = ''      # 
            iter_dict["heat"] = iter_dict["retweeted_stat"] + iter_dict["comment_stat"]     
            iter_dict["status"] = 0      # 是否加入监测
            iter_dict["delete"] = 0      # 是否删除
            iter_dict["topic_field"] = eng2chi_dict[mid_value[mid]]    # 分类标签
            iter_dict["detect_ts"] = ts       # 感知开始时间
            iter_dict["initiator"] = all_text_dict[mid_initial]['uid']       # 发起者
            iter_dict["push"] = all_text_dict[mid_push]['uid']       # 发起者


            iter_dict.update(all_text_dict[mid])
            count += 1

            _id = mid
            bulk_action.extend([{"index":{"_id": _id}}, iter_dict])
            if count % 500 == 0:
                es_sensor.bulk(bulk_action, index=index_content_sensing, doc_type=type_content_sensing, timeout=600)
                bulk_action = []

        if bulk_action:
            es_sensor.bulk(bulk_action, index=index_content_sensing, doc_type=type_content_sensing)


    return "1"
Code Example #10
def compute_safe_num(xnr_user_no):
    if S_TYPE == 'test':
        current_time = datetime2ts(S_DATE) - DAY
    else:
        current_time = int(time.time()-DAY)

    current_date = ts2datetime(current_time)

    index_name = flow_text_index_name_pre + current_date

    # average number of posts of the top-100 most active users
    query_body = {
        'query':{
            'match_all':{}
        },
        'sort':{'activeness':{'order':'desc'}},
        'size':TOP_ASSESSMENT_NUM
    }
    top_active_users = es_user_portrait.search(index=portrait_index_name,doc_type=portrait_index_type,\
                body=query_body)['hits']['hits']
    top_active_uid_list = []
    for user in top_active_users:
        user = user['_source']
        top_active_uid_list.append(user['uid'])

    query_body_count = {
        'query':{
            'filtered':{
                'filter':{
                    'terms':{'uid':top_active_uid_list}
                }
            }
        }
    }
    es_count_results = es_flow_text.count(index=index_name,doc_type=flow_text_index_type,body=query_body_count)

    if es_count_results['_shards']['successful'] != 0:
        tweets_count = es_count_results['count']
        tweets_top_avg = float(tweets_count)/TOP_ASSESSMENT_NUM
    else:
        print 'es index rank error'
        tweets_top_avg = 0

    # number of posts by the current virtual persona (XNR)
    uid = xnr_user_no2uid(xnr_user_no)
    if S_TYPE == 'test':
        uid = ACTIVE_UID
    xnr_query_body_count = {
        'query':{
            'filtered':{
                'filter':{
                    'term':{'uid':uid}
                }
            }
        }
    }
    es_xnr_count_results = es_flow_text.count(index=index_name,doc_type=flow_text_index_type,body=xnr_query_body_count)

    if es_xnr_count_results['_shards']['successful'] != 0:
        xnr_tweets_count = es_xnr_count_results['count']
    else:
        print 'es index rank error'
        xnr_tweets_count = 0
    try:
        active_mark = float(xnr_tweets_count)/tweets_top_avg
    except:
        active_mark = 0

    ## compute the score
    topic_distribute_dict = get_tweets_distribute(xnr_user_no)
    domain_distribute_dict = get_follow_group_distribute(xnr_user_no)

    topic_mark = topic_distribute_dict['mark']
    domain_mark = domain_distribute_dict['mark']
    print 'active_mark::',active_mark
    print 'topic_mark:::',topic_mark
    print 'domain_mark::',domain_mark

    safe_mark = float(active_mark+topic_mark+domain_mark)/3
    safe_mark = round(safe_mark*100,2)
    return safe_mark
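For instance, with the three sub-scores below (made-up values), the safety score is the rounded percentage of their mean:

active_mark, topic_mark, domain_mark = 0.8, 0.65, 0.9   # hypothetical sub-scores
safe_mark = round(float(active_mark + topic_mark + domain_mark) / 3 * 100, 2)
# safe_mark == 78.33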