def get_sensitive_user_detail(uid_list, date, sensitive):
    """Build one detail row per uid for the given day.

    Row layout: [uid, nick_name, user_location, fansnum, statusnum, bci_index],
    with a trailing list of sensitive words appended only when `sensitive` is
    truthy (callers of this variant expect 6 columns otherwise).

    :param uid_list: list of user id strings to look up
    :param date: date string like '2013-09-01' (dashes are stripped to form
                 the daily bci index name, e.g. 20130901)
    :param sensitive: truthy -> also fetch the user's sensitive-word hash
    :return: list of rows, one per uid, in input order
    """
    results = []
    # Robustness fix: an empty id list would make the ES mget calls fail,
    # so short-circuit before touching the cluster.
    if not uid_list:
        return results
    index_name = str(date).replace('-', '')  # index_name: 20130901
    user_bci_results = es_cluster.mget(index=index_name, doc_type='bci', body={'ids': uid_list}, _source=True)['docs']
    user_profile_results = es_user_profile.mget(index="weibo_user", doc_type="user", body={"ids": uid_list}, _source=True)['docs']
    for i, uid in enumerate(uid_list):
        personal_info = [''] * 6
        personal_info[0] = uid
        if user_profile_results[i]['found']:
            profile_dict = user_profile_results[i]['_source']
            personal_info[1] = profile_dict['nick_name']
            personal_info[2] = profile_dict['user_location']
            personal_info[3] = profile_dict['fansnum']
            personal_info[4] = profile_dict['statusnum']
        if user_bci_results[i]['found']:
            # bci doc may lack 'user_index'; default influence to 0
            personal_info[5] = user_bci_results[i]['_source'].get('user_index', 0)
        else:
            personal_info[5] = 0
        if sensitive:
            # redis hash 'sensitive_YYYYMMDD' maps uid -> json {word: count}
            sensitive_words = r_cluster.hget('sensitive_' + index_name, str(uid))
            if sensitive_words:
                sensitive_dict = json.loads(sensitive_words)
                personal_info.append(sensitive_dict.keys())
            else:
                personal_info.append([])
        results.append(personal_info)
    return results
def get_sensitive_user_detail(uid_list, date, sensitive):
    """Return one detail row per uid: [uid, uname, location, fansnum,
    statusnum, influence] plus a trailing sensitive-word list (empty list
    when `sensitive` is falsy or no words are recorded).

    Influence is the user's daily bci value rescaled against the day's top
    value onto a 0-100 log scale.
    """
    es_cluster = es_user_profile  # local alias kept from the original; unused below
    ts = datetime2ts(date)
    index_name = pre_influence_index + str(date).replace('-', '')  # e.g. ...20130901
    bci_docs = es_bci.mget(index=index_name, doc_type='bci',
                           body={'ids': uid_list}, _source=False,
                           fields=['user_index'])['docs']
    profile_docs = es_user_profile.mget(index="weibo_user", doc_type="user",
                                        body={"ids": uid_list}, _source=True)['docs']
    top_influnce_value = get_top_value("user_index", es_bci, index_name, "bci")
    detail_rows = []
    for idx, uid in enumerate(uid_list):
        # defaults: uid doubles as the display name until a nick_name is found
        row = [uid, uid, '', '', '', '']
        profile_doc = profile_docs[idx]
        if profile_doc['found']:
            source = profile_doc['_source']
            if source['nick_name']:
                row[1] = source['nick_name']
            row[2] = source['user_location']
            row[3] = source['fansnum']
            row[4] = source['statusnum']
        bci_doc = bci_docs[idx]
        if not bci_doc['found']:
            row[5] = 0
        else:
            try:
                raw_bci = bci_doc['fields']['user_index'][0]
                # map onto [0, 100] relative to the day's maximum
                row[5] = math.log(raw_bci / float(top_influnce_value) * 9 + 1, 10) * 100
            except:
                row[5] = 0
        if sensitive:
            words_json = redis_cluster.hget('sensitive_' + str(ts), str(uid))
            row.append(json.loads(words_json).keys() if words_json else [])
        else:
            row.append([])
        detail_rows.append(row)
    return detail_rows
def get_sensitive_user_detail(uid_list, date, sensitive):
    """Per-uid detail rows for one day.

    Row layout: [uid, uname, user_location, fansnum, statusnum, influence]
    plus a trailing list of the user's sensitive words ([] when `sensitive`
    is falsy or nothing is recorded).

    :param uid_list: user id strings, looked up in order
    :param date: date string like '2013-09-01'
    :param sensitive: truthy -> also read the redis sensitive-word hash
    """
    es_cluster = es_user_profile  # local alias; not referenced again below
    ts = datetime2ts(date)  # timestamp key for the redis sensitive hash
    results = []
    index_name = pre_influence_index + str(date).replace('-','') # index_name:20130901
    # daily bci values; only the 'user_index' field is needed, so skip _source
    user_bci_results = es_bci.mget(index=index_name, doc_type='bci', body={'ids':uid_list}, _source=False, fields=['user_index'])['docs']
    user_profile_results = es_user_profile.mget(index="weibo_user", doc_type="user", body={"ids":uid_list}, _source=True)['docs']
    # day's maximum user_index, used to normalize influence onto 0-100
    top_influnce_value = get_top_value("user_index", es_bci, index_name, "bci")
    for i in range(0, len(uid_list)):
        personal_info = ['']*6
        uid = uid_list[i]
        personal_info[0] = uid_list[i]
        # uid doubles as the display name until a nick_name is found
        personal_info[1] = uid_list[i]
        if user_profile_results[i]['found']:
            profile_dict = user_profile_results[i]['_source']
            uname = profile_dict['nick_name']
            if uname:
                personal_info[1] = uname
            personal_info[2] = profile_dict['user_location']
            personal_info[3] = profile_dict['fansnum']
            personal_info[4] = profile_dict['statusnum']
        if user_bci_results[i]['found']:
            try:
                tmp_bci = user_bci_results[i]['fields']['user_index'][0]
                # log-rescale against the day's top value onto [0, 100]
                influence = math.log(tmp_bci/float(top_influnce_value)*9+1, 10)*100
                personal_info[5] = influence
            except:
                # missing field / zero top value -> no influence score
                personal_info[5] = 0
        else:
            personal_info[5] = 0
        if sensitive:
            # redis hash 'sensitive_<ts>' maps uid -> json {word: count}
            sensitive_words = redis_cluster.hget('sensitive_' + str(ts), str(uid))
            if sensitive_words:
                sensitive_dict = json.loads(sensitive_words)
                personal_info.append(sensitive_dict.keys())
            else:
                personal_info.append([])
        else:
            personal_info.append([])
        results.append(personal_info)
    return results
def get_sensitive_user_detail(uid_list, date, sensitive):
    """Return per-uid detail rows for the daily bci index named after `date`.

    Each row is [uid, nick_name, user_location, fansnum, statusnum, bci_index];
    a list of sensitive words is appended only when `sensitive` is truthy
    (this variant leaves the row at 6 columns otherwise).

    :param uid_list: user ids to resolve, in order
    :param date: '2013-09-01'-style date string
    :param sensitive: truthy -> also read the redis sensitive-word hash
    :return: list of rows, or [] for an empty uid_list
    """
    results = []
    # Robustness fix: mget with an empty ids list raises, so bail out first.
    if not uid_list:
        return results
    index_name = str(date).replace('-', '')  # index_name: 20130901
    user_bci_results = es_cluster.mget(index=index_name, doc_type='bci',
                                       body={'ids': uid_list}, _source=True)['docs']
    user_profile_results = es_user_profile.mget(index="weibo_user", doc_type="user",
                                                body={"ids": uid_list}, _source=True)['docs']
    for i, uid in enumerate(uid_list):
        personal_info = [''] * 6
        personal_info[0] = uid
        if user_profile_results[i]['found']:
            profile_dict = user_profile_results[i]['_source']
            personal_info[1] = profile_dict['nick_name']
            personal_info[2] = profile_dict['user_location']
            personal_info[3] = profile_dict['fansnum']
            personal_info[4] = profile_dict['statusnum']
        if user_bci_results[i]['found']:
            # default to 0 when the bci doc lacks a 'user_index' field
            personal_info[5] = user_bci_results[i]['_source'].get('user_index', 0)
        else:
            personal_info[5] = 0
        if sensitive:
            # redis hash 'sensitive_YYYYMMDD' maps uid -> json {word: count}
            sensitive_words = r_cluster.hget('sensitive_' + index_name, str(uid))
            if sensitive_words:
                sensitive_dict = json.loads(sensitive_words)
                personal_info.append(sensitive_dict.keys())
            else:
                personal_info.append([])
        results.append(personal_info)
    return results
def full_text_search(keywords, uid, start_time, end_time, size): results = [] uid_list = [] user_profile_list = [] query_body = { "query": { "filtered":{ "filter":{ "bool": { "must": [] } } } }, "size":size, "sort":{"timestamp":{"order": 'desc'}} } if RUN_TYPE: query_body["sort"] = {"user_fansnum":{"order": 'desc'}} if uid: query_body["query"]["filtered"]["filter"]["bool"]["must"].append({"term":{"uid":uid}}) if keywords: keywords_list = keywords.split(',') for word in keywords_list: query_body["query"]["filtered"]["filter"]["bool"]["must"].append({'wildcard':{'text':{'wildcard':'*'+word+'*'}}}) index_list = [] exist_bool = es_flow_text.indices.exists(index="flow_text_"+end_time) if start_time: start_ts = datetime2ts(start_time) end_ts = datetime2ts(end_time) ts = end_ts while 1: index_name = "flow_text_"+ts2datetime(ts) exist_bool = es_flow_text.indices.exists(index=index_name) if exist_bool: index_list.append(index_name) if ts == start_ts: break else: ts -= 3600*24 print index_list # 没有可行的es if not index_list: return [] search_results = es_flow_text.search(index=index_list, doc_type="text", body=query_body)["hits"]["hits"] for item in search_results: uid_list.append(item['_source']['uid']) history_max = get_history_max() personal_field = ["nick_name", "fansnum", "statusnum","user_location"] user_info = get_user_profile(uid_list, personal_field) bci_results = ES_CLUSTER_FLOW1.mget(index="bci_history", doc_type="bci", body={"ids":uid_list}, _source=False, fields=["bci_day_last"])["docs"] sensitive_results = es_sensitive_history.mget(index="sensitive_history", doc_type="sensitive", body={"ids":uid_list}, _source=False, fields=["last_value"])["docs"] count = 0 for item in search_results: item = item['_source'] uid_list.append(item['uid']) iter_item = [] iter_item.append(item['uid']) iter_item.append(user_info[count][1]) iter_item.append(item['text']) iter_item.append(ts2date(item['timestamp'])) iter_item.append(item['geo']) if item.get("sensitive_words_string", ''): 
iter_item.append(item['sensitive_words_string'].split('&')) else: iter_item.append([]) iter_item.append(item.get('retweeted', 0)) iter_item.append(item.get('comment', 0)) count += 1 results.append(iter_item) user_set = set() count = 0 for item in user_info: if item[0] in user_set: continue else: user_set.add(item[0]) if bci_results[count]["found"]: bci_value = bci_results[count]["fields"]["bci_day_last"][0] item.append(normalize_index(bci_value, history_max["max_bci"])) else: item.append(0) if sensitive_results[count]["found"]: sensitive_value = sensitive_results[count]['fields']['last_value'][0] item.append(normalize_index(sensitive_value, history_max["max_sensitive"])) else: item.append(0) user_profile_list.append(item) return results, user_profile_list
def influenced_user_detail(uid, date, origin_retweeted_mid, retweeted_retweeted_mid, message_type, default_number=20):
    """Aggregate statistics about the users influenced by `uid`'s weibos.

    Collects the uids who forwarded/commented the given mids, splits them into
    in-portrait / out-of-portrait users, and aggregates domain, topic, geo and
    average bci influence over the in-portrait group.

    :param uid: the influencing user's id
    :param date: '2013-09-01'-style date string (also selects the bci index)
    :param origin_retweeted_mid: mids of `uid`'s original weibos (root_uid match)
    :param retweeted_retweeted_mid: mids of forwarded weibos (directed_uid match)
    :param message_type: message type term added to both queries
    :param default_number: cap on the user-url lists returned
    :return: dict with keys 'domian' (sic — key typo kept: consumers read it),
             'topic', 'geo', 'influence', 'in_portrait(_number)',
             'out_portrait(_number)', 'total_number'
    """
    query_body = {
        "query":{
            "filtered":{
                "filter":{
                    "bool":{
                        "must": [
                        ]
                    }
                }
            }
        },
        "size":20000,
    }
    if RUN_TYPE == 1:
        query_body["sort"] = {"user_fansnum":{"order":"desc"}}
    # detailed list of influenced users
    date1 = str(date).replace('-', '')
    index_name = pre_index + date1
    index_flow_text = pre_text_index + date
    origin_retweeted_uid = [] # influenced user uid_list
    retweeted_retweeted_uid = []
    origin_comment_uid = []
    retweeted_comment_uid = []
    query_origin = copy.deepcopy(query_body)
    query_retweeted = copy.deepcopy(query_body)
    if origin_retweeted_mid: # all users who forwarded these original weibos
        query_origin["query"]["filtered"]["filter"]["bool"]["must"].append({"terms": {"root_mid": origin_retweeted_mid}})
        query_origin["query"]["filtered"]["filter"]["bool"]["must"].extend([{"term":{"message_type": message_type}}, {"term":{"root_uid": uid}}])
        origin_retweeted_result = es.search(index=index_flow_text, doc_type=flow_text_index_type, body=query_origin, fields=["uid"])["hits"]["hits"]
        if origin_retweeted_result:
            for item in origin_retweeted_result:
                origin_retweeted_uid.append(item["fields"]["uid"][0])
    # NOTE(review): the original comment said "users who commented", but this
    # branch filters on directed_uid for the retweeted mids — verify intent.
    if retweeted_retweeted_mid:
        query_retweeted["query"]["filtered"]["filter"]["bool"]["must"].append({"terms": {"root_mid": retweeted_retweeted_mid}})
        query_retweeted["query"]["filtered"]["filter"]["bool"]["must"].extend([{"term":{"message_type": message_type}},{"term": {"directed_uid": uid}}])
        retweeted_retweeted_result = es.search(index=index_flow_text, doc_type=flow_text_index_type, body=query_retweeted, fields=["uid"])["hits"]["hits"]
        if retweeted_retweeted_result:
            for item in retweeted_retweeted_result:
                retweeted_retweeted_uid.append(item["fields"]["uid"][0])
    retweeted_uid_list = [] # all retweeted user list
    retweeted_results = {} # statistics of all retweeted uid information
    retweeted_domain = {}
    retweeted_topic = {}
    retweeted_geo = {}
    bci_results = {}
    in_portrait = []
    out_portrait = []
    average_influence = 0
    total_influence = 0
    count = 0
    all_uid_set = set(origin_retweeted_uid) | set(retweeted_retweeted_uid)
    retweeted_uid_list.extend(origin_retweeted_uid)
    retweeted_uid_list.extend(retweeted_retweeted_uid)
    # drop duplicates and the influencer himself
    retweeted_uid_list = list(set(retweeted_uid_list) - set([uid])) # filter uids
    if retweeted_uid_list:
        user_portrait_result = es_user_portrait.mget(index=user_portrait, doc_type=portrait_index_type, body={"ids": retweeted_uid_list}, fields=["domain", "topic_string", "activity_geo_dict","importance", "influence"])["docs"]
        bci_index = "bci_" + date.replace('-', '')
        bci_results = es_cluster.mget(index=bci_index, doc_type="bci", body={"ids":retweeted_uid_list}, fields=['user_index'])["docs"]
        for item in user_portrait_result:
            if item["found"]:
                temp = []
                count += 1
                temp.append(item['_id'])
                temp.append(item["fields"]["importance"][0])
                in_portrait.append(temp)
                # '&'-joined strings in the portrait doc -> lists
                temp_domain = item["fields"]["domain"][0].split('&')
                temp_topic = item["fields"]["topic_string"][0].split('&')
                # last element of the stored geo history; keys are place names
                # (presumably — verify against the portrait writer)
                temp_geo = json.loads(item["fields"]["activity_geo_dict"][0])[-1].keys()
                #total_influence += item["fields"]["influence"][0]
                retweeted_domain = aggregation(temp_domain, retweeted_domain)
                retweeted_topic = aggregation(temp_topic, retweeted_topic)
                retweeted_geo = aggregation(temp_geo, retweeted_geo)
            else:
                out_portrait.append(item['_id'])
        # convert raw counts to proportions
        retweeted_domain = proportion(retweeted_domain)
        retweeted_topic = proportion(retweeted_topic)
        retweeted_geo = proportion(retweeted_geo)
    if bci_results:
        total_influence = 0
        for item in bci_results:
            if item['found']:
                total_influence += item['fields']['user_index'][0]
    try:
        # integer division under Python 2 when both operands are ints
        average_influence = total_influence/len(retweeted_uid_list)
    except:
        # empty uid list -> ZeroDivisionError -> no average
        average_influence = 0
    sorted_retweeted_domain = sorted(retweeted_domain.items(),key=lambda x:x[1], reverse=True)
    sorted_retweeted_topic = sorted(retweeted_topic.items(),key=lambda x:x[1], reverse=True)
    sorted_retweeted_geo = sorted(retweeted_geo.items(), key=lambda x:x[1], reverse=True)
    retweeted_results["domian"] = sorted_retweeted_domain[:5]
    retweeted_results["topic"] = sorted_retweeted_topic[:5]
    retweeted_results["geo"] = sorted_retweeted_geo[:5]
    retweeted_results["influence"] = average_influence
    # rank in-portrait users by importance, keep only their uids
    in_portrait = sorted(in_portrait, key=lambda x:x[1], reverse=True)
    temp_list = []
    for item in in_portrait:
        temp_list.append(item[0])
    retweeted_results['in_portrait_number'] = len(temp_list)
    retweeted_results['out_portrait_number'] = len(out_portrait)
    in_portrait_url = get_user_url(temp_list[:default_number])
    out_portrait_url = get_user_url(out_portrait[:default_number])
    retweeted_results["in_portrait"] = in_portrait_url
    retweeted_results["out_portrait"] = out_portrait_url
    retweeted_results["total_number"] = len(temp_list) + len(out_portrait)
    return retweeted_results
def influenced_people(uid, mid, influence_style, date, default_number=20):
    """Aggregate the users influenced by a single weibo `mid` of user `uid`.

    Looks up the weibo to decide whether it is original (no root_mid) or a
    forward, builds the matching retweet/comment query, then aggregates the
    responders' portrait domains/topics/geo and average bci influence.

    :param uid: author (or forwarder) of the weibo
    :param mid: the weibo id; for forwards its root_mid is resolved first
    :param influence_style: 0 -> retweeters (message_type 3),
                            otherwise commenters (message_type 2)
    :param date: '2013-09-01'-style date string (selects indices)
    :param default_number: cap on the user-url lists returned
    :return: {'influence_users': [in_portrait_urls, out_portrait_urls],
              'influence_distribution': {...}} — note the 'domian' key typo
              is kept because consumers read it.
    """
    # uid
    # which weibo----mid, retweeted weibo ---seek for root_mid
    # influence_style: retweeted(0) or comment(1)
    date1 = ts2datetime(datetime2ts(date)).replace('-', '')
    index_name = pre_index + date1
    index_flow_text = pre_text_index + date
    text_result = es.get(index=index_flow_text, doc_type=flow_text_index_type, id=mid)["_source"]
    temp_mid = text_result.get("root_mid",'') # decide whether the weibo is original
    if temp_mid:
        mid_type = 1 # not an original weibo (a forward)
    else:
        mid_type = 0 # original weibo
    query_body = {
        "query":{
            "filtered":{
                "filter":{
                    "bool":{
                        "must":[
                        ]
                    }
                }
            }
        },
        "size": 30000
    }
    if RUN_TYPE:
        query_body["sort"] = {"user_fansnum":{"order":"desc"}}
    if int(mid_type) == 0:
        if int(influence_style) == 0: # origin weibo, all retweeted people
            query_body["query"]["filtered"]["filter"]["bool"]["must"].extend([{"term": {"root_uid": uid}}, {"term": {"message_type": 3}}, {"term": {"root_mid": mid}}])
        else: # commented people
            query_body["query"]["filtered"]["filter"]["bool"]["must"].extend([{"term": {"directed_uid": uid}}, {"term": {"message_type": 2}}, {"term": {"root_mid": mid}}])
    else:
        # forwarded weibo: anchor on the root weibo's mid instead
        if int(influence_style) == 0: # origin weibo, all retweeted people
            query_body["query"]["filtered"]["filter"]["bool"]["must"].extend([{"term": {"directed_uid": uid}}, {"term": {"message_type": 3}}, {"term": {"root_mid": temp_mid}}])
        else: # commented people
            query_body["query"]["filtered"]["filter"]["bool"]["must"].extend([{"term": {"directed_uid": uid}}, {"term": {"message_type": 2}}, {"term": {"root_mid": temp_mid}}])
    search_results = es.search(index=index_flow_text, doc_type=flow_text_index_type, body=query_body, _source=False, fields=["uid"], timeout=30)["hits"]["hits"]
    results = [] # uid_list
    if search_results:
        for item in search_results:
            # exclude the influencer himself
            if int(item["fields"]["uid"][0]) == int(uid):
                pass
            else:
                results.append(item["fields"]["uid"][0])
        results = list(set(results))
    else:
        results = []
    bci_index = "bci_" + date.replace('-','')
    if results:
        portrait_results = es_user_portrait.mget(index=user_portrait, doc_type=portrait_index_type, body={"ids": results}, fields=["domain", "topic_string", "activity_geo_dict","importance", "influence"])["docs"]
        bci_results = es_cluster.mget(index=bci_index, doc_type='bci', body={"ids":results}, fields=['user_index'])['docs']
    else:
        portrait_results = {}
        bci_results = {}
    in_portrait = []
    out_portrait = []
    in_portrait_info = []
    retweeted_domain = {}
    retweeted_topic = {}
    retweeted_geo = {}
    average_influence = 0
    total_influence = 0
    count = 0
    if bci_results:
        total_influence = 0
        for item in bci_results:
            if item['found']:
                total_influence += item['fields']['user_index'][0]
    try:
        # integer division under Python 2 when both operands are ints
        average_influence = total_influence/len(results)
    except:
        # empty results -> ZeroDivisionError -> no average
        average_influence = 0
    if portrait_results:
        for item in portrait_results:
            if item["found"]:
                temp = []
                count += 1
                temp.append(item['_id'])
                temp.append(item["fields"]["importance"][0])
                in_portrait.append(temp)
                # '&'-joined strings in the portrait doc -> lists
                temp_domain = item["fields"]["domain"][0].split('&')
                temp_topic = item["fields"]["topic_string"][0].split('&')
                # last element of the stored geo history; keys are place names
                # (presumably — verify against the portrait writer)
                temp_geo = json.loads(item["fields"]["activity_geo_dict"][0])[-1].keys()
                #total_influence += item["fields"]["influence"][0]
                retweeted_domain = aggregation(temp_domain, retweeted_domain)
                retweeted_topic = aggregation(temp_topic, retweeted_topic)
                retweeted_geo = aggregation(temp_geo, retweeted_geo)
            else:
                out_portrait.append(item['_id'])
        # convert raw counts to proportions
        retweeted_domain = proportion(retweeted_domain)
        retweeted_topic = proportion(retweeted_topic)
        retweeted_geo = proportion(retweeted_geo)
        #try:
        #    average_influence = total_influence/count
        #except:
        #    average_influence = 0
    sorted_retweeted_domain = sorted(retweeted_domain.items(),key=lambda x:x[1], reverse=True)
    sorted_retweeted_topic = sorted(retweeted_topic.items(),key=lambda x:x[1], reverse=True)
    sorted_retweeted_geo = sorted(retweeted_geo.items(), key=lambda x:x[1], reverse=True)
    retweeted_results = dict()
    retweeted_results["domian"] = sorted_retweeted_domain[:5]
    retweeted_results["topic"] = sorted_retweeted_topic[:5]
    retweeted_results["geo"] = sorted_retweeted_geo[:5]
    retweeted_results["influence"] = average_influence
    # rank in-portrait users by importance, keep only their uids
    in_portrait = sorted(in_portrait, key=lambda x:x[1], reverse=True)
    temp_list = []
    for item in in_portrait:
        temp_list.append(item[0])
    retweeted_results['in_portrait_number'] = len(temp_list)
    retweeted_results['out_portrait_number'] = len(out_portrait)
    in_portrait_url = get_user_url(temp_list[:default_number])
    out_portrait_url = get_user_url(out_portrait[:default_number])
    return_results = dict()
    return_results["influence_users"] = [in_portrait_url, out_portrait_url]
    return_results["influence_distribution"] = retweeted_results
    return return_results
def full_text_search(keywords, uid, start_time, end_time, size): results = [] uid_list = [] user_profile_list = [] query_body = { "query": { "bool": { "must": [] } }, "size":size, "sort":{"timestamp":{"order": 'desc'}} } if RUN_TYPE: query_body["sort"] = {"user_fansnum":{"order": 'desc'}} if uid: query_body["query"]["bool"]["must"].append({"term":{"uid":uid}}) if keywords: keywords_list = keywords.split(',') for word in keywords_list: query_body["query"]["bool"]["must"].append({'wildcard':{'text':{'wildcard':'*'+word+'*'}}}) index_list = [] exist_bool = es_flow_text.indices.exists(index="flow_text_"+end_time) if start_time: start_ts = datetime2ts(start_time) end_ts = datetime2ts(end_time) ts = end_ts while 1: index_name = "flow_text_"+ts2datetime(ts) exist_bool = es_flow_text.indices.exists(index=index_name) if exist_bool: index_list.append(index_name) if ts == start_ts: break else: ts -= 3600*24 print index_list # 没有可行的es if not index_list: return [[], []] search_results = es_flow_text.search(index=index_list, doc_type="text", body=query_body)["hits"]["hits"] for item in search_results: uid_list.append(item['_source']['uid']) user_info = [] if uid_list: history_max = get_history_max() personal_field = ["nick_name", "fansnum", "statusnum","user_location"] user_info = get_user_profile(uid_list, personal_field) bci_results = ES_CLUSTER_FLOW1.mget(index="bci_history", doc_type="bci", body={"ids":uid_list}, _source=False, fields=["bci_day_last"])["docs"] in_portrait = es_user_portrait.mget(index="sensitive_user_portrait", doc_type="user", body={"ids":uid_list}, _source=False)["docs"] sensitive_results = es_sensitive_history.mget(index="sensitive_history", doc_type="sensitive", body={"ids":uid_list}, _source=False, fields=["last_value"])["docs"] print "len search: ", len(search_results) count = 0 # uid uname text date geo sensitive_words retweeted comment for item in search_results: item = item['_source'] uid_list.append(item['uid']) iter_item = [] 
iter_item.append(item['uid']) iter_item.append(user_info[count][1]) iter_item.append(item['text']) iter_item.append(ts2date(item['timestamp'])) iter_item.append(item['geo']) if item.get("sensitive_words_string", ''): iter_item.append(item['sensitive_words_string'].split('&')) else: iter_item.append([]) iter_item.append(item.get('retweeted', 0)) iter_item.append(item.get('comment', 0)) count += 1 results.append(iter_item) user_set = set() count = 0 # uid "nick_name", "fansnum", "statusnum","user_location", bci, sensitive for item in user_info: if item[0] in user_set: continue else: user_set.add(item[0]) if bci_results[count]["found"]: if bci_results[count].has_key("fields"): bci_value = bci_results[count]["fields"]["bci_day_last"][0] else: bci_value = 0 item.append(normalize_index(bci_value, history_max["max_bci"])) else: item.append(0) if sensitive_results[count]["found"]: if sensitive_results[count].has_key("fields"): sensitive_value = sensitive_results[count]['fields']['last_value'][0] else: sensitive_value = 0 item.append(normalize_index(sensitive_value, history_max["max_sensitive"])) else: item.append(0) if in_portrait[count]["found"]: item.append("1") else: item.append("0") user_profile_list.append(item) return results, user_profile_list