Example #1
def friend_word(keyword):
    keyword = toUnicode(keyword)
    word = toUnicode(keyword[1:])   # the keyword without its one-character prefix
    result = None
    title = "없음"                  # "no result"

    friend_word = FriendWord()
    head = ""
    if keyword.startswith("$"):     # shop => food
        result = friend_word.shop2food(word, 15)
        title = "'" + word + "' 와 같이 이야기된 음식"          # foods mentioned together with the shop
        head = "@"
    elif keyword.startswith("@"):   # food => shop
        result = friend_word.food2shop(word, 15)
        title = "'" + word + "' 의 음식과 많이 이야기된 가게"   # shops often mentioned with the food
        head = "$"
    elif keyword.startswith("+"):   # local => shop
        result = friend_word.local2shop(word, 15)
        title = "'" + word + "' 의 지역과 같이 이야기된 가게"   # shops mentioned together with the region
        head = "$"
    else:
        result = ["결과가없음"]     # "no results"
        head = ""

    result = result or []

    # Twitter results for the same word
    twitterList = []
    for row in DaumTwitter.search(word, 20):
        twitterList.append(row)

    return render_template('friend_word.html', title=title, result=result,
                           twitterList=twitterList, head=head, word=word)
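Since the function ends with render_template, it is presumably a Flask view. Below is a minimal sketch of how it might be exposed as a route; the app object, the URL rule, and the view name are assumptions for illustration, not part of the listing.

# Hypothetical wiring; only friend_word() above comes from the listing.
from flask import Flask

app = Flask(__name__)

@app.route('/friend/<keyword>')
def friend_word_page(keyword):
    # keyword arrives with its '$', '@' or '+' prefix, e.g. "$피자헛"
    return friend_word(keyword)

if __name__ == '__main__':
    app.run(debug=True)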
Example #2
def load(self):
    """Load all dictionaries: delimiters, stop words, title aliases and use dictionaries."""
    self._source_dict.clear()

    print "[BOOT] START"
    print "[STEP1] delemeter loading..."
    self._token_delemeter = {toUnicode(key): True for key in fileToData(self.__file_path__("dict_data", self["dict.delemeter"]))}
    print "[STEP1] delemeter end...(count=%d)\n" % len(self._token_delemeter)

    print "[STEP2] stop_dict loading..."
    self._stop_dict = {to_index_key(key, self._token_delemeter): True for key in fileToData(self.__file_path__("dict_data", self["dict.stop"]))}
    print "[STEP2] stop_dict end...(count=%d)\n" % len(self._stop_dict)

    print "[STEP3] title_dict loading..."
    def create_title_dict(loop):
        # Each line maps a canonical title (first column) to its alias words (remaining columns).
        ret = {}
        for line in loop:
            token = line.strip().split("\t")
            for word in token[1:]:
                ret[word] = token[0].strip()
        return ret
    self._title_dict = create_title_dict(fileToData(self.__file_path__("dict_data", self["dict.title"])))
    print "[STEP3] title_dict end...(count=%d)\n" % len(self._title_dict)

    print "[STEP4] use_dict loading..."
    for use_conf in self["dict.use"]:
        print ">>> " + use_conf["path"]
        # Each use dictionary shares self._source_dict and marks its entries with a bitmask.
        load_use_dict = BitmaskDict(use_conf["bit"], self._source_dict)
        for line in fileToData(self.__file_path__("dict_data", use_conf["path"])):
            load_use_dict[to_index_key(line, self._token_delemeter)] = None
        print ">>> load count = %d\n" % len(load_use_dict)
    print "[STEP4] use_dict end... (total count=%d)\n" % len(self._source_dict)
    print "[BOOT] END"
Example #3
def getShop2Shop(shop):
	friendword = FriendWord()
	
	shopWordList = []
	for results in friendword.shop2shop(toUnicode(shop), 10):
		word, cnt = results
		shopWordList.append(word)

	return shopWordList
Example #4
def getShop2Food(shop):
	friendword = FriendWord()
	
	foodWordList = []
	for results in friendword.shop2food(toUnicode(shop), 10):
		word, cnt = results
		foodWordList.append(word)

	return foodWordList
Example #5
def getFood2Food(food):
	friendword = FriendWord()
	
	foodWordList = []
	for results in friendword.food2food(toUnicode(food), 10):
		word, cnt = results
		foodWordList.append(word)

	return foodWordList
Example #6
def getFood2Shop(food):
	friendword = FriendWord()
	
	shopWordList = []
	for results in friendword.food2shop(toUnicode(food), 10):
		word, cnt = results
		shopWordList.append(word)
   
	return shopWordList
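The four helpers above share one pattern: query FriendWord for the top related terms and keep only the words, dropping the counts. A short usage sketch; the shop and food names are placeholders and the dictionaries must already be loaded.

if __name__ == "__main__":
    for name in getShop2Food("피자헛"):   # foods talked about together with the shop
        print name
    for name in getFood2Shop("파스타"):   # shops talked about together with the food
        print name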
Example #7
def to_index_key(self, value):
    # Build a normalized lookup key: drop delimiter characters and whitespace,
    # then lowercase whatever remains.
    if not value:
        return None

    value = toUnicode(value)
    ret = ""
    for ch in value:
        if ch not in self._token_delemeter and len(ch.strip()) > 0:
            ret += ch.lower()

    return ret
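For intuition, here is a standalone rendering of the same normalization; the delimiter table and the sample input are made up for illustration.

# Not the project's code, just the same idea as to_index_key() above.
def _index_key(value, delimiters):
    ret = u""
    for ch in value:
        if ch not in delimiters and len(ch.strip()) > 0:
            ret += ch.lower()
    return ret

print _index_key(u"Pizza-Hut No. 2", {u"-": True, u".": True})   # -> pizzahutno2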
Example #8
def split(self, text):
    # Generator: yields the tokens of text, skipping URLs and Twitter ids and
    # breaking each word on delimiter characters.
    text = toUnicode(text)
    if not text:
        yield ""
        return

    for first_word in text.split():
        if Tokenizer.isUrlPattern(first_word) or Tokenizer.isTwitterId(first_word):
            continue

        temp = ""
        for ch in first_word:
            if self.has_delemter(ch):   # delimiter: flush the token collected so far
                if len(temp) > 0:
                    yield temp
                    temp = ""
            else:
                temp += ch
        if len(temp) > 0:               # the last token of this word
            yield temp
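A minimal usage sketch. It assumes split() belongs to the Tokenizer whose __init__(delemeters={}) appears further down and that URLs are caught by isUrlPattern; both are inferences from the listing, not statements it makes.

tokenizer = Tokenizer(delemeters={u",": True, u"!": True})
for token in tokenizer.split(u"치킨, 피자! http://example.com"):
    print token    # expected: 치킨 then 피자 (the URL is skipped)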
Example #9
# Excerpted from the DaumTwitter class; requires the standard-library modules below.
import json
import urllib2

def search(keyword, limit=30):
    keyword = toUnicode(keyword)
    keyword = keyword.replace(" ", "+")
    find_url = DaumTwitter.seed_url.replace("{{kwd}}", keyword).replace("{{limit}}", str(limit))

    req = urllib2.Request(find_url)
    req.add_header('Cookie', 'uvkey=VCE4ytPUJWwAABIVgg4AAADE')   # auction check
    handle = urllib2.urlopen(req, timeout=5)
    json_data = handle.read()

    no_filter = json.loads(json_data)
    no_filter = no_filter[u'RESULT'][u'SOCIALWEB_BOARD'][u'r'][u'ds']
    if u'data' not in no_filter:
        return []
    no_filter = no_filter[u'data']

    return [{"text": str(x[u'text'].replace("<b>", "").replace("</b>", "")),
             "doc_id": str(x[u'docid']),
             "pub_date": str(x[u'pub_date']),
             "doc_url": str(x[u'doc_url']),
             "user_name": str(x[u'user_name']),
             "thumbnail_image": str(x[u'thumbnail_image'])} for x in no_filter]
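A usage sketch. DaumTwitter.seed_url (a URL template containing the {{kwd}} and {{limit}} placeholders) must already be defined on the class, and the Daum endpoint itself may no longer answer the way it did when this code was written.

for tweet in DaumTwitter.search(u"치킨", limit=10):
    print tweet["user_name"], tweet["pub_date"]
    print tweet["text"]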
Example #10
    mongo = RelationFoods(table_name="wagle2")

    # For every document, collect the food/shop/local index keys mentioned in its
    # text and store the relation in MongoDB.
    for row in generator_database(1, 267526):
        if not row:
            continue

        food_set = Set()
        shop_set = Set()
        local_set = Set()

        for word in tokenizer(row["title"] + " " + row["contents"]):
            output_word = chain_filter(word.index, titleFilter, myWordFilter)
            if not output_word:
                continue

            find_key = shared.to_index_key(toUnicode(output_word))
            if find_key in food_dict:
                food_set.add(find_key)
            if find_key in shop_dict:
                shop_set.add(find_key)
            if find_key in local_dict:
                local_set.add(find_key)

        if len(food_set) + len(shop_set) + len(local_set) > 0:
            mongo.add(row["docid"], row["regdate"],
                      [toStr(item) for item in food_set],
                      [toStr(item) for item in shop_set],
                      [toStr(item) for item in local_set])
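chain_filter is not shown anywhere in the listing. A plausible sketch of what it might do, assuming each filter takes a word and returns either a (possibly rewritten) word or None to drop it; the real helper may differ.

# Hypothetical helper, inferred from the call site above.
def chain_filter(word, *filters):
    for f in filters:
        word = f(word)
        if not word:      # any filter can drop the word by returning None/""
            return None
    return word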
Example #11
def __init__(self, delemeters={}):
    # Pre-normalize the delimiter characters to unicode so membership tests are cheap.
    self._delemeters = {toUnicode(key): True for key in delemeters}
    self.use_dict = {}
    self.tail_dict = {}
    self.head_dict = {}
Example #12
#     print "~~~~~~~~~"
#     for words in wagle_group(["안녕하세요. 저는 오빠닭에서 갈릭치킨을 먹고 싶어요. 치킨 치킨 그것은 운명 ","치킨"]):
#         word, word_cnt, doc_cnt = words
#         
#          
#         print word, word_cnt, doc_cnt
#         print "food" , isFood(word)
#         print "local", isLocal(word)
#         print "shop", isShop(word)
#         print ""
        
    """
        2. 연관단어 검색 현재 지역데이터는 안뽑혔는지 안나옴. 
    """
    friendword = FriendWord()
    # Other directions that can be queried the same way:
    # for results in friendword.shop2shop(toUnicode("피자헛"), 10):
    # for results in friendword.shop2food(toUnicode("제인스 피키 피자"), 10):
    # for results in friendword.food2food(toUnicode("치킨"), 10):
    for results in friendword.food2shop(toUnicode("파스타"), 10):
        word, cnt = results
        print "word:", word, ", count:", cnt

        
    """
        3.다음  트위터 검색결과를 의미한다. 
        하나의 단어를 검색하는 메소드와 여러개의 메소드를 검색하는 메소드 두개가 있다.
        pub_date
        user_name
        doc_id
        doc_url
        thumbnail_image