Example #1
0
def find_mathID_detail(match_id, user_id, my_object):
    """Fetch and parse the detail page for one match.

    Parameters:
        match_id  -- the match identifier to query
        user_id   -- player name used as the `playerName` URL parameter
        my_object -- shared state object; its `global_users_dat_count`
                     dict (player name -> occurrence count) is mutated here

    Returns a tuple (find_users, detail_dat):
        find_users -- list of player names (item[0]) seen in this battle
        detail_dat -- per-player battle records from battle_detail_parse
    """
    serverName = r'网通三'
    playerName = user_id

    battle_url = r'http://lolbox.duowan.com/matchList/ajaxMatchDetail2.php?matchId={0}&serverName={1}&playerName={2}&favorate=0'.format(
        match_id, urllib.quote(str(serverName)), urllib.quote(str(playerName)))

    find_users = []
    # Renamed from `re` to avoid shadowing the stdlib `re` module.
    request = read_lol_dat.http_header(battle_url)
    html = urllib2.urlopen(request).read()

    soup_html = read_lol_dat.BeautifulSoup(html, "html.parser")

    detail_dat = battle_detail_parse(soup_html, my_object)

    # Iterating an empty detail_dat is a no-op, so the original
    # `if len(detail_dat) != 0 ... else: return` branch (both paths
    # returned the same tuple) is folded into a single loop + return.
    for item in detail_dat:
        find_users.append(item[0])
        # While recording users, also update the per-user statistics.
        # `in` replaces dict.has_key(), which was removed in Python 3.
        if item[0] in my_object.global_users_dat_count:
            if my_object.global_users_dat_count[item[0]] > 20:
                # NOTE(review): the original breaks out of the WHOLE loop
                # when one user exceeds 20 (not `continue`) — preserved
                # as-is; confirm this is the intended cap behavior.
                break
            else:
                my_object.global_users_dat_count[item[0]] += 1
        else:
            my_object.global_users_dat_count[item[0]] = 1

    return find_users, detail_dat
Example #2
0
def find_mathID_detail(match_id, user_id, my_object):
    """Fetch and parse the detail page for one match.

    Parameters:
        match_id  -- the match identifier to query
        user_id   -- player name used as the `playerName` URL parameter
        my_object -- shared state object; its `global_users_dat_count`
                     dict (player name -> occurrence count) is mutated here

    Returns a tuple (find_users, detail_dat):
        find_users -- list of player names (item[0]) seen in this battle
        detail_dat -- per-player battle records from battle_detail_parse
    """
    serverName = r'网通三'
    playerName = user_id

    battle_url = r'http://lolbox.duowan.com/matchList/ajaxMatchDetail2.php?matchId={0}&serverName={1}&playerName={2}&favorate=0'.format(
        match_id, urllib.quote(str(serverName)), urllib.quote(str(playerName)))

    find_users = []
    # Renamed from `re` to avoid shadowing the stdlib `re` module.
    request = read_lol_dat.http_header(battle_url)
    html = urllib2.urlopen(request).read()

    soup_html = read_lol_dat.BeautifulSoup(html, "html.parser")

    detail_dat = battle_detail_parse(soup_html, my_object)

    # Iterating an empty detail_dat is a no-op, so the original
    # `if len(detail_dat) != 0 ... else: return` branch (both paths
    # returned the same tuple) is folded into a single loop + return.
    # Irregular indentation from the original is also normalized.
    for item in detail_dat:
        find_users.append(item[0])
        # While recording users, also update the per-user statistics.
        # `in` replaces dict.has_key(), which was removed in Python 3.
        if item[0] in my_object.global_users_dat_count:
            if my_object.global_users_dat_count[item[0]] > 20:
                # NOTE(review): the original breaks out of the WHOLE loop
                # when one user exceeds 20 (not `continue`) — preserved
                # as-is; confirm this is the intended cap behavior.
                break
            else:
                my_object.global_users_dat_count[item[0]] += 1
        else:
            my_object.global_users_dat_count[item[0]] = 1

    return find_users, detail_dat
Example #3
0
def find_user_matchIDs(username):
    """Search the target user's latest match history (up to 4 pages).

    Parameters:
        username -- player name to look up on server '网通三'

    Returns a list of match ids; returns [] when the user has 2 or
    fewer pages of history (too little data to be worth counting).
    """
    matchid = []
    serverName = r'网通三'
    playerName = username

    matchId_by_name_url = r'http://lolbox.duowan.com/matchList.php?serverName={0}&playerName={1}'.format(
        serverName, urllib.quote(str(playerName)))

    # Renamed from `re` to avoid shadowing the stdlib `re` module.
    request = read_lol_dat.http_header(matchId_by_name_url)
    html = urllib2.urlopen(request).read()

    soup_html = read_lol_dat.BeautifulSoup(html, "html.parser")

    page_nnnumber = int(get_page_limit(soup_html))

    t = find_match_id(soup_html)

    # Record page-1 data now so later pages are not re-queried.
    matchid.extend(t)

    if page_nnnumber <= 2:
        # BUG FIX: logging.DEBUG is the integer level constant; calling it
        # raised TypeError. logging.debug() is the actual logging function.
        logging.debug('%s 用户数据过少,不予统计' % (username))
        return []

    for n_page in range(1, 4):
        # BUG FIX: playerName must be URL-quoted here too — page 1 quoted
        # it, but the paged requests sent the raw (possibly non-ASCII)
        # name, producing a different/invalid query.
        matchId_by_name_url = r'http://lolbox.duowan.com/matchList.php?serverName=%s&playerName=%s&page=%s' % (
            serverName, urllib.quote(str(playerName)), str(n_page + 1))
        request = read_lol_dat.http_header(matchId_by_name_url)
        html = urllib2.urlopen(request).read()
        soup_html = read_lol_dat.BeautifulSoup(html, "html.parser")
        temp = find_match_id(soup_html)

        matchid.extend(temp)
        # 15+ ids is enough data; stop fetching further pages.
        if len(matchid) > 15:
            break

    return matchid
Example #4
0
def find_user_matchIDs(username):
    """Search the target user's latest match history (up to 4 pages).

    Parameters:
        username -- player name to look up on server '网通三'

    Returns a list of match ids; returns [] when the user has 2 or
    fewer pages of history (too little data to be worth counting).
    """
    matchid = []
    serverName = r'网通三'
    playerName = username

    matchId_by_name_url = r'http://lolbox.duowan.com/matchList.php?serverName={0}&playerName={1}'.format(
        serverName, urllib.quote(str(playerName)))

    # Renamed from `re` to avoid shadowing the stdlib `re` module.
    request = read_lol_dat.http_header(matchId_by_name_url)
    html = urllib2.urlopen(request).read()

    soup_html = read_lol_dat.BeautifulSoup(html, "html.parser")

    page_nnnumber = int(get_page_limit(soup_html))

    t = find_match_id(soup_html)

    # Record page-1 data now so later pages are not re-queried.
    matchid.extend(t)

    if page_nnnumber <= 2:
        # BUG FIX: logging.DEBUG is the integer level constant; calling it
        # raised TypeError. logging.debug() is the actual logging function.
        logging.debug('%s 用户数据过少,不予统计' % (username))
        return []

    for n_page in range(1, 4):
        # BUG FIX: playerName must be URL-quoted here too — page 1 quoted
        # it, but the paged requests sent the raw (possibly non-ASCII)
        # name, producing a different/invalid query.
        matchId_by_name_url = r'http://lolbox.duowan.com/matchList.php?serverName=%s&playerName=%s&page=%s' % (
            serverName, urllib.quote(str(playerName)), str(n_page + 1))
        request = read_lol_dat.http_header(matchId_by_name_url)
        html = urllib2.urlopen(request).read()
        soup_html = read_lol_dat.BeautifulSoup(html, "html.parser")
        temp = find_match_id(soup_html)

        matchid.extend(temp)
        # 15+ ids is enough data; stop fetching further pages.
        if len(matchid) > 15:
            break

    return matchid