Example #1
File: main.py Project: xiaoge56/lol
# Python 2 imports required by this snippet; spider is the project's own helper module.
import logging
import urllib
import urllib2

import spider

def find_user_matchIDs(username):
    """Search the target user's latest 3 pages of match IDs."""
    matchid = []
    serverName = r'网通三'
    playerName = username

    matchId_by_name_url = r'http://lolbox.duowan.com/matchList.php?serverName=%s&playerName=%s' % (
        serverName, urllib.quote(playerName))
    req = spider.http_header(matchId_by_name_url)
    html = urllib2.urlopen(req).read()
    soup_html = spider.BeautifulSoup(html, "html.parser")
    page_number = int(spider.get_page_limit(soup_html))
    t = spider.find_match_id(soup_html)
    matchid.extend(t)  # record page 1 now to avoid querying it again below
    # print 'page %s has %s records, %s in total so far' % (1, len(t), len(matchid))

    if page_number <= 2:
        logging.debug('%s has too little data, excluded from statistics' % username)
        return []

    for n_page in range(1, 4):  # fetch pages 2-4
        matchId_by_name_url = r'http://lolbox.duowan.com/matchList.php?serverName=%s&playerName=%s&page=%s' % (
            serverName, urllib.quote(playerName), str(n_page + 1))
        req = spider.http_header(matchId_by_name_url)
        html = urllib2.urlopen(req).read()
        soup_html = spider.BeautifulSoup(html, "html.parser")
        temp = spider.find_match_id(soup_html)
        matchid.extend(temp)
        # print 'page %s has %s records, %s in total so far' % (n_page + 1, len(temp), len(matchid))
        if len(matchid) > 15:
            break
    return matchid
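
A minimal usage sketch, assuming the imports above are in place and the lolbox service is reachable; the player name is a made-up placeholder:

# Hypothetical driver (Python 2); 'some_player' is a placeholder name.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    ids = find_user_matchIDs(u'some_player')
    print 'collected %d match IDs' % len(ids)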
Example #2
File: main.py Project: xiaoge56/lol
# This function uses BeautifulSoup directly, so it also needs:
from bs4 import BeautifulSoup

def find_mathID_detail(matchId, user_id):
    """Return (find_users, detail_dat); the matchId argument is the match ID."""
    serverName = r'网通三'
    playerName = user_id
    battle_url = r'http://lolbox.duowan.com/matchList/ajaxMatchDetail2.php?matchId=%s&serverName=%s&playerName=%s&favorate=0' % (
        matchId, urllib.quote(serverName), urllib.quote(playerName))
    find_users = []
    req = spider.http_header(battle_url)
    html = urllib2.urlopen(req).read()
    soup_html = BeautifulSoup(html, "html.parser")
    detail_dat = spider.battle_detail_parse(soup_html)
    # The first field of each parsed row is a player name; when
    # detail_dat is empty, find_users simply stays empty.
    for item in detail_dat:
        find_users.append(item[0])
    return find_users, detail_dat
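
A hedged end-to-end sketch chaining the two helpers; the player name and the choice of the first match ID are illustrative assumptions, not part of the original project:

# Hypothetical chaining of the two functions (Python 2).
match_ids = find_user_matchIDs(u'some_player')  # placeholder name
if match_ids:
    users, details = find_mathID_detail(match_ids[0], u'some_player')
    print 'match %s involved %d players' % (match_ids[0], len(users))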