Example No. 1
import math

def parseWebsite():
    log = open('../logs/parseweb.log', 'a')
    # Fetch 5000 rows per batch
    count = TempLz.select().count()
    pagesize = 5000
    pagecount = int(math.ceil(float(count) / float(pagesize)))
    for i in range(pagecount):
        datas = TempLz.select().where(TempLz.id > 18879).order_by(TempLz.id).paginate(i + 1, pagesize)
        if datas is not None:
            for d in datas:
                data = d.lzpage
                if data is not None:
                    parseData = parserCompanyAndWeb(data)
                    com = buildCompany(parseData['company'])
                    web = buildWebsite(parseData['web'])
                    if com is not None and web is not None:
                        # Attach the website to an existing company, or import the company first
                        c = Company.getOne(Company.coname == com.coname)
                        if c is not None:
                            web.regID = c
                            impWebsite(web)
                        else:
                            impCompanyInfo(com)
                            tempCom = Company.getOne(Company.regID == com.regID)
                            web.regID = tempCom
                            impWebsite(web)
                log.write(str(d.id) + "\n")
                print(d.id)
    log.flush()
    log.close()
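parseWebsite processes TempLz in fixed-size batches via peewee's paginate(). The exact TempLz schema is not shown in these snippets; the following is a minimal, self-contained sketch of that batch-pagination pattern, assuming TempLz has only the id, lzID and lzpage columns referenced above and using an in-memory SQLite database for illustration:

    from peewee import SqliteDatabase, Model, AutoField, TextField
    import math

    db = SqliteDatabase(':memory:')

    class TempLz(Model):
        # Assumed schema: only the columns referenced in the snippets above
        id = AutoField()
        lzID = TextField(null=True)
        lzpage = TextField(null=True)

        class Meta:
            database = db

    db.create_tables([TempLz])
    for n in range(12):
        TempLz.create(lzID='id-%d' % n, lzpage='<html>page %d</html>' % n)

    # Walk the table in pages of 5 rows, as parseWebsite does with 5000
    pagesize = 5
    count = TempLz.select().count()
    pagecount = int(math.ceil(count / float(pagesize)))
    for i in range(pagecount):
        batch = TempLz.select().order_by(TempLz.id).paginate(i + 1, pagesize)
        print('batch %d: %d rows' % (i + 1, len(list(batch))))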
Example No. 2
def impData():
    # Keep importing until parsePage() yields no more company data
    companyData = parsePage()
    while companyData:
        for data in companyData:
            # Parse the current page content
            result = parser(data)
            com = buildCompany(result)
            impCompanyInfo(com)
        companyData = parsePage()
    data = TempLz.select().where(TempLz.lzID == '20120323163520958')
    for d in data:
        result = parser(d.lzpage)
        for r in result:
            print(r)
Example No. 3
import os

def fetchLzPage(isLzUrl, lzPath, shortUrl, subTask):
    # Fetch the page using the full lz (license-display) URL
    status = downloadByPath(isLzUrl, lzPath)
    if not os.path.exists(lzPath):
        print("lzpath:", lzPath)
        # logger.debug('lz page is not accessible:', isLzUrl)
        # dt = format(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
        # qw = Website.update(updateDate=dt).where(Website.webId == subTask.webId.webId)
        # qw.execute()
        q = TaskInfo.update(state='5', remark=isLzUrl).where(TaskInfo.id == subTask.id)
        q.execute()
    else:
        try:
            with open(lzPath, 'r') as f:
                parseData = parserCompanyAndWeb(f.read())
            com = buildCompany(parseData['company'])
            tempBuildWeb = buildWebsite(parseData['web'])
            judgeLzResult(com, tempBuildWeb, shortUrl, subTask)
        except Exception as e:
            print(e)
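When the downloaded file is missing, fetchLzPage marks the task as failed through a peewee update query. A minimal sketch of that update pattern, assuming a simplified TaskInfo model with only the state and remark columns used above and a placeholder URL:

    from peewee import SqliteDatabase, Model, AutoField, CharField, TextField

    db = SqliteDatabase(':memory:')

    class TaskInfo(Model):
        # Assumed, reduced schema for illustration
        id = AutoField()
        state = CharField(default='0')
        remark = TextField(null=True)

        class Meta:
            database = db

    db.create_tables([TaskInfo])
    task = TaskInfo.create()

    # Mark the task as failed (state '5') and record the unreachable URL,
    # mirroring the update in fetchLzPage above
    url = 'http://example.com/lz/123'   # placeholder URL
    rows = TaskInfo.update(state='5', remark=url).where(TaskInfo.id == task.id).execute()
    print('rows updated:', rows)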
Example No. 4
def parsePage():
    # Write the lz IDs that have already been parsed to impdbdata.log,
    # and read impdbdata.log back on each pass

    log = None
    try:
        # Loop until every page has been processed
        while True:
            f = open('../logs/impdbdata.log', 'r')
            lines = f.readlines()
            arrs = []
            for l in lines:
                # Strip the trailing newline so entries match stored lzID values
                arrs.append(l.strip())
            f.close()

            log = open('../logs/impdbdata.log', 'a')
            # Get all regIDs recorded in this file
            if arrs:
                lzs = TempLz.select().where(
                        (TempLz.lzpage != '')
                        & (TempLz.lzID.not_in(arrs))
                )
                print('result count:', TempLz.select().where(
                        (TempLz.lzpage != '')
                        & (TempLz.lzID.not_in(arrs))).count())
            else:
                lzs = TempLz.select().where(
                        (TempLz.lzpage != '')
                )
            i = 0
            for lz in lzs:
                i += 1
                result = parser(lz.lzpage)
                com = buildCompany(result)
                # Build and import the company information
                impCompanyInfo(com)
                log.write(lz.lzID + '\n')
                log.flush()
    except Exception as e:
        print(e)
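parsePage resumes work by appending each processed lzID to impdbdata.log and reading the file back on the next pass. A small standalone sketch of that log round trip (using a temporary file rather than ../logs/impdbdata.log), showing why the entries need strip() before they can be compared against lzID values:

    import tempfile, os

    # Simulate the impdbdata.log round trip used in parsePage()
    path = os.path.join(tempfile.mkdtemp(), 'impdbdata.log')

    with open(path, 'a') as log:
        for lzID in ['20120323163520958', '20120323163520959']:
            log.write(lzID + '\n')

    with open(path, 'r') as f:
        raw = f.readlines()

    # Without strip() the entries keep their trailing '\n' and never match a stored lzID
    processed = [line.strip() for line in raw]
    print('20120323163520958' in raw)        # False
    print('20120323163520958' in processed)  # True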