Code Example #1
    def __VenueParser(self, url, cateName):
        existing = [x for x in self.listlink if url in x]
        self.listlink.append(url)
        if len(existing) > 0:
            self.countduplicate += 1
            print '[INFO] Duplicate count = ' + str(self.countduplicate)
            return
        try:
            print 'Scraping url: ' + url

            #url = 'http://www.uksecurity-directory.co.uk/the-directory/1905/ecpcco/'

            xmlDoc = Util.getRequestsXML(url,
                                         '//div[@class="gdl-page-content"]')
            xmlDoc = xmlDoc.xpath('//div[@class="gdl-page-content"]/div')[0]
            ven = Venue()

            imgs = []

            ven.category = cateName
            ven.scrape_page = url
            ven.country = self._language
            ven.name = xmlDoc.find('./div/h2').text
            ven.hqdb_featured_ad_type = 'none'
            isFeatured = xmlDoc.find('./div[@class="stickytag"]/img')
            if isFeatured != None:
                if isFeatured.get('title') == 'Featured Listing':
                    ven.hqdb_featured_ad_type = 'featured'
            divInfo = xmlDoc.xpath('./div[@class="listing-details cf"]/div')
            town_ = ''
            area_ = ''
            zipcode = ''
            listPhone_ = []
            for div__ in divInfo:
                label = div__.find('./label')
                if label != None:
                    label_ = label.text
                    if label_ == 'Business Website Address:':
                        website = div__.find('./span/a')
                        if website != None:
                            website = website.get('href')
                            isFacebook = website.find('facebook.com')
                            isTwitter = website.find('twitter.com')
                            if isFacebook == -1 and isTwitter == -1:
                                ven.business_website = website
                            else:
                                if isFacebook != -1:
                                    ven.facebook = website
                                if isTwitter != -1:
                                    ven.twitter = website
                    if label_ == 'Security Services:':
                        serviceStr = div__.xpath('./span/a')
                        sers = []
                        for ser in serviceStr:
                            serv = Service()
                            serv.service = ser.text
                            sers.append(serv)
                        if len(sers) > 0:
                            ven.services = sers
                            ven.pricelist_link = [ven.scrape_page]
                    if label_ == 'Long Business Description:':
                        des = div__.find('./span')
                        if des != None:
                            des = ' '.join(des.itertext())
                            ven.description = des
                    if label_ == 'Business Phone Number:':
                        phone = div__.find('./span').text
                        #phone = self.formatPhone(phone)
                        phoneSplitChar = self.findSplitPhone(phone)
                        if phoneSplitChar == None:
                            listPhone_ = [phone]
                        else:
                            listPhone_ = phone.split(phoneSplitChar)
                        (ven.office_number, ven.office_number2,
                         ven.mobile_number,
                         ven.mobile_number2) = self.processPhones(listPhone_)

                    if label_ == 'Postcode:':
                        zipcode = div__.find('./span').text
                    if label_ == 'Town:':
                        town_ = div__.find('./span').text
                    if label_ == 'Area:':
                        area_ = div__.find('./span').text
                    zipcode = self.validateZipcode(zipcode)
            if ven.office_number == 'NOT_GB' or ven.office_number2 == 'NOT_GB' or ven.mobile_number == 'NOT_GB' or ven.mobile_number2 == 'NOT_GB':
                return
            for p in listPhone_:
                if p == town_:
                    town_ = ''
                    break

            ven.zipcode = zipcode
            ven.formatted_address = ', '.join([area_, town_, zipcode])

            ven.formatted_address = self.refixFormatAddress(
                ven.formatted_address.replace('0000000', ''))
            extraImg = xmlDoc.xpath('./div[@class="extra-images"]//a/img')
            listingThumbnail = xmlDoc.xpath(
                './div[@class="listing-thumbnail"]//a/img')
            for thumb in listingThumbnail:
                imgs.append(thumb.get('src'))
            for img in extraImg:
                imgs.append(img.get('src'))
            if len(imgs) > 0:
                ven.img_link = imgs
            self.index = self.index + 1
            ven.writeToFile(self.folder, self.index, ven.name, False)

        except Exception, ex:
            print '[ERROR] ' + url + ': ' + str(ex)
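
Example #1 relies on two phone helpers that are not shown: findSplitPhone (which apparently returns the delimiter separating multiple numbers in one field, or None) and processPhones (which returns an (office, office2, mobile, mobile2) tuple). Below is a minimal, hypothetical sketch consistent with how they are called; the delimiter list and the 07-prefix rule are assumptions, not the original implementation, and the real processPhones also appears to flag non-UK numbers with a 'NOT_GB' sentinel (see the check after the call), which is omitted here.

# Hypothetical sketches only -- the real helpers are methods on the scraper
# class and their exact rules are not shown in the example above.
def findSplitPhone(phone):
    # Return the first delimiter that separates multiple numbers, else None.
    for delimiter in ('/', ';', ',', '&'):
        if delimiter in phone:
            return delimiter
    return None

def processPhones(raw_numbers):
    # Sort raw numbers into office/mobile slots; treating an '07' prefix as a
    # UK mobile is an assumption that matches the other examples' checks.
    cleaned = [n.strip() for n in raw_numbers if n and n.strip()]
    mobiles = [n for n in cleaned if n.startswith('07')]
    offices = [n for n in cleaned if not n.startswith('07')]
    offices += [None, None]  # pad so the 4-tuple below is always complete
    mobiles += [None, None]
    return (offices[0], offices[1], mobiles[0], mobiles[1])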
Code Example #2
    def __VenueParser(self, link):
        #link ='https://www.meilleur-garagiste.com/annuaire/garage-la-couronne.464207.html'
        print 'Scraping: ' + link
        existing = [x for x in self.link_venues if link in x]
        if len(existing) > 0:
            print 'Len existing : ' + str(len(existing))
            return None
        xmlBody = Util.getRequestsXML(link, '//div[@id="fiche-artisan"]')
        if xmlBody != None and len(xmlBody) > 0:
            ven = Venue()
            name_ = xmlBody.xpath('.//h1/parent::div')
            if len(name_) > 0:
                name_h1 = name_[0].find('./h1')
                name_h2 = name_[0].find('.//h2')
                if name_h2 != None:
                    ven.name = name_h2.text
                else:
                    ven.name = name_h1.text

            else:
                return None
            xmldiv = xmlBody.find('.//div[@class="row nomargin"]/div')
            if xmldiv == None:
                return None
            span_ = xmldiv.xpath('./span')
            for i_ in span_:
                if i_.get('class') == 'street-address text-hide-mobile':
                    ven.street = i_.text
                    if ven.street != None:
                        #ven.street = self.validateStreet(ven.street).replace('43442491700012', '')
                        ven.street = self.validateStreet2(ven.street).replace(
                            '43442491700012', '')
                        if ven.street.strip() == '.':
                            ven.street = None
                if i_.get('class') == 'postal-code':
                    ven.zipcode = i_.text
                    ven.zipcode = self.validateZipcode(ven.zipcode)
                if i_.get('class') == 'locality':
                    ven.city = i_.text
            a = xmlBody.find(
                './/a[@class="col m12 s4 tel waves-effect waves-light btn center btn-fix bleu"]'
            )
            if a != None:
                phone = a.get('href').replace('tel:', '').replace(' ', '')
                if phone.startswith('07') or phone.startswith('06'):
                    ven.mobile_number = self.validatePhone__(phone, 'FR')
                else:
                    ven.office_number = self.validatePhone__(phone, 'FR')
            logo = xmlBody.find('.//div[@class="center-align"]/img')
            if logo != None:
                ven.img_link = [self.__url__ + logo.get('src')]
            ven.scrape_page = link
            ven.pricelist_link = [link]
            listServices = xmlBody.xpath(
                '//li/div[@class="collapsible-body"]/div/a')
            sers = []
            for ser in listServices:
                servic = Service()
                servic.service = ser.text
                sers.append(servic)
                self.services.append(servic)
            ven.services = sers
            if ven.city != None and ven.zipcode != None:
                if ven.street != None and len(ven.street) > 0:
                    add_ = ven.street + ', ' + ven.city + ', ' + ven.zipcode
                else:
                    add_ = ven.city + ', ' + ven.zipcode
            else:
                add_ = None
            (ven.latitude, ven.longitude) = self.getLatlng(add_, 'FR')
            if ven.latitude == None and ven.longitude == None:
                Util.log.coordinate_logger.error(ven.scrape_page +
                                                 ' : Cannot get GEO code')
            self.link_venues.append(link)
            ven.country = 'fr'
            desc = xmlBody.find('.//p[@id="description"]')
            desc_ = ''
            if desc != None:
                desc_ = ''.join(desc.itertext()).strip()
                desc_ = desc_.replace('\n', '|').replace('\t', '')
            title = xmlBody.find('.//div[@class="container"]//h2')
            if title != None and desc != None:
                desc_ = title.text + ' | ' + desc_
            img_link_arr = []
            desc_ = self.replace__(desc_)
            desc_ = self.replaceSame(desc_, '||', '|').replace('|', ' | ')
            ven.description = desc_
            img_link = xmlBody.find('.//div[@class="realisations"]/img')
            if img_link != None:
                temp_img = self.__url__ + img_link.get('src')
                img_link_arr.append(temp_img)
            multi_img = xmlBody.xpath(
                '//div[@class="3photo realisations"]/div/img')
            for it in multi_img:
                temp_ml = self.__url__ + it.get('src')
                img_link_arr.append(temp_ml)
            if len(img_link_arr) > 0:
                ven.img_link = img_link_arr
            nr_reviewer = xmlBody.xpath('//div[@class="avisoperation row"]')
            if len(nr_reviewer) > 0:
                ven.hqdb_nr_reviews = str(len(nr_reviewer))
            ven.is_get_by_address = True
            return ven
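
Examples #2 and #5 geocode the assembled address via a getLatlng(address, country) helper that is not shown (example #4 uses a same-named helper that instead reads coordinates from the parsed page). Below is a hedged sketch of one way such a helper could work, here using geopy's Nominatim geocoder (an assumption; the original may use a different service), returning (None, None) on failure as the caller expects.

# Hypothetical sketch -- the real getLatlng is a method on the scraper class
# and its geocoding backend is not shown.
from geopy.geocoders import Nominatim

_geocoder = Nominatim(user_agent='venue-scraper-sketch')

def getLatlng(address, country_code):
    if not address:
        return (None, None)
    try:
        # country_code ('FR', 'UK', ...) could be used to bias the lookup;
        # this sketch keeps the call minimal.
        location = _geocoder.geocode(address)
    except Exception:
        return (None, None)
    if location is None:
        return (None, None)
    return (location.latitude, location.longitude)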
Code Example #3
    def __VenueParser(self, xmlE, index):
        #print 'Scraping: '
        ven = Venue()
        ven.adid = xmlE.get('id')
        ven.category = 'architectural technologist'
        photos = xmlE.find(
            './div[@class="search_result_photo"]/div[@class="photo"]/a')
        ven.venue_images = self.__url__ + photos.find('./img').get('src')
        ven.scrape_page = self.__url__ + photos.get('href')
        #print str(index)+' >>'+ ven.scrape_page
        existing = [x for x in self.list_url if ven.scrape_page in x]
        if len(existing) > 0:
            print 'This venue already exists in the list'
            return
        self.list_url.append(ven.scrape_page)
        details_ = xmlE.find('.//div[@class="search_result_details"]')
        ven.name = details_.find('./div[@class="title"]/h3/a').text
        contacts_ = details_.find('./div[@class="contact"]').text
        ven.description = details_.find('./div[@class="desc"]').text
        contact__ = contacts_.split(',')
        if len(contact__) >= 2:
            ven.zipcode = contact__[len(contact__) - 1]
            if ven.zipcode != None:
                ven.zipcode = self.check_zip(ven.zipcode)
            ven.city = contact__[len(contact__) - 2]

        #scraping details ____
        #ven.scrape_page ='http://www.architecturalindex.com/consumers/architects/architect.asp?lngArchitectId=207922'
        xmlInfo = Util.getRequestsXML(
            ven.scrape_page, '//div[@class="architect_header"]/parent::div')
        if xmlInfo != None:
            addressInfo = xmlInfo.find(
                './/div[@class="architect_header"]/div[@class="architect_header_info"]'
            )
            h2 = addressInfo.find('./h2')
            if h2 != None:
                addressInfo.remove(h2)
            address__ = ' '.join(addressInfo.itertext())
            if ven.city == None:
                __address = address__.split(',')
                ven.city = __address[len(__address) - 3]
            if len(ven.city) < 2:
                __address = address__.split(',')
                ven.city = __address[len(__address) - 3]
            street = address__[0:address__.find(ven.city.strip()) - 1]
            if street.endswith(','):
                street = street[0:len(street) - 1]
            if street.upper().find('PO BOX') >= 0:
                street = None
            ven.street = street

            #ven.office_number= '08708700053'
            img = []
            img_info = xmlInfo.find('.//div[@class="architect_portfolio"]')
            photos_ = img_info.xpath(
                './div[@class="architect_portfolio_photo"]//img')
            for photo in photos_:
                im_ = self.__url__ + photo.get('src')
                img.append(im_)
            ven.img_link = img
            sers = []
            des = xmlInfo.find('.//div[@class="architect_info_statement"]')
            des = ' '.join(des.itertext())
            ven.description = des
            services = xmlInfo.xpath('//div[@class="architect_info"]/ul')
            desP = xmlInfo.xpath('//div[@class="architect_info"]/p')
            affi = xmlInfo.xpath('//div[@class="architect_info"]/h3')
            isAffiliations = ''
            for aff in affi:
                if aff.text.strip() == 'Affiliations':
                    isAffiliations = desP[len(desP) - 1].text
                    ven.accreditations = isAffiliations

            if len(desP) >= 2:
                p1 = desP[0].text
                p2 = desP[1].text

                #ven.description= ven.description+' '+p1+' '+p2
                if p1 != None:
                    ven.description += ' ' + p1
                if p2 != None:
                    if p2 != 'None':
                        ven.description += ' ' + p2 + ': '

            if len(services) >= 3:
                services_ = services[1]
                listSer = services_.xpath('./li')

                listDes_2 = services[2].xpath('./li')
                des_2 = ''
                if len(listDes_2) > 0:
                    des_2 = '. Specialist Experience: '
                    for des2 in listDes_2:
                        des_2 += des2.text + ', '

                    des_2 = des_2.strip()
                    if des_2.endswith(','):
                        des_2 = des_2[0:-1]

                listDes = services[0].xpath('./li')
                if len(listDes) > 0:
                    desSectors = ''
                    for lides in listDes:
                        desSectors += lides.text + ', '
                    desSectors = desSectors.strip()
                    if desSectors.endswith(','):
                        desSectors = desSectors[0:-1]
                    ven.description = ven.description + ' ' + desSectors + '.' + des_2
                    ven.description = ven.description.replace(', ,', ', ')
                    ven.description = ven.description.replace('..', '.')
                for ser in listSer:
                    se = ser.text
                    serv = Service()
                    serv.service = se
                    sers.append(serv)
            ven.services = sers
            ven.pricelist_link = [ven.scrape_page]
            ven.country = 'gb'
            '''if ven.street!=None:
                add_ = ven.street+', '+ven.city+', '+ ven.zipcode
            else:
                add_ = ven.city+', '+ ven.zipcode
            #(ven.latitude,ven.longitude) = self.getLatlng(add_, 'UK')'''
            indexc = self.addIndex()
            try:
                print 'Writing index: ' + str(indexc)
                ven.writeToFile(self.folder, indexc, ven.name.replace(':', ''),
                                False)
            #return ven
            except Exception, ex:
                print ex
                return
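
All five examples fetch pages through Util.getRequestsXML(url, xpath), which is not shown. Judging by the call sites, it returns a parsed lxml tree (or None) and uses the second argument to confirm that the expected element is present. A minimal sketch under those assumptions, using requests plus lxml.html; the retry/validation details of the real Util module may differ.

# Hypothetical sketch of Util.getRequestsXML; the real Util module is not shown.
import requests
from lxml import html

def getRequestsXML(url, check_xpath, timeout=30):
    try:
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()
    except requests.RequestException:
        return None
    root = html.fromstring(resp.content)
    # Only hand back the tree when the element the caller expects is present.
    if root.xpath(check_xpath):
        return root
    return None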
Code Example #4
    def __VenueParser(self, item, city):
        existing = [x for x in self.listLink if item in x]
        self.listLink.append(item)
        if len(existing) <= 0:
            try:
                xmlDoc = Util.getRequestsXML(item, '/html')
                ven = Venue()
                ven.scrape_page = item
                #ven.city = city
                ven.name = xmlDoc.xpath(
                    '//div[@class="row top-buffer"]/h3')[0].text
                (ven.latitude, ven.longitude) = self.getLatlng(xmlDoc)
                xmlcontent = xmlDoc.find('.//div[@class="tab-content"]')
                services_schedule_info = xmlcontent.xpath(
                    './div/div[@class="row top-buffer"]/h4/parent::div')[0]
                if services_schedule_info != None:
                    services_schedule_info = ''.join(
                        services_schedule_info.itertext()).split('\n')
                    for it in services_schedule_info:
                        if it.find('Style:') != -1:
                            it = it[0:it.find('Schedule')]
                            it = it.strip()
                            ser_name = it[it.find('Style:') + len('Style:'):it.
                                          find('Ability level')]
                            cost = len(it)
                            cost_ = ['Cost:', 'Concession cost:']
                            char_cost = ''
                            for c in cost_:
                                if it.find(c) != -1:
                                    cost = it.find(c)
                                    char_cost = c
                                    break
                            #cost = it.find('Cost:')
                            if cost == -1:
                                cost = len(it)

                            ser_des = it[it.find('Ability level:') +
                                         len('Ability level:'):cost]
                            ser_price = it[cost +
                                           len(char_cost):it.find('GBP') +
                                           len('GBP')]
                            ser = Service()
                            ser.service = ser_name
                            ser.description = ser_des
                            ser.price = ser_price.replace('-', '')
                            ven.services = [ser]
                        if it.find('a.m.') != -1 or it.find('p.m.') != -1:
                            hours = it.strip()
                            # Replace the '.' that precedes each day name with
                            # a ' | ' separator before normalising the hours.
                            days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
                                    'Friday', 'Saturday', 'Sunday')
                            for day in days:
                                hours = hours.replace('.' + day, ' | ' + day)
                            ven.opening_hours_raw = self.formatOpenhour(hours)
                address = xmlcontent.find('.//address')
                if address != None:

                    #print ET.dump(address)
                    address = ''.join(address.itertext()).replace(
                        'United Kingdom', '').strip()
                    address = self.validateAddress(address)

                    #address ='Ward Park Arras Pavilion,Gransha Road,Bangor,Northern Ireland,BT20 4TN'
                    ven.country = 'gb'
                    if address.upper().find('Ireland'.upper()) != -1:
                        if address.upper().find(
                                'Northern Ireland'.upper()) != -1:
                            ven.country = 'ie'
                    if address.endswith(','):
                        address = address[0:-1]
                    ven.formatted_address = address
                posted = xmlcontent.find('./div/div[@class="row"]/p')
                imgs = xmlcontent.xpath('.//a/img')
                img_ = []
                for img in imgs:
                    img_.append(img.get('src'))

                ven.img_link = img_
                if posted != None:
                    ven.hqdb_ad_posted = posted.text.replace(
                        'Last updated', '')
                    split_posted = ven.hqdb_ad_posted.split(',')
                    if len(split_posted) >= 3:
                        ven.hqdb_ad_posted = ', '.join(
                            split_posted[0:len(split_posted) - 1])
                ven.category = self.category
                #ven.country ='gb'
                des_info = xmlcontent.xpath(
                    '//div[@class="row top-buffer"]/h3')[1]
                #print des_info.text
                des_info = des_info.getparent()
                des__ = des_info.xpath('./p')

                ven.pricelist_link = [ven.scrape_page]
                ven.hqdb_featured_ad_type = 'none'

                ven.description = ''
                for des in des__:
                    ven.description += ''.join(des.itertext()) + ' '
                    des_info.remove(des)
                info = '____'.join(des_info.itertext())
                a = des_info.find('./a')
                if a != None:
                    a = a.get('href')
                    if a.find('facebook.com') == -1:
                        ven.business_website = a
                    else:
                        if a.startswith('http:'):
                            a = a.replace('http:', 'https:')
                        ven.facebook = a

                info = info.split('__')

                for inf in range(0, len(info)):
                    if info[inf] == 'Qualifications:':
                        ven.accreditations = info[inf + 2]
                    if info[inf] == 'Phone:':

                        phone = info[inf + 2].strip()

                        pattern = '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)'
                        find_charSplit = self.findSplitPhone(phone)
                        mobile_prefixes = ('07', '447', '+447', '00447',
                                           '+44(0)7', '44(0)7', '004407')
                        if find_charSplit == None:
                            issMail = re.search(pattern, phone, flags=0)
                            if issMail != None:
                                ven.business_email = issMail.group(0)
                                continue
                            phone = phone.replace('Mobile:', '')
                            phone = phone.replace('ext.225', '')
                            phone = phone.replace('O7', '07').replace(' ', '')
                            if phone.startswith(mobile_prefixes):
                                ven.mobile_number = self.validatePhone__(
                                    phone, ven.country)
                            else:
                                ven.office_number = self.validatePhone__(
                                    phone, ven.country)
                        else:
                            phone = phone.split(find_charSplit)
                            for p in phone:
                                issMail = re.search(pattern, p, flags=0)
                                if issMail != None:
                                    ven.business_email = issMail.group(0)
                                    continue
                                p = p.replace('Mobile', '').replace('ext225', '')
                                p = p.replace('O7', '07').replace(' ', '')
                                if p.startswith(mobile_prefixes):
                                    if ven.mobile_number != None:
                                        ven.mobile_number2 = self.validatePhone__(
                                            p, ven.country)
                                    else:
                                        ven.mobile_number = self.validatePhone__(
                                            p, ven.country)
                                else:
                                    if ven.office_number != None:
                                        ven.office_number2 = self.validatePhone__(
                                            p, ven.country)
                                    else:
                                        ven.office_number = self.validatePhone__(
                                            p, ven.country)
                isPhoneOverSea = self.checkPhoneOverSea([
                    ven.office_number, ven.office_number2, ven.mobile_number,
                    ven.mobile_number2
                ])
                if isPhoneOverSea == False:
                    index = self.addIndex()
                    print str(index) + ' Scraping: ' + city + '---' + ven.scrape_page
                    #ven.is_get_by_address =True
                    ven.writeToFile(self.folder, index, ven.name, False)
            except Exception, ex:
                print ex
                return
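
Example #4 normalises every number through validatePhone__(number, country), whose implementation is not shown. One plausible sketch, built on the phonenumbers library (an assumption; the original may roll its own validation), returning an E.164 string or None for invalid input.

# Hypothetical sketch of validatePhone__; the real helper is a method on the
# scraper class and may behave differently (e.g. return national formatting).
import phonenumbers

def validatePhone__(raw_phone, country):
    try:
        parsed = phonenumbers.parse(raw_phone, country.upper())
    except phonenumbers.NumberParseException:
        return None
    if not phonenumbers.is_valid_number(parsed):
        return None
    return phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)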
Code Example #5
    def __VenueParser(self, url__, name__):
        print 'Scraping: ' + url__
        existing = [x for x in self.venuesList if url__ in x]
        if len(existing) > 0:
            return None
        #url__ ='http://www.drivingschoolsfinder.co.uk/city-Accrington/1846198-driving-Terrys-School-of-Motoring.html'
        #name__ ='Terrys School of Motoring'
        city = url__.split('/')[3].replace('city-', '').replace('-', ' ')
        xmlDoc = Util.getRequestsXML(url__, '/html/body')
        if xmlDoc == None:
            return None
        else:
            ven = Venue()
            sers = []
            ven.name = name__
            ven.city = city
            ven.scrape_page = url__
            td = xmlDoc.xpath('//td[@class="welcome-padding"]')
            iter__ = ''.join(td[0].itertext())
            iter__ = iter__[iter__.find('Driving School:') +
                            len('Driving School:'):iter__.find('[Edit Text]')]
            iter__ = iter__.replace('\n', '|').replace('\t', '')
            iter__ = iter__.replace('|||', ' | ')
            rep = '|' + name__
            iter__ = iter__[0:iter__.find(rep)]
            rep = '  |  |'
            iter__ = iter__[0:iter__.find(rep)]
            ven.description = iter__
            div = td[0].xpath('./div')

            if len(div) < 5:
                return None
            else:
                # div info = position div gray-line[0]+1
                div_info = 0
                for div_ in div:
                    if div_.find('./script') != None:
                        div_info = 3
                info = div[div_info]
                info_ = ''.join(info.itertext())
                address = info_[0:info_.find('Phone')]
                address = address.replace(name__, '').replace(city, ',' + city)
                address = address.replace(',,', ',').replace(', ,', ',')
                address = address.split(',')
                #street = ', '.join(address[0:len(address)-2]).replace(','+city,'')
                street = ', '.join(address[0:len(address)])
                street = street[0:street.find(city) - 1]
                if street.endswith(','):
                    street = street[0:len(street) - 1]
                zipcode = address[len(address) - 1]
                street__ = street.upper()
                if street__.find('PO BOX') == -1:
                    # Strip a handful of site-specific junk values from the street.
                    for junk in ('n/a', '***', '6 weldon place croy',
                                 'cumbernauld41 napier square bellshill ml4 1tb',
                                 'P.O. Box 1048'):
                        street = street.replace(junk, '')
                    ven.street = street
                if ven.street == '-':
                    ven.street = None
                ven.zipcode = self.validateZipcode(zipcode)

                phone = info_[info_.find('Phone:') + len('Phone:'):info_.find('Fax:')]
                phone = phone.replace(' ', '')
                if phone.isdigit():
                    if phone.startswith('07') or phone.startswith('7'):
                        ven.mobile_number = self.validatePhone(phone)
                        ven.mobile_number = self.validatePhone__(
                            ven.mobile_number, 'gb')
                    else:
                        ven.office_number = self.validatePhone(phone)
                        ven.office_number = self.validatePhone__(
                            ven.office_number, 'gb')
                services_ = info_[info_.find('Services Offered:') +
                                  len('Services Offered:'):info_.find('Areas Served:')]
                services_ = services_.strip().replace(';', ',')
                if services_ != 'None Listed - [Edit]':
                    services_ = services_.replace('/', ',').replace(',,', ',')
                    services_ = services_.split(',')
                    for s in services_:
                        name = self.validateServices(s)
                        if len(name) >= 5:
                            name__ = name.split()
                            for n in name__:
                                name = self.validateNameServices(name)
                        if len(name.strip()) >= 5:
                            services = Service()
                            services.service = name
                            sers.append(services)

                    #ven.description = ven.description +' | ' +services_
                stringfind = 'No Website'
                if info_.find('No Website') == -1:
                    stringfind = 'Website'
                area_covered = info_[info_.find('Areas Served:') +
                                     len('Areas Served:'):info_.find(stringfind)]
                area_covered = area_covered.strip().replace(';', ',')
                if area_covered != 'None Listed - [Edit]':
                    ven.areas_covered = area_covered

                ven.services = sers
                reviewer = len(xmlDoc.xpath('//td[@class="review-box"]'))
                if reviewer > 0:
                    ven.hqdb_nr_reviews = str(reviewer)
                scoreInfo = div[div_info + 1]
                #http://www.drivingschoolsfinder.co.uk/halfstar.gif +0.5
                #http://www.drivingschoolsfinder.co.uk/fullstar.gif +1
                #http://www.drivingschoolsfinder.co.uk/emptystar.gif +0
                tr = scoreInfo.xpath('./table/tr')
                tr = tr[1]
                img_core = tr.xpath('./td')[1]
                img_core = img_core.xpath('./table/tr/td/img')
                score__ = 0.0
                for score in img_core:
                    score_ = score.get('src')
                    if score_ == 'http://www.drivingschoolsfinder.co.uk/halfstar.gif':
                        score__ += 0.5
                    if score_ == 'http://www.drivingschoolsfinder.co.uk/fullstar.gif':
                        score__ += 1
                    if score_ == 'http://www.drivingschoolsfinder.co.uk/emptystar.gif':
                        score__ += 0
                if score__ > 0:
                    ven.hqdb_review_score = str(score__).replace('.0', '')
                ven.country = 'gb'
                emails_ = re.findall(r'[\w\.-]+@[\w\.-]+', info_)
                for email_ in emails_:
                    ven.business_email = email_
            #    website_ = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', info_)
            #    for web_  in website_:
            #       ven.business_website = web_
                if ven.business_email != None:
                    if ven.business_email.startswith('http'):
                        ven.business_email = None
                    ven.business_email = None
                if info_.find('No Website') == -1:
                    arrays__ = info_.split(' ')
                    for i in range(0, len(arrays__)):
                        if arrays__[i].find('Website') >= 0:
                            web_ = arrays__[i + 1].replace('\t', ' ').replace(
                                '\n', ' ').split()[0].replace('No', '')
                            ven.business_website = self.formatWeb_(web_)
                            print ven.business_website
                            break
                address_ = ''
                if ven.street == None:
                    address_ = ven.city + ', ' + ven.zipcode
                    #ven.formatted_address = ven.city+', '+ven.zipcode
                else:
                    if ven.zipcode != None:
                        address_ = ven.street + ', ' + ven.city + ', ' + ven.zipcode
                    else:
                        address_ = ven.street + ', ' + ven.city
                ven.pricelist_link = [ven.scrape_page]
                ''' get lat -lng '''
                if address_ != '':
                    try:
                        (ven.latitude,
                         ven.longitude) = self.getLatlng(address_, 'UK')
                    except Exception, ex:
                        Util.log.running_logger.error(ven.scrape_page + ' : ' +
                                                      str(ex))
                        return None
            ven.is_get_by_address = True
            return ven
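
Several of the examples clean postcodes with validateZipcode (or check_zip), which is also not shown. Below is a hedged sketch for the UK-facing examples; the simplified UK postcode pattern and the choice to return an empty string for non-matching values are assumptions (example #5 tests the result against None, so the real helper may behave differently).

# Hypothetical sketch of validateZipcode for the UK examples; the real helper
# is not shown and may return None instead of '' for invalid values.
import re

_UK_POSTCODE = re.compile(r'^[A-Z]{1,2}[0-9][A-Z0-9]?\s*[0-9][A-Z]{2}$')

def validateZipcode(zipcode):
    if not zipcode:
        return ''
    cleaned = zipcode.strip().upper()
    if _UK_POSTCODE.match(cleaned):
        return cleaned
    return ''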