Example 1
 def __init__(self):
     QThread.__init__(self)
     self.logger = LogManager(__name__)
     self.spider = Spider()
     self.regex = Regex()
     dupCsvReader = Csv()
     self.dupCsvRows = dupCsvReader.readCsvRow('cs_cat.csv')
     self.csvWriter = Csv('cs_cat.csv')
     dupFilterCsvReader = Csv()
     self.dupFilterCsvRows = dupFilterCsvReader.readCsvRow('filter_cat.csv')
     self.csvW = Csv('filter_cat.csv')
     self.mainUrl = 'http://www.cs-catering-equipment.co.uk/'
     self.totalCategory = 0
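
The Csv helper that every example constructs is never shown in this listing. From the calls alone (readCsvRow loads existing rows so duplicates can be skipped, writeCsvRow appends a row to the file named in the constructor), a minimal sketch of the assumed interface could look like the following; the class and method names come from the examples, everything else is an assumption:

import csv
import os

class Csv:
    def __init__(self, fileName=None):
        # File to append to via writeCsvRow; readCsvRow takes its own path.
        self.fileName = fileName

    def readCsvRow(self, fileName, column=None):
        # Return an empty list if the file does not exist yet, so the
        # duplicate checks in the callers simply find nothing.
        if not os.path.exists(fileName):
            return []
        with open(fileName) as f:
            rows = list(csv.reader(f))
        # With a column index, return just that column, which is why some
        # examples can test membership with a single string like 'URL'.
        if column is None:
            return rows
        return [row[column] for row in rows if len(row) > column]

    def writeCsvRow(self, row):
        # Append one row to the file given at construction time.
        with open(self.fileName, 'a') as f:
            csv.writer(f).writerow(row)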
Example 4
 def __init__(self, parent=None):
     super(Form, self).__init__(parent)
     self.createGui()
     self.memberDic = {}
     self.excludedMember = None
     dupCsvReader = Csv()
     self.dupCsvRows = dupCsvReader.readCsvRow('linkedIn.csv', 0)
     self.csvWriter = Csv('linkedIn.csv')
     self.allMembers = []
Example 5
 def __init__(self):
     QThread.__init__(self)
     self.logger = LogManager(__name__)
     self.spider = Spider()
     self.regex = Regex()
     dupCsvReader = Csv()
     self.dupCsvRows0 = dupCsvReader.readCsvRow('cs_product.csv', 0)
     self.dupCsvRows = dupCsvReader.readCsvRow('cs_product.csv', 1)
     self.csvWriter = Csv('cs_product.csv')
     self.mainUrl = 'http://www.cs-catering-equipment.co.uk/'
     self.utils = Utils()
     if 'Product Code' not in self.dupCsvRows:
         self.csvWriter.writeCsvRow(
             ['URL', 'Product Code', 'Product Name', 'Manufacturer', 'List Price', 'Product Price',
              'Discount', 'Product Short Description', 'Product Long Description',
              'Product Technical Specifications', 'Warranty', 'Delivery', 'Product Image',
              'Category 1', 'Category 2', 'Category 3', 'Category 4', 'Brand Image'])
     self.totalProducts = len(self.dupCsvRows)
Example 6
    def scrapBertos(self, retry=0):
        #        self.downloadFile('http://s900.bertos.it/download.php?file=editorcms/documentazione/schede/scheda_13722600.pdf', 'a.pdf')
        #        self.scrapSubCategory('http://s900.bertos.it/en/', '', None, None)
        #        self.scrapProducts('http://s900.bertos.it/en/pasta_cookers/', '', '', None, None)
        #        return
        self.notifyProduct.emit(
            '<font color=green><b>Trying to fetch all language links.</b></font>')
        self.logger.debug(self.mainUrl)
        data = self.spider.fetchData(self.mainUrl)
        if data and len(data) > 0:
            data = self.regex.reduceNewLine(data)
            data = self.regex.reduceBlankSpace(data)

            languages = self.regex.getAllSearchedData(
                r'(?i)<div class="[^"]*"><a href="([^"]*)"\s*?class="boxalingua">([^<]*)</a>',
                data)
            if languages and len(languages) > 0:
                self.logger.debug('Total languages: %s' % str(len(languages)))
                self.notifyProduct.emit('<b>Total languages found [%s]</b>' %
                                        str(len(languages)))
                for language in languages:
                    self.totalProducts = 0
                    url = language[0]
                    #                    if str(language[1]).lower() != 'en':
                    #                        continue
                    urlChunk = self.spider.fetchData(url)
                    if urlChunk and len(urlChunk) > 0:
                        urlChunk = self.regex.reduceNewLine(urlChunk)
                        urlChunk = self.regex.reduceBlankSpace(urlChunk)
                        url = self.regex.getSearchedData(
                            r'(?i)<a href="([^"]*)" onmouseover="vedi_po_cat\(2\)\s*?"',
                            urlChunk)
                        csvFile = str(
                            language[1].strip()).lower() + '_' + 'bertos.csv'
                        dupCsvReader = Csv()
                        dupCsvRows = dupCsvReader.readCsvRow(csvFile)
                        csvWriter = Csv(csvFile)
                        if self.csvHeader not in dupCsvRows:
                            dupCsvRows.append(self.csvHeader)
                            csvWriter.writeCsvRow(self.csvHeader)
                        self.notifyProduct.emit(
                            '<font color=green><b>Trying to fetch data for language [%s].</b></font>'
                            % language[1])
                        self.scrapCategory(url, dupCsvRows, csvWriter)
                        self.notifyProduct.emit(
                            '<font color=red><b>=====  Finished scraping data for [%s] =====</b></font><br /><br />'
                            % language[1])
        elif retry < 5:
            return self.scrapBertos(retry + 1)
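
scrapBertos retries itself recursively when the first fetch comes back empty: the first call plus up to five retries, after which it falls off the end and returns None with no notification. An iterative equivalent makes the give-up case explicit; this is only a sketch, with spider.fetchData taken from the example above and the function name invented here:

def fetch_with_retry(spider, url, retries=5):
    # First attempt plus up to `retries` further attempts, mirroring the
    # retry < 5 guard above, but returning None explicitly when exhausted.
    for _ in range(retries + 1):
        data = spider.fetchData(url)
        if data:
            return data
    return None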
Example 7
 def __init__(self):
     self.logger = LogManager(__name__)
     self.spider = Spider()
     self.regex = Regex()
     self.utils = Utils()
     self.main_url = 'http://www.walgreens.com'
     self.url = 'http://www.walgreens.com/store/catalog/shopLanding'
     self.sitemap_xml = 'http://www.walgreens.com/sitemap.xml'
     dupCsvReader = Csv()
     self.dupCsvRows = dupCsvReader.readCsvRow('walgreens.csv')
     self.csvWriter = Csv('walgreens.csv')
     csvDataHeader = ['Product Name', 'Price', 'Description', 'Shipping', 'Ingredients', 'Image']
     if csvDataHeader not in self.dupCsvRows:
         self.csvWriter.writeCsvRow(csvDataHeader)
Example 8
 def __init__(self):
     QtCore.QThread.__init__(self)
     self.isExiting = False
     self.logger = LogManager(__name__)
     self.spider = Spider()
     self.regex = Regex()
     dupCsvReader = Csv()
     self.dupCsvRows = dupCsvReader.readCsvRow('nisbetCat.csv')
     self.csvWriter = Csv('nisbetCat.csv')
     self.mainUrl = 'http://www.nisbets.co.uk'
     csvHeaderList = ['Parent Category', 'Category Name', 'Category Description']
     if csvHeaderList not in self.dupCsvRows:
         self.csvWriter.writeCsvRow(csvHeaderList)
         self.dupCsvRows.append(csvHeaderList)
Example 9
 def __init__(self):
     QThread.__init__(self)
     self.logger = LogManager(__name__)
     self.spider = Spider()
     self.regex = Regex()
     self.utils = Utils()
     self.mainUrl = 'http://www.paodeacucar.com.br/'
     self.url = 'http://www.paodeacucar.com.br/'
     dupCsvReader = Csv()
     self.dupCsvRows = dupCsvReader.readCsvRow('paodeacucar.csv', 4)
     self.csvWriter = Csv('paodeacucar.csv')
     csvDataHeader = ['SKU', 'Category', 'Subcategory', 'Name', 'URL', 'URL Image', 'Details',
                      'Nutrients Table html code', 'Price from, 28/abr/14', '28/abr/14']
     if 'URL' not in self.dupCsvRows:
         self.dupCsvRows.append(csvDataHeader)
         self.csvWriter.writeCsvRow(csvDataHeader)
Example 12
 def __init__(self):
     self.browser = None
     self.url = "http://environmentclearance.nic.in/Search.aspx"
     self.statuses = []
     self.categories = []
     self.years = []
     self.states = []
     self.csvDataHeader = [
         'Status', 'Category', 'Year', 'State', 'Serial No',
         'Proposal details', 'Location', 'Important Date', 'Category',
         'Company Proponent'
     ]
     self.logger = LogManager(__name__)
     self.regex = Regex()
     dupCsvReader = Csv()
     self.dupCsvRows = dupCsvReader.readCsvRow('env_clearance.csv')
     self.csvWriter = Csv('env_clearance.csv')
     if self.csvDataHeader not in self.dupCsvRows:
         self.csvWriter.writeCsvRow(self.csvDataHeader)
         self.dupCsvRows.append(self.csvDataHeader)
Example 13
 def initScrapper(self):
     try:
         dupCsvReader = Csv()
         dupCsvRows = dupCsvReader.readCsvRow('omvic.csv')
         self.dbHelper = DbHelper('omvic.db')
         self.dbHelper.createTable('omvic')
         self.totaldata = self.dbHelper.getTotalProduct('omvic')
         self.csvWriter = Csv('omvic.csv')
         csvDataHeader = ['URL', 'Legal Name', 'Business Name', 'Status', 'Class of Registration', 'Subclass',
                          'Operating Status',
                          'Business Address', 'Email', 'Phone Number', 'Salesperson(s) Names']
         if len(dupCsvRows) == 0:
             self.csvWriter.writeCsvRow(csvDataHeader)
         # Release the duplicate list and force a garbage-collection pass
         # to reclaim memory before scraping starts.
         del dupCsvReader
         del dupCsvRows
         gc.collect()
         del gc.garbage[:]
         gc.collect()
     except Exception as x:
         print(x)
Example 14
 def __init__(self, urlList, category):
     QThread.__init__(self)
     self.logger = LogManager(__name__)
     self.spider = Spider()
     self.regex = Regex()
     self.utils = Utils()
     self.urlList = urlList
     self.category = category
     dupCsvReader = Csv()
     self.dupCsvRows = dupCsvReader.readCsvRow(category + '.csv')
     self.csvWriter = Csv(category + '.csv')
     csvDataHeader = ['SKU', 'Title', 'Sub Title', 'Price', 'Shipping Weight', 'Image URL']
     if csvDataHeader not in self.dupCsvRows:
         self.dupCsvRows.append(csvDataHeader)
         self.csvWriter.writeCsvRow(csvDataHeader)
     self.mainUrl = 'http://www.amazon.com'
     self.scrapUrl = None
     self.dbHelper = DbHelper('amazon.db')
     self.dbHelper.createTable(category)
     self.total = self.dbHelper.getTotalProduct(category)
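
The DbHelper used by this example and by Example 13 is not part of the listing either; only three calls appear (the constructor with a database file, createTable, and getTotalProduct, which the callers treat as a progress total). A speculative sqlite3-based sketch of that interface, with an invented single-column schema:

import sqlite3

class DbHelper:
    def __init__(self, dbFile):
        # One connection per database file, as the constructor call suggests.
        self.conn = sqlite3.connect(dbFile)

    def createTable(self, name):
        # One table per scrape category; the single TEXT column is a guess.
        self.conn.execute('CREATE TABLE IF NOT EXISTS "%s" (data TEXT)' % name)
        self.conn.commit()

    def getTotalProduct(self, name):
        # Row count, used by the callers as the number of products so far.
        cur = self.conn.execute('SELECT COUNT(*) FROM "%s"' % name)
        return cur.fetchone()[0]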
Example 15
 def __init__(self):
     QThread.__init__(self)
     self.logger = LogManager(__name__)
     self.spider = Spider()
     self.regex = Regex()
     self.utils = Utils()
     dupCsvReader = Csv()
     self.dupCsvRows = dupCsvReader.readCsvRow("cs_Brands.csv")
     self.csvWriter = Csv("cs_Brands.csv")
     self.mainUrl = "http://www.cs-catering-equipment.co.uk/brands"
     self.isExiting = False
     headerData = [
         "URL",
         "Parent Category",
         "Brand Category",
         "Brand Description",
         "Image File",
         "Product Codes in this category",
     ]
     if headerData not in self.dupCsvRows:
         self.csvWriter.writeCsvRow(headerData)
Example 16
    def __init__(self):
        QtCore.QThread.__init__(self)
        self.isExiting = False
        self.totalProducts = 0
        self.logger = LogManager(__name__)
        self.spider = Spider()
        self.regex = Regex()
        dupCsvReader = Csv()
        self.dupCsvRows = dupCsvReader.readCsvRow('nisbets.csv', 0)
        self.csvWriter = Csv('nisbets.csv')
        self.mainUrl = 'http://www.nisbets.co.uk'
        csvHeaderList = ['URL', 'Product Code', 'Product Technical Specifications', 'Product Name', 'Brand',
                         'Product Price', 'Product Short Description', 'Product Long Description',
                         'Image File Name', 'User Manual File Name', 'Exploded View File Name',
                         'Spares Code', 'Accessories', 'Product Status', 'Category1',
                         'Category2', 'Category3', 'Category4']
        if 'URL' not in self.dupCsvRows:
            self.csvWriter.writeCsvRow(csvHeaderList)
            self.dupCsvRows.append(csvHeaderList[0])

        self.utils = Utils()
Example 17
    def __init__(self):
        QtCore.QThread.__init__(self)
        self.isExiting = False
        self.logger = LogManager(__name__)
        self.spider = Spider()
        self.regex = Regex()
        dupCsvReader = Csv()
        self.dupCsvRows = dupCsvReader.readCsvRow('nisbets.csv', 0)
        self.csvWriter = Csv('nisbets.csv')
        self.mainUrl = 'http://www.nisbets.co.uk'
        csvHeaderList = [
            'URL', 'Product Code', 'Product Technical Specifications',
            'Product Name', 'Brand', 'Product Price',
            'Product Short Description', 'Product Long Description',
            'Image File Name', 'User Manual File Name',
            'Exploded View File Name', 'Spares Code', 'Accessories',
            'Product Status', 'Category1', 'Category2', 'Category3',
            'Category4'
        ]
        if 'URL' not in self.dupCsvRows:
            self.csvWriter.writeCsvRow(csvHeaderList)
            self.dupCsvRows.append(csvHeaderList[0])

        self.utils = Utils()