    def _search(self, movie, quality, results):
        # Cookie login
        if not self.last_login_check and not self.login():
            return
        searchStrings = self.getSearchParams(movie, quality)
        lastsearch = 0
        searcher = Searcher()

        for searchString in searchStrings:
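            # Throttle: keep at least 10 seconds between two consecutive
            # searches so the tracker is not hammered.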
            actualtime = int(time.time())
            if actualtime - lastsearch < 10:
                timetosleep = 10 - (actualtime - lastsearch)
                time.sleep(timetosleep)
            lastsearch = int(time.time())
            URL = self.urls['search'] + searchString

            r = self.opener.open(URL)
            soup = BeautifulSoup(r, "html.parser")
            if soup.find('table', attrs={'class': 'results'}):
                resultdiv = soup.find('table', attrs={
                    'class': 'results'
                }).find('tbody')
            else:
                continue
            if resultdiv:
                try:
                    for result in resultdiv.findAll('tr'):
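                        # Column layout of the results table, as read below:
                        # 0=category, 1=name, 2=nfo link (torrent id), 4=age,
                        # 5=size, 7=seeders, 8=leechers.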
                        try:
                            # The category id is the query-string tail of the
                            # first link in the row, e.g. '=631'.
                            href = result.findAll('td')[0].findAll('a')[0]['href']
                            categorie = href[href.find('='):]

                            # Keep only the whitelisted categories.
                            if categorie in ('=631', '=455', '=634'):

                                new = {}

                                idt = result.findAll('td')[2].findAll(
                                    'a')[0]['href'][1:].replace(
                                        'torrents/nfo/?id=', '')
                                name = result.findAll('td')[1].findAll(
                                    'a')[0]['title']
                                testname = searcher.correctName(
                                    name, movie['title'])
                                if not testname:
                                    continue
                                url = (self.urls['download'] % idt)
                                detail_url = (self.urls['detail'] % idt)
                                leecher = result.findAll('td')[8].text
                                size = result.findAll('td')[5].text
                                age = result.findAll('td')[4].text
                                seeder = result.findAll('td')[7].text

                                def extra_check(item):
                                    # No provider-specific check; accept all.
                                    return True

                                new['id'] = idt
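                                # ' french' is appended to the name,
                                # presumably so CouchPotato's language
                                # matching recognises these releases.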
                                new['name'] = name + ' french'
                                new['url'] = url
                                new['detail_url'] = detail_url
                                new['size'] = self.parseSize(str(size))
                                new['age'] = self.ageToDays(str(age))
                                new['seeders'] = tryInt(seeder)
                                new['leechers'] = tryInt(leecher)
                                new['extra_check'] = extra_check
                                new['download'] = self.download

                                log.debug("url='%s'" % str(url))
                                results.append(new)

                        except:
                            log.error('Failed parsing T411: %s',
                                      traceback.format_exc())

                except AttributeError:
                    log.debug('No search results found.')
            else:
                log.debug('No search results found.')
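        # Fallback: when the direct search finds nothing, retry with every
        # alternative title CouchPotato knows for this movie.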
        if not results:
            media_title = fireEvent('library.query',
                                    movie,
                                    include_year=False,
                                    single=True)

            for title in possibleTitles(media_title):
                self._searchOnTitle(title, movie, quality, results)
    def _search(self, movie, quality, results):

        # Cookie login
        #if not self.last_login_check and not self.login():
        #    return

        # Query string: movie title plus quality identifier, with dashes
        # removed and runs of whitespace collapsed.
        TitleStringReal = ' '.join(
            (getTitle(movie['info']) + ' ' +
             simplifyString(quality['identifier'])).replace('-', ' ').split()
        ).encode("utf8")

        # Normalise the search URL to plain ASCII before quoting it.
        URL = self.urls['search'].encode('UTF8')
        URL = unicodedata.normalize('NFD', unicode(URL, "utf8", "replace"))
        URL = URL.encode('ascii', 'ignore')
        URL = urllib2.quote(URL, ":/?=")

        # 'champ_recherche' is the site's search form field; the query is
        # sent as a POST request.
        values = {'champ_recherche': TitleStringReal}

        data_tmp = urllib.urlencode(values)
        req = urllib2.Request(URL,
                              data_tmp,
                              headers={'User-Agent': "Mozilla/5.0"})
        searcher = Searcher()
        data = urllib2.urlopen(req)
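        # Rows are not parsed for an id; assign synthetic ids from 1000 up.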
        result_id = 1000

        if data:
            try:
                html = BeautifulSoup(data, "html.parser")
                lin = 0
                erlin = 0
                resultdiv = []
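                # Result rows are tagged with classes 'ligne0', 'ligne1', ...;
                # collect each class in turn until one yields no match.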
                while erlin == 0:
                    try:
                        classlin = 'ligne' + str(lin)
                        resultlin = html.findAll(attrs={'class': [classlin]})
                        if resultlin:
                            for ele in resultlin:
                                resultdiv.append(ele)
                            lin += 1
                        else:
                            erlin = 1
                    except:
                        erlin = 1
                for result in resultdiv:
                    try:
                        new = {}
                        name = result.findAll(
                            attrs={'class': ["titre"]})[0].text
                        testname = searcher.correctName(name, movie['title'])
                        if not testname:
                            continue
                        detail_url = result.find("a")['href']
                        tmp = detail_url.split('/')[-1].replace(
                            '.html', '.torrent')
                        url_download = (self.urls['download'] % tmp)
                        size = result.findAll(
                            attrs={'class': ["poid"]})[0].text
                        seeder = result.findAll(
                            attrs={'class': ["seed_ok"]})[0].text
                        leecher = result.findAll(
                            attrs={'class': ["down"]})[0].text
                        age = '1'

                        verify = getTitle(movie['info']).split(' ')
                        add = 1

                        # Require every word of the CP title to appear in the
                        # release name.
                        for verify_unit in verify:
                            if name.lower().find(verify_unit.lower()) == -1:
                                add = 0

                        def extra_check(item):
                            # No provider-specific check; accept all.
                            return True

                        if add == 1:

                            new['id'] = result_id
                            new['name'] = name.strip()
                            new['url'] = url_download
                            new['detail_url'] = detail_url

                            new['size'] = self.parseSize(str(size))
                            new['age'] = self.ageToDays(age)
                            new['seeders'] = tryInt(seeder)
                            new['leechers'] = tryInt(leecher)
                            new['extra_check'] = extra_check
                            new['download'] = self.loginDownload

                            #new['score'] = fireEvent('score.calculate', new, movie, single = True)
                            #log.error('score')
                            #log.error(new['score'])

                            results.append(new)
                            result_id += 1

                    except:
                        log.error('Failed parsing cPASbien: %s',
                                  traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')
        else:
            log.debug('No search results found.')
    def _searchOnTitle(self, title, movie, quality, results):

        searcher = Searcher()
        # Hit the index page first; the response itself is unused (it
        # presumably just validates the session).
        self.getJsonData(self.urls['index'])

        if isinstance(title, str):
            title = title.decode('utf8')

        # movieYear: the movie year as referenced by CouchPotato
        movieYear = str(movie['info']['year'])
        # frTitle: the movie's French title, retrieved from TMDB
        frTitle = self.getFrenchTitle(title, movieYear)
        if frTitle is None:
            frTitle = title

        log.debug('#### CP is using this movie title : ' + title)
        log.debug('#### Searching BlueTigers for the CP title : ' + frTitle)
        request = urllib2.quote(title.encode('utf8'))
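        # Unless configured to ignore it, constrain the search to the
        # movie's year.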
        if self.conf('ignoreyear'):
            searchUrl = self.urls['search'] % request
        else:
            searchUrl = (self.urls['search'] + '&year=%s') % (request, movieYear)
        data = self.urlopen(searchUrl)
        if data:
            html = BeautifulSoup(data, "html.parser")
            lin = 1
            erlin = 0
            resultdiv = []
            while erlin == 0:
                try:
                    classlin = 'ttable_col' + str(lin)
                    resultlin = html.findAll(attrs={'class': [classlin]})
                    if resultlin:
                        for ele in resultlin:
                            resultdiv.append(ele)
                        lin += 1
                    else:
                        erlin = 1
                except:
                    erlin = 1
            for result in resultdiv:
                new = {}
                testname = 0
                resultb = result.find_all('b')
                alltext = result.find_all(text=True)
                for resulta in resultb:
                    name_real = str(resulta).replace("<b>", "").replace("</b>", "")
                    name = name_real.replace(".", " ")
                    testname = searcher.correctName(name, title)
                    if testname:
                        break
                if not testname:
                    continue

                idx = result.find_all('a')[1]['href'].replace('torrents-details.php?id=', '').replace('&hit=1', '')
                detail_url = self.urls['detail'] % idx
                url_download = self.urls['download'] % (idx, name_real)
                size = None
                for index, text in enumerate(alltext):
                    if 'Taille' in text:
                        # Size follows the 'Taille' (size) label; store it in MB.
                        size = alltext[index + 1].replace('MB', '').replace('GB', '').replace(':', '')
                        size = float(size)
                        if 'GB' in alltext[index + 1]:
                            size = size * 1024
                        break
                age = '1'
                new['id'] = idx
                new['name'] = name.strip()
                new['url'] = url_download
                new['detail_url'] = detail_url
                new['size'] = size
                #new['age'] = self.ageToDays(str(age))
                #new['seeders'] = tryInt(seeder)
                #new['leechers'] = tryInt(leecher)
                #new['extra_check'] = extra_check
                new['download'] = self.loginDownload
                results.append(new)
        else:
            log.debug('No search results found.')
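        # Second pass: nothing matched the CP title, so retry with the
        # French title fetched from TMDB.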
        if not results:
            log.debug('#### Searching BlueTigers for the FR title : ' + frTitle)
            requestFr = urllib2.quote(frTitle.encode('utf8'))
            if self.conf('ignoreyear'):
                searchUrl2 = self.urls['search'] % requestFr
            else:
                searchUrl2 = (self.urls['search'] + '&year=%s') % (requestFr, movieYear)
            data = self.urlopen(searchUrl2)
            if data:
                try:
                    html = BeautifulSoup(data, "html.parser")
                    lin = 1
                    erlin = 0
                    resultdiv = []
                    while erlin == 0:
                        try:
                            classlin = 'ttable_col' + str(lin)
                            resultlin = html.findAll(attrs={'class': [classlin]})
                            if resultlin:
                                for ele in resultlin:
                                    resultdiv.append(ele)
                                lin += 1
                            else:
                                erlin = 1
                        except:
                            erlin = 1
                    for result in resultdiv:
                        try:
                            new = {}
                            testname = 0
                            resultb = result.find_all('b')
                            alltext = result.find_all(text=True)
                            for resulta in resultb:
                                name_real = str(resulta).replace("<b>", "").replace("</b>", "")
                                name = name_real.replace(".", " ")
                                testname = searcher.correctName(name, frTitle)
                                if testname:
                                    break
                            if not testname:
                                continue

                            idx = result.find_all('a')[1]['href'].replace('torrents-details.php?id=', '').replace('&hit=1', '')
                            detail_url = self.urls['detail'] % idx
                            url_download = self.urls['download'] % (idx, name_real)
                            size = None
                            for index, text in enumerate(alltext):
                                if 'Taille' in text:
                                    # Size follows the 'Taille' (size) label; store it in MB.
                                    size = alltext[index + 1].replace('MB', '').replace('GB', '').replace(':', '')
                                    size = float(size)
                                    if 'GB' in alltext[index + 1]:
                                        size = size * 1024
                                    break
                            age = '1'
                            new['id'] = idx
                            new['name'] = name.strip()
                            new['url'] = url_download
                            new['detail_url'] = detail_url
                            new['size'] = size
                            #new['age'] = self.ageToDays(str(age))
                            #new['seeders'] = tryInt(seeder)
                            #new['leechers'] = tryInt(leecher)
                            #new['extra_check'] = extra_check
                            new['download'] = self.loginDownload
                            results.append(new)

                        except:
                            log.error('Failed parsing BlueTigers: %s', traceback.format_exc())

                except AttributeError:
                    log.debug('No search results found.')
            else:
                log.debug('No search results found.')
        else:
            log.debug('Results found with the CP title; skipping the FR title search.')