# Assumed context: these functions come from a Kodi subtitle-service addon
# (service.py). Module-level helpers used below (self_host,
# self_release_pattern, req_headers, get_language_info,
# normalize_string/normalizeString, log, append_subtitle) are assumed to be
# defined elsewhere in that addon.
import os
import re
import socket
import urllib2  # Python 2, as in the original code

from BeautifulSoup import BeautifulSoup  # BeautifulSoup 3 API (findNext/findPrevious)


def query(searchurl, langs, file_original_path, filename_string):
    sublinks = []
    socket.setdefaulttimeout(3)
    request = urllib2.Request(searchurl)
    request.add_header("Pragma", "no-cache")
    page = urllib2.build_opener().open(request)
    content = page.read()
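    # Addic7ed pages embed a snippet ("The safer, easier way" is apparently
    # PayPal's donate-button text) with an unterminated tag; patching in a
    # closing quote and bracket keeps BeautifulSoup from mangling the page.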
    content = content.replace("The safer, easier way", 'The safer, easier way " />')
    soup = BeautifulSoup(content)

    file_name = str(os.path.basename(file_original_path)).split("-")[-1].lower()
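    # file_name is the text after the last "-" in the basename, roughly the
    # release tag (e.g. "Show.1x01-LOL.mp4" -> "lol.mp4"); it is matched
    # against the release-team string below to guess whether the sub is in sync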

    for langs_html in soup("td", {"class": "language"}):

        try:
            subs = langs_html.findPrevious("td", {"class": "NewsTitle", "colspan": "3"})
            fullLanguage = str(langs_html).split('class="language">')[1].split("<a")[0].replace("\n", "")
            subteams = self_release_pattern.match(str(subs.contents[1])).groups()[0]

            # flag as in-sync when the release-team string contains the file's release tag
            hashed = file_name in subteams.replace("WEB-DL-", "").lower()

            try:
                lang = get_language_info(fullLanguage)
            except Exception:
                lang = None  # unknown language; the entry is skipped below

            statusTD = langs_html.findNext("td")
            status = statusTD.find("b").string.strip()

            linkTD = statusTD.findNext("td")
            link = "%s%s" % (self_host, linkTD.find("a")["href"])

            HI = bool(subs.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll("img", {"title": "Hearing Impaired"}))

            if status == "Completed" and lang and (lang["3let"] in langs):
                sublinks.append(
                    {
                        "rating": "0",
                        "filename": "%s-%s" % (filename_string, subteams),
                        "sync": hashed,
                        "link": link,
                        "lang": lang,
                        "hearing_imp": HI,
                    }
                )
        except Exception:
            # malformed row: log it and continue with the next language cell
            log(__name__, "ERROR IN BS")

    sublinks.sort(key=lambda x: not x["sync"])  # release-matched (synced) subs first
    log(__name__, "sub='%s'" % (sublinks))

    for s in sublinks:
        append_subtitle(s)
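
# Hypothetical invocation of the variant above (the URL shape and literal
# arguments are illustrative assumptions, not from the source):
#   query("%s/search.php?search=Show+1x01" % self_host, ["eng", "spa"],
#         "/videos/Show.1x01-LOL.mp4", "Show 1x01")
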
def query(search_url, languages, file_original_path=None):
    sub_links = []
    socket.setdefaulttimeout(20)
    request = urllib2.Request(search_url, headers=req_headers)
    request.add_header('Pragma', 'no-cache')
    page = urllib2.build_opener().open(request)
    content = page.read()
    content = content.replace("The safer, easier way", "The safer, easier way \" />")
    soup = BeautifulSoup(content)

    if file_original_path is not None:
        file_original_path_clean = normalize_string(file_original_path.encode('utf-8'))
        file_name = str(os.path.basename(file_original_path_clean)).split("-")[-1].lower()
    else:
        file_name = None

    for language_html in soup("td", {"class": "language"}):
        box = language_html.findPrevious("td", {"class": "NewsTitle", "colspan": "3"})
        full_language = str(language_html).split('class="language">')[1].split('<a')[0].replace("\n", "")
        sub_teams = self_release_pattern.match(str(box.contents[1])).groups()[0]

        hashed = file_name is not None and file_name in sub_teams.replace("WEB-DL-", "").lower()

        sub_language = get_language_info(full_language)
        if sub_language is None:
            sub_language = {}

        status_td = language_html.findNext("td")
        status = status_td.find("b").string.strip()

        link_td = status_td.findNext("td")
        link = "%s%s" % (self_host, link_td.find("a")["href"])

        hearing_imp = bool(box.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll('img', {'title': 'Hearing Impaired'}))

        if status == "Completed" and sub_language.get('3let') in languages:
            title = soup.find('span', {'class': 'titulo'}).contents[0].strip(' \t\n\r')
            sub_links.append(
                {'rating': '0',
                 'filename': "%s - %s" % (title, sub_teams),
                 'sync': hashed,
                 'link': link,
                 'lang': sub_language,
                 'hearing_imp': hearing_imp})

    sub_links.sort(key=lambda x: not x['sync'])
    log(__name__, "sub='%s'" % sub_links)

    for sub_link in sub_links:
        append_subtitle(sub_link)


def query_TvShow(name, season, episode, langs, file_original_path):
    sublinks = []
    
    # slugify the show name for Addic7ed URLs; the extra replaces handle
    # "$#*! My Dad Says" and apostrophes in titles such as "That '70s Show"
    name = name.lower().replace(" ", "_").replace("$#*!", "shit").replace("'", "")
    searchurl = "%s/serie/%s/%s/%s/addic7ed" % (self_host, name, season, episode)
    
    socket.setdefaulttimeout(3)
    request = urllib2.Request(searchurl)
    request.add_header('Pragma', 'no-cache')
    page = urllib2.build_opener().open(request)
    content = page.read()
    content = content.replace("The safer, easier way", "The safer, easier way \" />")
    soup = BeautifulSoup(content)
    
    file_name = str(os.path.basename(file_original_path)).split("-")[-1].lower()

    for subs in soup("td", {"class":"NewsTitle", "colspan" : "3"}):
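      # traversal is inverted relative to the query() variants above: iterate
      # the release-header cells and look forward for each language cell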

      try:
        langs_html = subs.findNext("td", {"class" : "language"})
        fullLanguage = str(langs_html).split('class="language">')[1].split('<a')[0].replace("\n","")
        subteams = self_release_pattern.match(str(subs.contents[1])).groups()[0]

        hashed = file_name in subteams.replace("WEB-DL-", "").lower()

        try:
          lang = get_language_info(fullLanguage)
        except Exception:
          lang = None

        statusTD = langs_html.findNext("td")
        status = statusTD.find("b").string.strip()

        linkTD = statusTD.findNext("td")
        link = "%s%s" % (self_host,linkTD.find("a")["href"])

        HI = bool(subs.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll('img', {'title': 'Hearing Impaired'}))

        if status == "Completed" and lang and (lang['3let'] in langs):
          sublinks.append({'rating': '0',
                           'filename': "%s.S%.2dE%.2d-%s" % (name.replace("_", ".").title(), int(season), int(episode), subteams),
                           'sync': hashed,
                           'link': link,
                           'lang': lang,
                           'hearing_imp': HI})
      except Exception:
        log(__name__, "ERROR IN BS")
    
    sublinks.sort(key=lambda x: not x['sync'])
    log(__name__, "sub='%s'" % (sublinks))
    
    for s in sublinks:
      append_subtitle(s)


def query(searchurl, langs, file_original_path, filename_string):
  sublinks = []
  socket.setdefaulttimeout(20)
  request = urllib2.Request(searchurl, headers=req_headers)
  request.add_header('Pragma', 'no-cache')
  page = urllib2.build_opener().open(request)
  content = page.read()
  content = content.replace("The safer, easier way", "The safer, easier way \" />")
  soup = BeautifulSoup(content)

  file_original_path_clean = normalizeString(file_original_path.encode('utf-8'))
  file_name = str(os.path.basename(file_original_path_clean)).split("-")[-1].lower()

  for langs_html in soup("td", {"class" : "language"}):

    try:
      subs = langs_html.findPrevious("td", {"class":"NewsTitle", "colspan" : "3"})
      fullLanguage = str(langs_html).split('class="language">')[1].split('<a')[0].replace("\n","")
      subteams = self_release_pattern.match(str(subs.contents[1])).groups()[0]

      hashed = file_name in subteams.replace("WEB-DL-", "").lower()

      try:
        lang = get_language_info(fullLanguage)
      except Exception:
        lang = None

      statusTD = langs_html.findNext("td")
      status = statusTD.find("b").string.strip()

      linkTD = statusTD.findNext("td")
      link = "%s%s" % (self_host,linkTD.find("a")["href"])

      HI = bool(subs.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll('img', {'title': 'Hearing Impaired'}))

      if status == "Completed" and lang and (lang['3let'] in langs):
        sublinks.append({'rating': '0',
                         'filename': "%s-%s" % (filename_string, subteams),
                         'sync': hashed,
                         'link': link,
                         'lang': lang,
                         'hearing_imp': HI})
    except Exception:
      log(__name__, "ERROR IN BS")

  sublinks.sort(key=lambda x: not x['sync'])
  log(__name__, "sub='%s'" % (sublinks))

  for s in sublinks:
    append_subtitle(s)


def get_subs(url, langs, filename_string, cont=None):
    if cont:
        content = cont
    else:
        content = get_url(url)
    sublinks = []
    regex = '''>Version(.+?),.+?uploaded(.+?)table footer'''
    regex2 = '''(?:|\'>(.+?)</a>(.+?)</td>.+?)(?:|Translated(.+?))language">(.+?)<.+?(?:|<b>(\d{1,2}).+?)Download" href="(.+?)".+?(?:.+?(impaired)|)'''
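    # regex carves the page into per-version blocks ("Version <tag>, ...
    # uploaded ..." down to the table footer); regex2 then pulls uploader,
    # editor, translator, language, completion percent, download href and
    # the hearing-impaired marker out of each block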
    for match in re.compile(regex, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(content):
        ver = match[0]
        infos = re.compile(regex2, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(match[1])
        uploader = edited = ''
        for uploadr, edit, translator, language, percent, rawurl, impaired in infos:
            lang = get_language_info(language)
            if lang and lang['3let'] in langs:
                if uploadr:
                    uploader = uploadr
                if edit:
                    edited = re.sub(r'\s+', '', edit)
                link = "%s%s" % (self_host, rawurl)
                hi = '1' if impaired else ''
                # Addic7ed reports completion as a percent; map 0-100 onto a 0-5 rating
                rating = str(int(round(float(percent) / 20))) if percent else '5'
                sublinks.append({'rating': rating,
                                 'filename': "%s-%s-%s-%s" % (filename_string, ver, uploader, edited),
                                 'sync': '',
                                 'link': link,
                                 'lang': lang,
                                 'hearing_imp': hi})
                #log(__name__, "rating: \"%s\"" % (rating))
    return sublinks
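
# Hypothetical invocation (the episode-page URL shape is an assumption,
# mirroring the query_TvShow() URL above):
#   subs = get_subs("%s/serie/Show_Name/1/1/addic7ed" % self_host,
#                   ["eng"], "Show.Name.1x01")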