Example 1
def getVideoJSON(video_url):
  url = BASE_URL + API_URL + "title_page;title=" + video_url
  r = requests.get(url)
  if r.status_code != 200:
    common.log("Failed to get JSON for "+url)
    return None
  return r.json()
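
A hedged usage sketch (the title slug below is invented; real slugs come from the plugin's listing screens):

if __name__ == "__main__":
    data = getVideoJSON("some-title-slug")  # hypothetical slug
    if data is None:
        print("request failed or returned non-200")
    else:
        print(sorted(data.keys()))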
	def getThemen(self):
		html = common.fetchPage({'link': self.__urlTopics})
		html_content = html.get("content")

		content = common.parseDOM(html_content,name='section',attrs={})
		#topics = common.parseDOM(content,name='section',attrs={'class':'item_wrapper'})

		for topic in content:
			title = common.parseDOM(topic,name='h3',attrs={'class':'item_wrapper_headline.subheadline'})
			if title:
				title = common.replaceHTMLCodes(title[0])

				link = common.parseDOM(topic,name='a',attrs={'class':'more.service_link.service_link_more'},ret="href")
				link = common.replaceHTMLCodes(link[0])

				image = common.parseDOM(topic,name='img',ret="src")
				image = common.replaceHTMLCodes(image[0]).replace("width=395","width=500").replace("height=209.07070707071","height=265")

				descs = common.parseDOM(topic,name='h4',attrs={'class':'item_title'})
				description = ""
				for desc in descs:
					description += "* " + common.replaceHTMLCodes(desc) + "\n"

				parameters = {"link" : link, "mode" : "getThemenDetail"}
				url = sys.argv[0] + '?' + urllib.parse.urlencode(parameters)
				self.html2ListItem(title,image,"",description,"","","",url,None,True, False)
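
The parameter routing used throughout these listings, in isolation; the handle and link values are placeholders standing in for sys.argv[0] and a scraped href:

import urllib.parse

handle = "plugin://plugin.video.example/"  # placeholder for sys.argv[0]
parameters = {"link": "http://example.com/topic", "mode": "getThemenDetail"}
print(handle + '?' + urllib.parse.urlencode(parameters))
# plugin://plugin.video.example/?link=http%3A%2F%2Fexample.com%2Ftopic&mode=getThemenDetail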
	def getLaneTopicOverview(self,url):
		html = common.fetchPage({'link': url})
		container = common.parseDOM(html.get("content"),name='section',attrs={'class': "b-list-container"},ret=False)
		
		items = common.parseDOM(container,name='div',attrs={'class': "b-lane.*?"},ret=False)

		for item in items:
			title_link = common.parseDOM(item,name='h3',attrs={'class': "title"},ret=False)

			title = common.parseDOM(title_link,name='a',attrs={},ret=False)
			title = common.replaceHTMLCodes(title[0])
			
			link = common.parseDOM(title_link,name='a',attrs={},ret='href')
			link = link[0]
			link = "%s%s" % (self.__urlBase,link)
			
			desc = ""
			desc = self.formatDescription(title,"","",desc,"","")

			figure = common.parseDOM(item,name='figure',attrs={'class':'teaser-img'},ret=False)
			image = common.parseDOM(figure,name='img',attrs={},ret='src')
			image = common.replaceHTMLCodes(image[0])

			parameters = {"link" : link, "banner" : image, "mode" : "getArchiveDetail"}

			url = sys.argv[0] + '?' + urllib.parse.urlencode(parameters)
			self.html2ListItem(title,image,"",desc,"","","",url,None,True, False)
Example 4
def getAlphas():
  """
  Returns a list of all letters in the alphabet that have programs.
  """
  html = getPage(URL_A_TO_O)
  container = common.parseDOM(html, "ul", attrs = { "class" : "[^\"']*play_alphabetic-list[^\"']*" })

  if not container:
    helper.errorMsg("No container found!")
    return None

  letters = common.parseDOM(container[0], "h3", attrs = { "class" : "[^\"']*play_alphabetic-list__letter[^\"']*" })

  if not letters:
    helper.errorMsg("Could not find any letters!")
    return None

  alphas = []

  for letter in letters:
    alpha = {}
    alpha["title"] = helper.convertChar(letter)
    alpha["char"] =  letter
    alphas.append(alpha)

  return alphas
Example 5
def getLatestNews():
  """
  Returns a list of latest news programs.
  """
  url = BASE_URL+API_URL+"cluster_latest;cluster=nyheter"
  r = requests.get(url)
  if r.status_code != 200:
    common.log("Could not get JSON for url: "+url)
    return None

  programs = []
  for item in r.json():
    live_str = ""
    thumbnail = item.get("poster", "")
    if not thumbnail:
      thumbnail = item.get("thumbnail", "")
    if item["broadcastedNow"]:
      live_str = " [COLOR red](Live)[/COLOR]"
    program = {
        "title" : common.replaceHTMLCodes(item["programTitle"] + " " + item["title"] + live_str),
        "thumbnail" : helper.prepareThumb(thumbnail, baseUrl=BASE_URL),
        "url" : "video/" + str(item["versions"][0]["articleId"])
        }
    programs.append(program)
  return programs
Example 6
def grifthost(url):
    try:
        url = url.replace('/embed-', '/').split('-')[0]

        result = getUrl(url, close=False).result

        post = {}
        f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
        k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'method_free': '', 'method_premium': ''})
        post = urllib.urlencode(post)

        import time
        request = urllib2.Request(url, post)

        for i in range(0, 4):
            try:
                response = urllib2.urlopen(request, timeout=5)
                result = response.read()
                response.close()
                url = re.compile('(<a .+?</a>)').findall(result)
                url = [i for i in url if '/download.png' in i][-1]
                url = common.parseDOM(url, "a", ret="href")[0]
                return url
            except:
                time.sleep(1)
    except:
        return
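
Most of the hoster resolvers in this listing share one idiom: harvest a form's hidden inputs and replay them as a POST body. A self-contained sketch of just that step, with an inline form and a plain regex standing in for common.parseDOM:

import re

FORM = '''<form name="F1" method="POST">
<input type="hidden" name="op" value="download2">
<input type="hidden" name="id" value="abc123">
<input type="submit" name="method_free" value="Free Download">
</form>'''

# Collect every hidden input's name/value pair, then add the button fields
# the server expects, as the resolvers do with parseDOM.
post = dict(re.findall(r'<input type="hidden" name="([^"]+)" value="([^"]*)">', FORM))
post.update({'method_free': '', 'method_premium': ''})
print(post)  # {'op': 'download2', 'id': 'abc123', 'method_free': '', 'method_premium': ''}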
def movdivx(url):
    try:
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://www.movdivx.com/%s' % url
 
        result = getUrl(url).result

        post = {}
        f = common.parseDOM(result, "Form", attrs = { "name": "myForm" })[0]
        k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'method_free': 'Continue to Stream'})
        post = urllib.urlencode(post)

        result = getUrl(url, post=post).result

        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack(result)

        url = re.compile("'file' *, *'(.+?)'").findall(result)
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url += common.parseDOM(result, "embed", ret="src")
        url = 'http://' + url[-1].split('://', 1)[-1]
        return url
    except:
        return
def getFile(subject, filename, folder):
    common.log(subject)
    global m

    file = findInFolder(subject, folder)
    if file:
        dest, dest_filename = os.path.split(filename)

        delay = 1
        res = False
        while not res and delay < 10:
            try:
                #m.download(file, dest_path=dest, dest_filename=dest_filename)
                megaDownload(file, dest_path=dest, dest_filename=dest_filename)
                res = True
            except errors.RequestError as e:
                if e[0] == -3:
                    common.log("[%s] EAGAIN: Retrying with exponential backoff: %s " %( repr(e[0]), repr(delay)))
                    time.sleep(delay)
                    delay += 1
                else:
                    common.log("errors.RequestError: " + repr(e))
                    sys.exit(1)

        common.log("Done: " + repr(res))
        return True
    else:
        common.log("Failure")
        return False
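
getFile retries with a linearly increasing delay. If genuinely exponential backoff is wanted, a minimal standalone sketch (the function name and the IOError stand-in are illustrative, not part of the mega API):

import time

def retry_with_backoff(action, max_delay=10, base=1):
    """Call action() until it succeeds, doubling the pause after each
    transient failure; give up once the pause would exceed max_delay."""
    delay = base
    while True:
        try:
            return action()
        except IOError:  # stand-in for errors.RequestError code -3
            if delay > max_delay:
                raise
            time.sleep(delay)
            delay *= 2  # exponential rather than linear growth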
Example 9
def bestreams(url):
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://bestreams.net/%s' % url

        result = getUrl(url, mobile=True, close=False).result

        post = {}
        f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[-1]
        k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'imhuman': 'Proceed to video'})
        post = urllib.urlencode(post)

        import time

        for i in range(0, 5):
            try:
                result = getUrl(url, mobile=True, close=False, post=post).result
                if 'Proceed to video' in result: raise Exception()

                url = common.parseDOM(result, "div", attrs = { "id": "main" })[0]
                url = common.parseDOM(url, "a", ret="href")[0]
                return url
            except:
                time.sleep(1)
    except:
        return
Example 10
def sum_files(inst_name, accumulator, files):
    """ Custom sum for multiple runs

        Left for compartibility as internal summation had some unspecified problems.
        Will go in a future
    """
    accum_name = accumulator
    if isinstance(accum_name,api.Workspace): # it is actually a workspace
        accum_name  = accumulator.name()


    if isinstance(files, list):
         #tmp_suffix = '_plus_tmp'

         for filename in files:
              print 'Summing run ',filename,' to workspace ',accumulator
              temp = common.load_run(inst_name,filename, force=False,load_with_workspace=Reducer.load_monitors_with_workspace)

              if accum_name in mtd: # add current workspace to the existing one
                  if not isinstance(accumulator,api.Workspace):
                      accumulator = mtd[accum_name]
                  accumulator+=  temp
                  DeleteWorkspace(Workspace=temp)
              else:
                   print 'Create output workspace: '
                   accumulator=RenameWorkspace(InputWorkspace=temp,OutputWorkspace=accum_name)

         return accumulator
    else:
        temp = common.load_run(inst_name,files, force=False,load_with_workspace=Reducer.load_monitors_with_workspace)
        accumulator=RenameWorkspace(InputWorkspace=temp,OutputWorkspace=accum_name)
        return accumulator
Example 11
def hlsStrip(videoUrl):
    """
    Extracts the stream that supports the
    highest bandwidth and is not using the avc1.77.30 codec.
    """
    common.log("Stripping file: " + videoUrl)

    ufile = urllib.urlopen(videoUrl)
    lines = ufile.readlines()

    hlsurl = ""
    bandwidth = 0
    foundhigherquality = False

    for line in lines:
      if foundhigherquality:
        # The stream URL is on the line following the header
        foundhigherquality = False
        hlsurl = line
      if "EXT-X-STREAM-INF" in line: # The header
        if not "avc1.77.30" in line:
          match = re.match(r'.*BANDWIDTH=(\d+).+', line)
          if match:
            if bandwidth < int(match.group(1)):
              foundhigherquality = True
              bandwidth = int(match.group(1))
          continue

    ufile.close()

    if bandwidth == 0:
      return None

    hlsurl = hlsurl.rstrip()
    common.log("Returned stream url : " + hlsurl)
    return hlsurl
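
To see the playlist walk in isolation, a hedged sketch against an inline master playlist (all URIs fabricated):

import re

SAMPLE = """#EXTM3U
#EXT-X-STREAM-INF:BANDWIDTH=1000000,CODECS="avc1.66.30,mp4a.40.2"
http://example.com/low.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=2500000,CODECS="avc1.77.30,mp4a.40.2"
http://example.com/blocked.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=2000000,CODECS="avc1.66.30,mp4a.40.2"
http://example.com/high.m3u8
"""

def pick_stream(lines):
    best_url, best_bw, take_next = "", 0, False
    for line in lines:
        if take_next:  # the URI line follows its STREAM-INF header
            best_url, take_next = line.strip(), False
        elif "EXT-X-STREAM-INF" in line and "avc1.77.30" not in line:
            m = re.match(r'.*BANDWIDTH=(\d+)', line)
            if m and int(m.group(1)) > best_bw:
                best_bw, take_next = int(m.group(1)), True
    return best_url or None

print(pick_stream(SAMPLE.splitlines()))  # http://example.com/high.m3u8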
def deleteFile(subject, folder):
    common.log(subject)
    global m

    file = findInFolder(subject, folder)

    if file:
        delay = 1
        res = False
        while not res and delay < 10:
            try:
                res = m.destroy(file[0])
            except errors.RequestError as e:
                if e[0] == -3:
                    common.log("[%s] EAGAIN: Retrying with exponential backoff: %s " %( repr(e[0]), repr(delay)))
                    time.sleep(delay)
                    delay += 1
                else:
                    common.log("errors.RequestError: " + repr(e))
                    sys.exit(1)

        common.log("Done: " + repr(res))
        return True
    else:
        common.log("Failure")
        return False
 def get_topics(self):
     html = self.get_HTML(URLTOPICS)
     for topic_div in xbmc_common.parseDOM(html, 'div', {'class':'topics__list__topic'}):
         title = xbmc_common.parseDOM(topic_div, 'a')[0]
         link = xbmc_common.parseDOM(topic_div, 'a', ret='href')[0]
         topic = link.split('/')[-1]
         yield title, topic
Example 14
    def getArchiv(self,url):
        html = common.fetchPage({'link': url})
        articles = common.parseDOM(html.get("content"),name='a',attrs={'class': 'day_wrapper'})
        articles_href = common.parseDOM(html.get("content"),name='a',attrs={'class': 'day_wrapper'},ret="href")
        i = 0
            
        for article in articles:
            link = articles_href[i]
            i = i+1

            day = common.parseDOM(article,name='strong',ret=False)
            if len(day) > 0:
                day = day[0].encode("utf-8")
            else:
                day = ''
              
            date = common.parseDOM(article,name='small',ret=False)
            if len(date) > 0:
                date = date[0].encode("utf-8")
            else:
                date = ''
                
            title = day + " - " + date
            
            parameters = {"link" : link,"title" : title,"banner" : "","backdrop" : "", "mode" : "getArchivDetail"}
            url = sys.argv[0] + '?' + urllib.urlencode(parameters)
            self.html2ListItem(title,"","","","",date,"",url,None,True,'false')
Example 15
    def livetv_list2(self, url):
        try:
            url = getattr(link(), url)
            result = getUrl(url, timeout='30').result
            result = result.decode('iso-8859-1').encode('utf-8')
            result = result.replace('\n','')

            pages = re.compile('(<a href="/en/team/.+?">.+?</a>)').findall(result)
        except:
            return

        for page in pages:
            try:
                name = common.parseDOM(page, "a")[0]
                name = re.sub('<.+?>|</.+?>', '', name)
                name = common.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = common.parseDOM(page, "a", ret="href")[0]
                url = re.sub('/calendar/', '/video/', url)
                url = '%s%s' % (link().livetv_base, url)
                url = common.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url})
            except:
                pass

        try:
            self.list = [i for n,i in enumerate(self.list) if i not in self.list[:n]]
            self.list = sorted(self.list, key=itemgetter('name'))
        except:
            pass

        return self.list
def cloudyvideos(url):
    try:
        result = getUrl(url).result

        post = {}
        f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
        k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'method_free': '', 'method_premium': ''})
        post = urllib.urlencode(post)

        import time
        request = urllib2.Request(url, post)

        for i in range(0, 4):
            try:
                response = urllib2.urlopen(request, timeout=10)
                result = response.read()
                response.close()
                btn = common.parseDOM(result, "input", ret="value", attrs = { "class": "graybt.+?" })[0]
                url = re.compile('href=[\'|\"](.+?)[\'|\"]><input.+?class=[\'|\"]graybt.+?[\'|\"]').findall(result)[0]
                return url
            except:
                time.sleep(1)
    except:
        return
Example 17
def mp4Handler(jsonObj):
  """
  Returns a mp4 stream URL.

  If there are several mp4 streams in the JSON object:
  pick the one with the highest bandwidth.

  Some programs are available with multiple mp4 streams
  for different bitrates. This function ensures that the one
  with the highest bitrate is chosen.

  Can possibly be extended to support some kind of quality
  setting in the plugin.
  """
  videos = []

  # Find all mp4 videos
  for video in jsonObj["video"]["videoReferences"]:
    if video["url"].endswith(".mp4"):
      videos.append(video)

  if len(videos) == 1:
    return videos[0]["url"]

  bitrate = 0
  url = ""

  # Find the video with the highest bitrate
  for video in videos:
    if video["bitrate"] > bitrate:
      bitrate = video["bitrate"]
      url = video["url"]

  common.log("Info: bitrate="+str(bitrate)+" url="+url)
  return url
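
The same selection rule as a self-contained miniature over a fabricated videoReferences payload:

jsonObj = {"video": {"videoReferences": [
    {"url": "http://example.com/a.mp4", "bitrate": 1400},
    {"url": "http://example.com/b.m3u8", "bitrate": 2500},
    {"url": "http://example.com/c.mp4", "bitrate": 2000},
]}}

mp4s = [v for v in jsonObj["video"]["videoReferences"] if v["url"].endswith(".mp4")]
best = max(mp4s, key=lambda v: v["bitrate"])
print(best["url"])  # http://example.com/c.mp4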
Example 18
def getProgramsByLetter(letter):
  """
  Returns a list of all programs starting with the supplied letter.
  """
  letter = urllib.unquote(letter)
  url = BASE_URL+API_URL+"all_titles"
 
  r = requests.get(url)
  if r.status_code != 200:
    common.log("Did not get any response for: "+url)
    return None

  letter = letter.decode("utf-8")
  pattern = "[%s]" % letter.upper()

  titles = r.json()
  items = []

  for title in titles:
    if re.search(pattern, title["programTitle"][0].upper()):
      item = {}
      item["url"] = "/" + title["contentUrl"]
      item["title"] = common.replaceHTMLCodes(title["programTitle"])
      item["thumbnail"] = ""
      items.append(item)

  return items
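
The character-class match in isolation (titles invented):

import re

pattern = "[%s]" % "A"
titles = ["Aktuellt", "Bolibompa", "agenda"]
print([t for t in titles if re.search(pattern, t[0].upper())])
# ['Aktuellt', 'agenda']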
Example 19
    def lfv_list2(self, url, meta):
        try:
            result = getUrl(url, timeout='30').result

            title = common.parseDOM(result, "h1", attrs = { "class": "title" })[0]
            title = title.split(':', 1)[-1].split('>', 1)[-1].split('<', 1)[0].strip()

            videos = result.replace('"//', '"http://').replace("'//", '"http://')
            videos = re.findall('[\'|\"](http://.+?)[\'|\"]', videos, re.I)
            videos = uniqueList(videos).list
            videos = [i for i in videos if any(i.startswith(x) for x in resolver().hostList)]
            videos = [i for i in videos if not i.endswith('.js')]
        except:
            return

        for video in videos:
            try:
                name = title
                name = common.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = video
                url = common.replaceHTMLCodes(url)
                if url.startswith('//') : url = 'http:' + url
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'meta': meta})
            except:
                pass

        return self.list
Example 20
 def getLinks(self,url,banner,playlist):
     playlist.clear()
     url = str(urllib.unquote(url))
     if banner != None:
         banner = urllib.unquote(banner)
     
     html = common.fetchPage({'link': url})
     data = common.parseDOM(html.get("content"),name='div',attrs={'class': "jsb_ jsb_VideoPlaylist"},ret='data-jsb')
     
     data = data[0]
     data = common.replaceHTMLCodes(data)
     data = json.loads(data)
     
     video_items = data.get("playlist")["videos"]
     
     try:
         current_title_prefix = data.get("selected_video")["title_prefix"]
         current_title = data.get("selected_video")["title"]
         current_desc = data.get("selected_video")["description"].encode('UTF-8')
         current_duration = data.get("selected_video")["duration"]
         current_preview_img = data.get("selected_video")["preview_image_url"]
         if self.useSubtitles:
             if "subtitles" in data.get("selected_video"):
                 current_subtitles = []
                 for sub in data.get("selected_video")["subtitles"]:
                     current_subtitles.append(sub.get(u'src'))
             else:
                 current_subtitles = None
         else:
             current_subtitles = None
         current_id = data.get("selected_video")["id"]
         current_videourl = self.getVideoUrl(data.get("selected_video")["sources"])
     except Exception as e:
         current_subtitles = None
         print(e)
def movreel(url):
    try:
        user = xbmcaddon.Addon().getSetting("movreel_user")
        password = xbmcaddon.Addon().getSetting("movreel_password")

        login = '******'
        post = {'op': 'login', 'login': user, 'password': password, 'redirect': url}
        post = urllib.urlencode(post)
        result = getUrl(url, close=False).result
        result += getUrl(login, post=post, close=False).result

        post = {}
        f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
        k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'method_free': '', 'method_premium': ''})
        post = urllib.urlencode(post)

        import time
        request = urllib2.Request(url, post)

        for i in range(0, 3):
            try:
                response = urllib2.urlopen(request, timeout=10)
                result = response.read()
                response.close()
                url = re.compile('(<a .+?</a>)').findall(result)
                url = [i for i in url if 'Download Link' in i][-1]
                url = common.parseDOM(url, "a", ret="href")[0]
                return url
            except:
                time.sleep(1)
    except:
        return
Example 22
def getChannels():
  """
  Returns the live channels from the page "Kanaler".
  """
  url = BASE_URL+API_URL+"channel_page"
  r = requests.get(url)
  if r.status_code != 200:
    common.log("Could not get response for: "+url)
    return None
  contents = r.json()

  items = []

  for channel in contents["channels"]:
    item = {}
    program_title = channel["schedule"][0]["title"]
    item["title"] = channel["name"]+" - "+program_title
    item["thumbnail"] = \
      "http://svtplay.se//public/images/channels/posters/%s.png" % channel["title"]
    item["info"] = {}
    try:
      item["info"]["plot"] = channel["schedule"][0]["titlePage"]["description"]
      item["info"]["fanart"] = channel["schedule"][0]["titlePage"]["thumbnailLarge"]
      item["info"]["title"] = channel["schedule"][0]["titlePage"]["title"]
    except KeyError:
      # Some items are missing titlePage, skip them
      pass
    for videoRef in channel["videoReferences"]:
      if videoRef["playerType"] == "ios":
        item["url"] = videoRef["url"]
    items.append(item)

  return items
Example 23
def getEpisodes(title):
  """
  Returns the episodes for a program URL.
  """
  url = BASE_URL+API_URL+"video_title_page;title="+title
  r = requests.get(url)
  if r.status_code != 200:
    common.log("Could not get JSON for "+url)
    return None
  programs = []
  for item in r.json()["relatedVideos"]["episodes"]:
    program = {}
    program["title"] = item["title"]
    try:
      program["title"] = program["title"] + "[COLOR green] (S%sE%s)[/COLOR]" % (str(item["season"]), str(item["episodeNumber"]))
    except KeyError:
      # Suppress: some episodes lack season/episodeNumber
      pass
    program["url"] = "video/" + str(item["id"])
    program["thumbnail"] = helper.prepareThumb(item.get("thumbnail", ""), BASE_URL)
    info = {}
    info["plot"] = item.get("description", "")
    info["fanart"] = helper.prepareFanart(item.get("poster", ""), BASE_URL)
    program["info"] = info
    programs.append(program)
  return programs
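
The season/episode decoration step on its own, with a fabricated item:

item = {"title": "Avsnitt 5", "season": 2, "episodeNumber": 5}
title = item["title"]
try:
    title += "[COLOR green] (S%sE%s)[/COLOR]" % (item["season"], item["episodeNumber"])
except KeyError:
    pass  # items without season/episodeNumber keep the bare title
print(title)  # Avsnitt 5[COLOR green] (S2E5)[/COLOR]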
def _180upload(url):
    try:
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://180upload.com/embed-%s.html' % url

        result = getUrl(url).result

        post = {}
        f = common.parseDOM(result, "form", attrs = { "id": "captchaForm" })[0]
        k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post = urllib.urlencode(post)

        result = getUrl(url, post=post).result

        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack(result)

        url = re.compile("'file' *, *'(.+?)'").findall(result)
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url += common.parseDOM(result, "embed", ret="src")
        url = 'http://' + url[-1].split('://', 1)[-1]
        return url
    except:
        return
def grifthost(url):
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://grifthost.com/embed-%s.html' % url

        result = getUrl(url).result

        try:
            post = {}
            f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
            f = f.replace('"submit"', '"hidden"')
            k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
            for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
        except:
            pass

        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack(result)

        url = re.compile("'file' *, *'(.+?)'").findall(result)
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url += common.parseDOM(result, "embed", ret="src")
        url = 'http://' + url[-1].split('://', 1)[-1]
        return url
    except:
        return
Example 26
    def convert_to_energy(self, mono_run, ei, white_run=None, mono_van=None,\
                          abs_ei=None, abs_white_run=None, save_path=None, Tzero=None, \
                          motor=None, offset=None):
        """
        One-shot function to convert the given runs to energy
        """
        # Check if we need to perform the absolute normalisation first
        if mono_van is not None:
            if abs_ei is None:
                abs_ei = ei
            mapping_file = self.abs_map_file
            spectrum_masks = self.spectra_masks 
            monovan_wkspace = self.mono_van(mono_van, abs_ei, abs_white_run, mapping_file, spectrum_masks)
            
            # TODO: Need a better check than this...
            if (abs_white_run is None):
                self.log("Performing Normalisation to Mono Vanadium.")
                norm_factor = self.calc_average(monovan_wkspace)
            else:
                self.log("Performing Absolute Units Normalisation.")
                # Perform Abs Units...
                norm_factor = self.monovan_abs(monovan_wkspace)
            mtd.deleteWorkspace(monovan_wkspace.getName())
        else:
            norm_factor = None

        # Figure out what to call the workspace 
        result_name = mono_run
        if result_name is not None:
            result_name = common.create_resultname(mono_run)
        
        # Main run file conversion
        sample_wkspace = self.mono_sample(mono_run, ei, white_run, self.map_file,
                                          self.spectra_masks, result_name, Tzero)
        if norm_factor is not None:
            sample_wkspace /= norm_factor

        
        # Calculate psi from the sample environment motor and offset
        if (offset is None):
            self.motor_offset = 0
        else:
            self.motor_offset = float(offset)
        
        self.motor=0
        if motor is not None:
            # Check if the motor name exists in the run's sample logs
            if sample_wkspace.getRun().hasProperty(motor):
                self.motor=sample_wkspace.getRun()[motor].value[0]
                self.log("Motor value is %s" % self.motor)
            else:
                self.log("Could not find such sample environment log. Will use psi=offset")
        self.psi = self.motor+self.motor_offset
        # Save then finish
        self.save_results(sample_wkspace, save_path)
        # Clear loaded raw data to free up memory
        common.clear_loaded_data()
        
        return sample_wkspace
Example 27
def drawXspf(html,icon=''):
	lists = common.parseDOM(html,"track") #rusian acelive format
	if len(lists)>0:
		for item in lists:
			name = common.parseDOM(item,"title")[0].encode("utf-8")
			value = common.parseDOM(item,"location")[0].encode("utf-8")
			logger.info("Added: "+name+", url: "+value)
			add_dir(name, value, 2, icon,'', 0)
def primeshare(url):
    try:
        result = getUrl(url, mobile=True).result

        url = common.parseDOM(result, "video")[0]
        url = common.parseDOM(url, "source", ret="src", attrs = { "type": ".+?" })[0]
        return url
    except:
        return
Example 29
def openload(url):
    try:
        result = getUrl(url).result

        url = common.parseDOM(result, "span", attrs = { "id": "realdownload" })[0]
        url = common.parseDOM(url, "a", ret="href")[0]
        return url
    except:
        return
Example 30
def postFile(subject, filename, folder, git_top_level):
    common.log("%s to %s - %s" % ( filename, repr(folder), subject))

    def func(progress, done):
        common.log("func: %s - %s" % (repr(progress), repr(done)))
        if len(done):
            print("Done uploading")
        else:
            print("At %s%%" % progress)

    width, height, pixels, meta = png.Reader(filename=pwd + "/logo_small.png").read()
    upper_limit = 40234050
    if os.path.getsize(filename) > upper_limit:
        print("%s size: %s more than %s. Skipping" % ( filename, os.path.getsize(filename), upper_limit))
        sys.exit(1)

    tags = []
    if conf["encrypted"]:
        tfile = pwd + "/temp/encoded-" + subject
        f = open(tfile, 'wb')
        text = readFile(filename, "rb")
        text = base64.b64encode(text)
    
        w = png.Writer(width, height, text={"data": text})
        w.write(f, pixels)
        f.close()
    else:
        tfile = filename

        if git_top_level:
            common.log("git top level directory: %s" % git_top_level)
            dirpath = os.path.relpath(os.path.dirname(tfile), git_top_level)
            if dirpath.find(".git") == -1:
                tags = dirpath.split(os.sep)
                common.log("Tags added to photo " + repr(tags))

    common.log("Uploading: " + tfile)

    res = flickr.upload(filename=tfile, is_public=0, title=subject,
                        description=os.path.basename(tfile), tags = '"' + '" "'.join(tags) + '"',
                        callback=func)

    if len(res):
        if isinstance(folder, (int, long)):
            flickr.photosets_addPhoto(photoset_id=folder, photo_id=res[0].text)
        else:
            flickr.photosets_create(title=folder, primary_photo_id=res[0].text)

    if conf["encrypted"]:
        os.unlink(pwd + "/temp/encoded-" + subject)

    if len(res):
        common.log("Done: " + repr(res))
    else:
        print("Failed to store: " + repr(res))
        sys.exit(1)
 def results_remaining(self, html):
     search_results = xbmc_common.parseDOM(
         html, 'div', {'class': 'heading search__results__heading'})
     if search_results:
         results_count_matches = __results_count_re__.findall(
             search_results[0])
         if results_count_matches:
             match = results_count_matches[0]
             return int(match[1]) - int(match[0])
         if __result_count_re__.findall(search_results[0]):
             return 0  # All results on this page
     # We don't know so just make sure that it is positive so that we keep paging.
     return 1
Example 32
def uptobox(url):
    try:
        result = getUrl(url).result

        post = {}
        f = common.parseDOM(result, "form", attrs={"name": "F1"})[0]
        k = common.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
        for i in k:
            post.update({
                i:
                common.parseDOM(f, "input", ret="value", attrs={"name": i})[0]
            })
        post = urllib.urlencode(post)

        result = getUrl(url, post=post).result

        url = common.parseDOM(result, "div", attrs={"align": ".+?"})
        url = [i for i in url if 'button_upload' in i][0]
        url = common.parseDOM(url, "a", ret="href")[0]
        return url
    except:
        return
Example 33
def youtube(url):
    try:
        id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0]
        result = getUrl('http://gdata.youtube.com/feeds/api/videos/%s?v=2' %
                        id).result

        state, reason = None, None
        try:
            state = common.parseDOM(result, "yt:state", ret="name")[0]
        except:
            pass
        try:
            reason = common.parseDOM(result, "yt:state", ret="reasonCode")[0]
        except:
            pass
        if state in ('deleted', 'rejected', 'failed') or reason == 'requesterRegion':
            return

        url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % id
        return url
    except:
        return
Example 34
 def OnTimer1Timer(self, event):
     global StartTime
     global DirCount
     global FilesCount
     global KnownFilesCount
     ElapsedTime = CommonFunctions.ConvertSecondsToDayHourMinSec(time.time() - StartTime)
     self.lblTotalDir.SetLabel(PlatformMethods.Convert(DirCount))
     self.lblElapsedTime.SetLabel(PlatformMethods.Convert(ElapsedTime))
     #self.lblCurrentDir.SetLabel(PlatformMethods.Convert(evt.currentDir))
     self.lblFilesCount.SetLabel(PlatformMethods.Convert(FilesCount))
     self.lblKnownFilesCount.SetLabel(PlatformMethods.Convert(KnownFilesCount))
     self.RefreshLabels()
     event.Skip()
Example 35
 def OnTimer1Timer(self, event):
     ElapsedTime = CommonFunctions.ConvertSecondsToDayHourMinSec(
         time.time() - self.StartTime)
     self.lblTotalDir.SetLabel(
         PlatformMethods.Decode(self.scanThread.GetDirCount()))
     self.lblElapsedTime.SetLabel(PlatformMethods.Decode(ElapsedTime))
     #self.lblCurrentDir.SetLabel(PlatformMethods.Decode(evt.currentDir))
     self.lblFilesCount.SetLabel(
         PlatformMethods.Decode(self.scanThread.GetFilesCount()))
     self.lblKnownFilesCount.SetLabel(
         PlatformMethods.Decode(self.scanThread.GetKnownFilesCount()))
     self.RefreshLabels()
     event.Skip()
Example 36
    def __prepare_items(self, items, result):
        out = []
        for video in items:
            title = re.compile('<b>(.+?)</b>').findall(video)
            title = [i for i in title if '&ndash;' in i or '-' in i][-1]
            title = title.split('<b>')[-1]
            title = title.replace('&ndash;', '-')
            title = common.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            url = self.base + re.compile('<a.+?href="(.+?)"').findall(video)[0]
            out += [(title, url, info().icon)]

        return out
Example 37
    def resolve(self, url):
        try:
            id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split(
                "&")[0]
            result = getUrl('http://www.youtube.com/watch?v=%s' % id).result

            message = common.parseDOM(result,
                                      "div",
                                      attrs={"id": "unavailable-submessage"})
            message = ''.join(message)

            alert = common.parseDOM(result,
                                    "div",
                                    attrs={"id": "watch7-notification-area"})

            if len(alert) > 0: raise Exception()
            if re.search('[a-zA-Z]', message): raise Exception()

            url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % id
            return url
        except:
            return
Example 38
    def OnBtnExportWordFeaturesButton(self, event):
        db = SqliteDatabase(Globals.TextCatFileName)
        if not db.OpenConnection():
            return

        dlg = wx.FileDialog(self, "Save Words List", ".", "", "*.csv", wx.SAVE)
        try:
            if dlg.ShowModal() == wx.ID_OK:
                fileName = dlg.GetPath()
                busy = wx.BusyInfo(
                    "It might take some time depending on the total number of unique words..."
                )
                wx.Yield()
                fout = open(fileName, 'wb')
                #query = "select ID, `Word` from " + Constants.TextCatWordsTable + " order by `ID`; "
                query = "select words.word, count(WordLocation.WordID) as total from words left join WordLocation on words.rowid = wordlocation.wordid "
                query += "group by wordlocation.wordid order by total desc;"
                #print 'before'
                rows = db.FetchAllRows(query)
                #print 'after'
                i = 1
                for row in rows:
                    #print row
                    #if i == 0:
                    #try:
                    fout.write(PlatformMethods.Encode(row[0]))
                    fout.write(" (%d)" % row[1])
                    #fout.write(row[1])
                    #i += 1
                    #except Exception, value:
                    #    print "Error: writing word: ", value
                    #else:
                    #try:
                    fout.write(", ,")
                    #fout.write(row[0])
                    #fout.write(" - %d"%row[1])
                    #fout.write(row[1])
                    i += 1
                    if i == 4:
                        i = 0
                        fout.write("\n")
                        #except Exception, value:
                        #    print "Error: writing word: ", value

                db.CloseConnection()
                fout.close()
        except Exception as value:
            db.CloseConnection()
            fout.close()
            CommonFunctions.ShowErrorMessage(
                self, "Failed to Export Word List. Error: %s" % value)
Example 39
 def OnBtnExportSearchResultsButton(self, event):
     db = SqliteDatabase(Globals.KeywordsFileName)
     if not db.OpenConnection():
         return
        
     dlg = wx.DirDialog(self, message="Empty Directory to Save Search Results")
     #try:
     if dlg.ShowModal() == wx.ID_OK:
         dirPath = dlg.GetPath()
         if os.listdir(dirPath):
             CommonFunctions.ShowErrorMessage(self, "Selected directory is not empty! Please select an empty directory!")
         else:
             busy = wx.BusyInfo("It may take some time depending on the total number of keywords...")
             wx.Yield()
             fout = open(os.path.join(dirPath, "SearchResultsSummary.txt"), 'wb')
             fout.write("%s%s%s%s\n"%("Keyword".ljust(20, " "), "File Path".ljust(200, " "), "Case Sens.".rjust(12, " "), "Case Insens.".rjust(12, " ")))
             fout.write("%s%s%s%s\n"%("=".ljust(20, "="), "=".ljust(200, "="), "=".rjust(12, "="), "=".rjust(12, "="))) 
             for word in Globals.Keywords:
                 keywordPath = os.path.join(dirPath, word)
                 if not os.path.isdir(keywordPath):
                     os.mkdir(keywordPath)
                     
                 fout.write(word.ljust(20, " "))
                 query = "select FileName, " + word + "_CS," + word + "_CI from " + Constants.KeywordsFrequencyTable
                 query += " where " + word + "_CI > 0 or " + word + "_CS > 0;"  
                 
                 rows = db.FetchAllRows(query)
                 i = 0
                 for row in rows:
                     try:
                         if i > 0:
                             fout.write(" ".ljust(20, " "))
                         i += 1
                         srcFilePath = PlatformMethods.Decode(row[0]) #.replace("\\\\", "\\") 
                         fileName = os.path.basename(row[0])
                         dstFilePath = PlatformMethods.Decode(os.path.join(keywordPath, fileName))
                         fout.write(srcFilePath.ljust(200, " "))
                         fout.write(PlatformMethods.Encode(row[1]).rjust(12, " "))
                         fout.write(PlatformMethods.Encode(row[2]).rjust(12, " "))
                         shutil.copyfile(srcFilePath, dstFilePath)
                         shutil.copystat(srcFilePath, dstFilePath)
                     except Exception as value:
                         print 'Error occurred while exporting: Error: ', value
                         
                     fout.write("\n")
                 
                 fout.write("\n")
                 fout.write("%s\n"%("*".ljust(250, "*")))
                 
             db.CloseConnection()
             fout.close()
Example 40
    def livetv_list2(self, url):
        try:
            url = getattr(link(), url)
            result = getUrl(url, timeout='30').result
            result = result.decode('iso-8859-1').encode('utf-8')
            result = result.replace('\n', '')

            pages = re.compile('(<a href="/enx/team/.+?">.+?</a>)').findall(
                result)
        except:
            return

        for page in pages:
            try:
                name = common.parseDOM(page, "a")[0]
                name = re.sub('<.+?>|</.+?>', '', name)
                name = common.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = common.parseDOM(page, "a", ret="href")[0]
                url = re.sub('/calendar/', '/video/', url)
                url = '%s%s' % (link().livetv_base, url)
                url = common.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url})
            except:
                pass

        try:
            self.list = [
                i for n, i in enumerate(self.list) if i not in self.list[:n]
            ]
            self.list = sorted(self.list, key=itemgetter('name'))
        except:
            pass

        return self.list
Example 41
    def getSchedule(self):
        html = common.fetchPage({'link': self.__urlSchedule})
        articles = common.parseDOM(html.get("content"),
                                   name='a',
                                   attrs={'class': 'day_wrapper'})
        articles_href = common.parseDOM(html.get("content"),
                                        name='a',
                                        attrs={'class': 'day_wrapper'},
                                        ret="href")
        i = 0

        for article in articles:
            link = articles_href[i]
            i = i + 1

            day = common.parseDOM(article, name='strong', ret=False)
            if len(day) > 0:
                day = day[0].encode("utf-8")
            else:
                day = ''

            date = common.parseDOM(article, name='small', ret=False)
            if len(date) > 0:
                date = date[0].encode("utf-8")
            else:
                date = ''

            title = day + " - " + date

            parameters = {
                "link": link,
                "title": title,
                "banner": "",
                "mode": "getScheduleDetail"
            }
            url = sys.argv[0] + '?' + urllib.urlencode(parameters)
            self.html2ListItem(title, "", "", "", "", date, "", url, None,
                               True, False)
Example 42
    def OnBtnAddDestinationButton(self, event):
        imagePath = self.txtImagePath.GetValue().strip()
        if not imagePath:
            msg = "Please select dd image file path first!"
            CommonFunctions.ShowErrorMessage(self, msg, True)
            return

        if not os.path.exists(imagePath):
            CommonFunctions.ShowErrorMessage(
                self, "Please enter a valid image file path!", True)
            return

        st = os.stat(imagePath)
        imageSize = st[ST_SIZE]

        dlg = wx.DirDialog(self)
        try:
            if dlg.ShowModal() == wx.ID_OK:
                driveName = dlg.GetPath()
                driveName = driveName[:driveName.find(':') + 1]
                #print driveName

                try:
                    rfin = Win32RawIO.Win32RAWIO(r'\\.\%s' % driveName, 'r')
                    if imageSize > rfin.size:
                        CommonFunctions.ShowErrorMessage(
                            self,
                            "Warning! Image file size bigger than the disk size!!",
                            False)
                    rfin.close()
                    self.listDriveNames.append(r'\\.\%s' % driveName)
                    self.lstDestinations.Append([driveName])
                except Exception as msg:
                    CommonFunctions.ShowErrorMessage(self, str(msg), True)

        finally:
            dlg.Destroy()
        event.Skip()
Example 43
    def CheckInputError(self):
        errMsg = ""

        if not self.txtKeywordsFile.GetValue():
            errMsg = "Please Enter or Browse to Keywords File Path!"

        elif not self.txtOutputPath.GetValue():
            errMsg = "Please Enter or Browse Path to Output Report!"

        if errMsg:
            CommonFunctions.ShowErrorMessage(self, errMsg)
            return True
        else:
            return False
Example 44
    def commandRouter(
        self, dataEnc, module
    ):  # Routes commands to functions that handle the corresponding states.

        if self.state == "status":
            dataDec = dataEnc.decode()
            self.stateStatus(dataDec, module)

        elif self.state == "keyExchange":
            dataDec = dataEnc.decode()
            self.stateKeyExchange(dataDec, module)

        elif self.state == "login":
            dataDec = self.securityServer.decryptData(dataEnc).decode()
            self.stateLogin(dataDec, module)

        elif self.state == "default":
            dataDec = self.securityServer.decryptData(dataEnc).decode()
            self.stateDefault(dataDec, module)
        else:
            CommonFunctions.sendData(
                "Command couldn't be routed " + self.state + " state unknown",
                module, self.securityServer)
Example 45
def mrfile(url):
    try:
        result = getUrl(url).result

        post = {}
        f = common.parseDOM(result, "Form", attrs={"name": "F1"})[-1]
        k = common.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
        for i in k:
            post.update({
                i:
                common.parseDOM(f, "input", ret="value", attrs={"name": i})[0]
            })
        post.update({'method_free': '', 'method_premium': ''})
        post = urllib.urlencode(post)

        result = getUrl(url, post=post).result

        url = re.compile('(<a\s+href=.+?>Download\s+.+?</a>)').findall(
            result)[-1]
        url = common.parseDOM(url, "a", ret="href")[0]
        return url
    except:
        return
Example 46
 def SearchDocuments(self, searchWords):
     if self.SearchDocumentsMenu.FindItem(searchWords) < 0:
         id = wx.NewId()
         self.SearchDocumentsMenu.Append(id, searchWords)
         self.Bind(wx.EVT_MENU, self.OnSearchDocumentsMenu,
           id=id)
     
     #print searchWords
     DocPaths = []
     totalResults = 0
     try:
         DocPaths, totalResults = self.search.GetRankedDocuments(searchWords)
     except Exception as msg:
         CommonFunctions.ShowErrorMessage(self, 'No Indexing has been performed!', error=True)
Example 47
    def get_speakers_for_pages(self, pages):
        '''
        First yields the number of pages of speakers.
        After that yields tuples of title, link, img.
        '''

        returned_count = False
        for page in pages:
            html = self.__get_speaker_page__(page)
            if not returned_count:
                returned_count = True
                yield self.__get_speaker_page_count__(html)

            attrs = {'class': 'results__result media media--sm-v m4'}
            hrefs = xbmc_common.parseDOM(html, 'a', attrs, ret='href')
            content = xbmc_common.parseDOM(html, 'a', attrs)

            for result in zip(hrefs, content):
                url = URLTED + result[0]
                header = xbmc_common.parseDOM(result[1], 'h4')[0]
                title = ' '.join(header.replace('<br>', ' ').split())
                img = xbmc_common.parseDOM(result[1], 'img', ret='src')[0]
                yield title, url, img
    def get_talks_for_search(self, search_string, page_index):
        '''
        Yields number of results left to show after this page,
        then tuples of title, link, img for results on this page.
        '''
        # TODO Read result count and use for paging control

        search_string = urllib.quote_plus(search_string)
        html = self.get_HTML(URLSEARCH % (search_string, page_index))

        yield self.results_remaining(html)

        results = xbmc_common.parseDOM(html, 'article',
                                       {'class': 'm1 search__result'})

        html_parser = HTMLParser.HTMLParser()
        for result in results:
            header = xbmc_common.parseDOM(result, 'h3')[0]
            title = html_parser.unescape(
                xbmc_common.parseDOM(header, 'a')[0].strip())
            url = URLTED + xbmc_common.parseDOM(header, 'a', ret='href')[0]
            img = xbmc_common.parseDOM(result, 'img', ret='src')[0]
            yield title, url, img
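
Hypothetical consumption of these generators (the scraper instance and query are assumed): the first yielded value is the paging count, and the rest are (title, url, img) tuples.

gen = scraper.get_talks_for_search('science', 1)  # 'scraper' is an assumed instance
remaining = next(gen)
for title, url, img in gen:
    print(title, url)
print('more results available' if remaining > 0 else 'last page')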
Example 49
def search(url, query='', type='f', page=0):

    if type == 'f':
        if query is None or query == '':
            query = common.getUserInput('Search', '')
            if query is None:
                return

        searchList = cache.get('searchList').split("\n")
        if not query in searchList:
            searchList.append(query)
            cache.set('searchList', '\n'.join(searchList))

        url = SEARCH_URL % (str(page + 1), query.replace(' ', '+'))

    try:
        soup = BeautifulSoup(str(make_request(url)),
                             convertEntities=BeautifulSoup.HTML_ENTITIES)

        for a in soup.findAll('a'):
            try:
                if a['href'].find('/d/') > 0:
                    add_dir(a.text, a['href'], 2, icon, '', 'd', page + 1)
                elif a['href'].find('/d/') == 0:
                    add_dir(a.text, 'http://4share.vn' + a['href'], 2,
                            icon, '', 'd', page + 1)
                elif a['href'].find('/f/') > 0:
                    if a.findParent('tr').findAll('td')[2].text.find('KB') < 0:
                        add_link('', a.text + ' (' +
                                 a.findParent('tr').findAll('td')[2].text +
                                 ')', 0, a['href'], '', '')
                elif a['href'].find('/f/') == 0:
                    if a.findParent('tr').findAll('td')[1].text.find('KB') < 0:
                        add_link('', a.text + ' (' +
                                 a.findParent('tr').findAll('td')[1].text +
                                 ')', 0, 'http://4share.vn' + a['href'], '', '')
            except:
                pass
    except:
        pass

    add_dir('>', url, 2, icon, query, type, page + 1)
Example 50
    def capimage(self, data):
        try:
            url = common.parseDOM(data, "img", ret="src")
            url = [i for i in url if 'captcha' in i]

            if len(url) > 0:
                self.type = 'capimage'
            else:
                self.type = None
                return

            response = self.keyboard(url[0])
            self.captcha.update({'code': response})
        except:
            pass
Example 51
def nosvideo(url):
    try:
        result = getUrl(url).result

        post = {}
        f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
        k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
        for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
        post.update({'method_free': 'Free Download'})
        post = urllib.urlencode(post)

        result = getUrl(url, post=post).result

        result = re.compile('(eval.*?\)\)\))').findall(result)[0]
        result = jsunpack(result)

        url = re.compile("playlist=(.+?)&").findall(result)[0]

        result = getUrl(url).result
        url = common.parseDOM(result, "file")[0]
        return url
    except:
        return
Example 52
def hostingbulk(url):
    try:
        result = getUrl(url, mobile=True).result

        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack(result)

        url = re.compile("'file' *, *'(.+?)'").findall(result)
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url += common.parseDOM(result, "embed", ret="src")
        url = 'http://' + url[-1].split('://', 1)[-1]
        return url
    except:
        return
Example 53
def search(url, query='', type='folder', page=0):
    if query is None or query == '':
        query = common.getUserInput('Search', '')

    if query is None:
        return

    if saveSearch == 'true':
        searchList = cache.get('searchList').split("\n")
        if not query in searchList:
            searchList.append(query)
            cache.set('searchList', '\n'.join(searchList))

    hdrepo('search4', query)
Example 54
def search():
    query = common.getUserInput('Tìm kiếm Phim', '')
    if query is None:
        return
    url = fptplay + '/search/' + urllib.quote_plus(query)

    link = make_request(url)
    match = re.compile(
        '<a href="(.+?)" title="(.+?)" class="item_image">\s*<img src="(.+?)".+?'
    ).findall(link)

    for url, name, thumbnail in match:
        title = name.replace('&#39;', "'")
        addDir(title, fptplay + url, 4, thumbnail, playable=True)
Example 55
def readFile(fname, flags="r"):
    common.log(repr(fname) + " - " + repr(flags))

    if not os.path.exists(fname):
        common.log("File doesn't exist")
        return False
    d = ""
    try:
        t = open(fname, flags)
        d = t.read()
        t.close()
    except Exception as e:
        common.log("Exception: " + repr(e), -1)

    common.log("Done")
    return d
Example 56
    def lfv_list2(self, url, meta):
        try:
            result = getUrl(url, timeout='30').result

            title = common.parseDOM(result, "h1", attrs={"class": "title"})[0]
            title = title.split(':', 1)[-1].split('>',
                                                  1)[-1].split('<',
                                                               1)[0].strip()

            videos = result.replace('"//',
                                    '"http://').replace("'//", '"http://')
            videos = re.findall('[\'|\"](http://.+?)[\'|\"]', videos, re.I)
            videos = uniqueList(videos).list
            videos = [
                i for i in videos if any(
                    i.startswith(x) for x in resolver().hostList)
            ]
            videos = [i for i in videos if not i.endswith('.js')]
        except:
            return

        for video in videos:
            try:
                name = title
                name = common.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = video
                url = common.replaceHTMLCodes(url)
                if url.startswith('//'): url = 'http:' + url
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'meta': meta})
            except:
                pass

        return self.list
Example 57
def grifthost(url):
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://grifthost.com/embed-%s.html' % url

        result = getUrl(url).result

        try:
            post = {}
            f = common.parseDOM(result, "Form", attrs={"method": "POST"})[0]
            f = f.replace('"submit"', '"hidden"')
            k = common.parseDOM(f,
                                "input",
                                ret="name",
                                attrs={"type": "hidden"})
            for i in k:
                post.update({
                    i:
                    common.parseDOM(f, "input", ret="value", attrs={"name":
                                                                    i})[0]
                })
            post = urllib.urlencode(post)
            result = getUrl(url, post=post).result
        except:
            pass

        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack(result)

        url = re.compile("'file' *, *'(.+?)'").findall(result)
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url += common.parseDOM(result, "embed", ret="src")
        url = 'http://' + url[-1].split('://', 1)[-1]
        return url
    except:
        return
    def worker(self, data):
        try:
            data = str(data).replace('\r','').replace('\n','').replace('\t','')

            url = re.compile('(.+?)<regex>').findall(data)[0]
            regex = re.compile('<regex>(.+?)</regex>').findall(data)
        except:
            return

        for x in regex:
            try:
                name = re.compile('<name>(.+?)</name>').findall(x)[0]

                expres = re.compile('<expres>(.+?)</expres>').findall(x)[0]

                referer = re.compile('<referer>(.+?)</referer>').findall(x)[0]
                referer = urllib.unquote_plus(referer)
                referer = common.replaceHTMLCodes(referer)
                referer = referer.encode('utf-8')

                page = re.compile('<page>(.+?)</page>').findall(x)[0]
                page = urllib.unquote_plus(page)
                page = common.replaceHTMLCodes(page)
                page = page.encode('utf-8')

                result = getUrl(page, referer=referer).result
                result = str(result).replace('\r','').replace('\n','').replace('\t','')
                result = str(result).replace('\/','/')

                r = re.compile(expres).findall(result)[0]
                url = url.replace('$doregex[%s]' % name, r)
            except:
                pass

        url = common.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
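
The $doregex substitution the worker performs, reduced to a self-contained miniature (URL, page body, and pattern are fabricated):

import re

url = 'http://example.com/stream?token=$doregex[tok]'
page = 'var token = "abc123";'  # stands in for the fetched referer page
value = re.compile('token = "(.+?)"').findall(page)[0]
url = url.replace('$doregex[%s]' % 'tok', value)
print(url)  # http://example.com/stream?token=abc123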
Example 59
def search_albums(start, query, page):
  #http://search.chiasenhac.com/search.php?s=bai+hat&mode=album&page=2&start=221
  mode = 11
  if len(query) == 0:
    query = common.getUserInput('Search', '')
    if query is None:
      return
    saved = __settings__.getSetting('saved_search_' + str(mode))
    if saved is None:
      saved = query + '~'
      __settings__.setSetting('saved_search_' + str(mode),saved)
    else:
      if query + '~' in saved:
        saved = saved.replace(query + '~','')
      saved = query + '~' + saved
      __settings__.setSetting('saved_search_' + str(mode),saved)
  if page == 0:
    page = 1
  url = 'http://search.chiasenhac.com/search.php?mode=album&s=' + urllib.quote_plus(query) + '&page=' + str(page) + '&start=' + start
  content = make_request(url)
  soup = BeautifulSoup(str(content), convertEntities=BeautifulSoup.HTML_ENTITIES)
  thumbs = soup.find('table',{'class' : 'tbtable'}).findAll('span',{'class' : 'genmed'})
  albums_thumbs = {}
  for thumb in thumbs:
    img = thumb.find('img')
    href = thumb.find('a')
    if (img is not None) and (href is not None):
      a = img.get('src')
      b = href.get('href')
      albums_thumbs[b] = a

  albums = soup.find('table',{'class' : 'tbtable'}).findAll('span',{'class' : 'gen'})
  for album in albums:
    href = album.find('a')
    if href is not None:
      link = href.get('href')
      title = album.text.replace(u'(Xem chi tiết...)','').replace('Lossless',' - Lossless').replace('320kbps',' - 320kbps').replace('192kbps',' - 192kbps').replace('128kbps',' - 128kbps')
      thumb = None
      if link in albums_thumbs:
        thumb = albums_thumbs[link]
      
      add_dir(title, link, 102, thumb, query, type, 0)
  xt = soup.find('a',{'class' : 'xt'})
  if xt is not None:
    href = xt.get('href')
    parts = href.split('=')
    start = parts[len(parts) - 1]
    add_dir(u'Trang tiếp >>', start, mode, get_thumbnail_url(), query, type, page + 1)
  return
Example 60
def handlelinks(pageUrl):
    html = cf.getresponsefromurl(pageUrl)
    bsObj = BeautifulSoup(html.content, 'lxml')

    price = bsObj.find("span", {"class": "xiaoquUnitPrice"}).string
    logging.info(price)

    xiaoquInfo = bsObj.find("div", {"class": "xiaoquInfo"})
    # Pre-seed the fields so a missing label yields None rather than a NameError.
    kaifangshang = jianzhuniandai = jianzhuleixing = wuyefeiyong = None
    wuyegongsi = loudongzongshu = fangwuzongshu = None
    for child in xiaoquInfo.children:
        # .children includes bare text nodes; only tags carry the label span.
        if getattr(child, 'name', None) is None:
            continue
        if child.find("span", {"class": "xiaoquInfoLabel"}).string == '开发商':
            kaifangshang = child.find("span", {
                "class": "xiaoquInfoContent"
            }).string
            logging.info(kaifangshang)
        elif child.find("span", {"class": "xiaoquInfoLabel"}).string == '建筑年代':
            jianzhuniandai = child.find("span", {
                "class": "xiaoquInfoContent"
            }).string
            logging.info(jianzhuniandai)
        elif child.find("span", {"class": "xiaoquInfoLabel"}).string == '建筑类型':
            jianzhuleixing = child.find("span", {
                "class": "xiaoquInfoContent"
            }).string
            logging.info(jianzhuleixing)
        elif child.find("span", {"class": "xiaoquInfoLabel"}).string == '物业费用':
            wuyefeiyong = child.find("span", {
                "class": "xiaoquInfoContent"
            }).string
            logging.info(wuyefeiyong)
        elif child.find("span", {"class": "xiaoquInfoLabel"}).string == '物业公司':
            wuyegongsi = child.find("span", {
                "class": "xiaoquInfoContent"
            }).string
            logging.info(wuyegongsi)
        elif child.find("span", {"class": "xiaoquInfoLabel"}).string == '楼栋总数':
            loudongzongshu = child.find("span", {
                "class": "xiaoquInfoContent"
            }).string
            logging.info(loudongzongshu)
        elif child.find("span", {"class": "xiaoquInfoLabel"}).string == '房屋总数':
            fangwuzongshu = child.find("span", {
                "class": "xiaoquInfoContent"
            }).string
            logging.info(fangwuzongshu)

    return [
        price, kaifangshang, jianzhuniandai, jianzhuleixing, wuyefeiyong,
        wuyegongsi, loudongzongshu, fangwuzongshu
    ]
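
The label dispatch above could be table-driven. A hedged sketch of the same extraction with a mapping; the keys are the page's own labels (developer, build year, building type, property fee, property company, building count, unit count), and the text-node guard mirrors the fix in the loop above:

FIELDS = ['开发商', '建筑年代', '建筑类型', '物业费用', '物业公司', '楼栋总数', '房屋总数']

def parse_xiaoqu_info(xiaoquInfo):
    """Collect label -> content pairs, then read them out in FIELDS order."""
    info = {}
    for child in xiaoquInfo.children:
        if getattr(child, 'name', None) is None:
            continue  # skip bare text nodes between tags
        label = child.find("span", {"class": "xiaoquInfoLabel"})
        content = child.find("span", {"class": "xiaoquInfoContent"})
        if label is not None and content is not None:
            info[label.string] = content.string
    return [info.get(field) for field in FIELDS]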