示例#1
0
 def _scrape_cloud(self):
     """Queue finished AllDebrid cloud folders matching this title, then
     scrape each queued folder on its own thread via self._scrape_folders.

     Returns self.sources when the cloud listing fails or nothing matches;
     otherwise falls through (implicitly returns None) after the threads join.
     """
     try:
         threads = []
         cloud_files = []
         try:
             my_cloud_files = AllDebrid.user_cloud()
         except:
             return self.sources
         # Python 2 dicts expose iteritems(); Python 3 only items().  Resolve
         # the iterator up front instead of catching an exception mid-loop,
         # which could leave duplicate entries after a partial first pass.
         if hasattr(my_cloud_files, 'iteritems'):
             dict_items = my_cloud_files.iteritems
         else:
             dict_items = my_cloud_files.items
         for k, v in dict_items():
             if isinstance(v, dict):
                 cloud_files.append(v)
         # statusCode 4 == magnet finished downloading and ready to browse
         my_cloud_files = [i for i in cloud_files if i['statusCode'] == 4]
         for item in my_cloud_files:
             folder_name = clean_title(normalize(item['filename']))
             assigned_content = self._assigned_content(
                 normalize(item['filename']))
             if assigned_content:
                 if assigned_content == self.rootname:
                     self.folder_results.append(
                         (normalize(item['filename']), item, True))
             elif self.folder_query in folder_name or not folder_name:
                 self.folder_results.append(
                     (normalize(item['filename']), item, False))
         if not self.folder_results: return self.sources
         for i in self.folder_results:
             threads.append(Thread(target=self._scrape_folders, args=(i, )))
         [i.start() for i in threads]
         [i.join() for i in threads]
     except:
         pass
示例#2
0
def ad_torrent_cloud(folder_id=None):
    """Render the AllDebrid torrent cloud as a list of Kodi folder items.

    Only finished magnets (statusCode == 4) are listed.  Each row gets
    context-menu entries to link/unlink the folder as a TV show.
    *folder_id* is accepted for interface compatibility but unused here.
    """
    cloud_files = []
    cloud_dict = AllDebrid.user_cloud()
    # Python 2 dicts expose iteritems(); Python 3 only items().  Resolve the
    # iterator once rather than catching an exception mid-loop, which could
    # leave duplicate entries behind after a partial first pass.
    if hasattr(cloud_dict, 'iteritems'):
        dict_items = cloud_dict.iteritems
    else:
        dict_items = cloud_dict.items
    for k, v in dict_items():
        if isinstance(v, dict):
            cloud_files.append(v)
    cloud_files = [i for i in cloud_files if i['statusCode'] == 4]
    for count, item in enumerate(cloud_files, 1):
        try:
            cm = []
            folder_name = item['filename']
            name = clean_file_name(folder_name).upper()
            display = '%02d | [B]FOLDER[/B] | [I]%s [/I]' % (count, name)
            url_params = {
                'mode': 'alldebrid.browse_ad_cloud',
                'folder': json.dumps(item)
            }
            link_folders_add = {
                'mode': 'debrid_link_folders',
                'debrid_service': 'AD',
                'folder_name': normalize(folder_name),
                'action': 'add'
            }
            link_folders_remove = {
                'mode': 'debrid_link_folders',
                'debrid_service': 'AD',
                'folder_name': normalize(folder_name),
                'action': 'remove'
            }
            url = build_url(url_params)
            cm.append(("[B]Link TV Show[/B]",
                       'XBMC.RunPlugin(%s)' % build_url(link_folders_add)))
            cm.append(("[B]Clear TV Show Link[/B]",
                       'XBMC.RunPlugin(%s)' % build_url(link_folders_remove)))
            listitem = xbmcgui.ListItem(display)
            listitem.addContextMenuItems(cm)
            listitem.setArt({
                'icon': default_ad_icon,
                'poster': default_ad_icon,
                'thumb': default_ad_icon,
                'fanart': fanart,
                'banner': default_ad_icon
            })
            xbmcplugin.addDirectoryItem(__handle__,
                                        url,
                                        listitem,
                                        isFolder=True)
        except:
            pass
    xbmcplugin.setContent(__handle__, 'files')
    xbmcplugin.endOfDirectory(__handle__)
    setView('view.premium')
示例#3
0
def rd_torrent_cloud():
    """List finished Real-Debrid cloud torrents as browsable Kodi folders."""
    try:
        my_cloud_files = RealDebrid.user_cloud()
    except:
        my_cloud_files = None
    if not my_cloud_files: return
    finished = [x for x in my_cloud_files if x['status'] == 'downloaded']
    for position, torrent in enumerate(finished, 1):
        try:
            raw_name = torrent['filename']
            shown_name = clean_file_name(raw_name).upper()
            label = '%02d | [B]FOLDER[/B] | [I]%s [/I]' % (position, shown_name)
            browse_params = {
                'mode': 'real_debrid.browse_rd_cloud',
                'id': torrent['id']
            }
            add_params = {
                'mode': 'debrid_link_folders',
                'debrid_service': 'RD',
                'folder_name': normalize(raw_name),
                'action': 'add'
            }
            remove_params = {
                'mode': 'debrid_link_folders',
                'debrid_service': 'RD',
                'folder_name': normalize(raw_name),
                'action': 'remove'
            }
            url = build_url(browse_params)
            context_menu = [
                ("[B]Link TV Show[/B]",
                 'XBMC.RunPlugin(%s)' % build_url(add_params)),
                ("[B]Clear TV Show Link[/B]",
                 'XBMC.RunPlugin(%s)' % build_url(remove_params))
            ]
            listitem = xbmcgui.ListItem(label)
            listitem.addContextMenuItems(context_menu)
            artwork = {key: default_rd_icon
                       for key in ('icon', 'poster', 'thumb', 'banner')}
            artwork['fanart'] = fanart
            listitem.setArt(artwork)
            xbmcplugin.addDirectoryItem(__handle__, url, listitem,
                                        isFolder=True)
        except:
            pass
    xbmcplugin.setContent(__handle__, 'files')
    xbmcplugin.endOfDirectory(__handle__)
    setView('view.premium')
示例#4
0
 def results(self, info):
     """Build the AllDebrid-cloud source list for the given media *info*.

     Stores the scraped sources on self.sources, caches them on the Kodi
     window property 'ad-cloud_source_results', and returns them.
     """
     try:
         self.info = info
         self.db_type = self.info.get("db_type")
         self.title = self.info.get("title")
         self.year = self.info.get("year")
         self.rootname = '%s (%s)' % (self.title, self.year) if self.year else self.title
         self.season = self.info.get("season", None)
         self.episode = self.info.get("episode", None)
         self.extensions = supported_video_extensions()
         self.folder_query = clean_title(normalize(self.title))
         self.file_query = clean_title(normalize(self.title))
         if self.db_type == 'movie':
             self.query_list = self._year_query_list()
         else:
             self.query_list = self._episode_query_list()
         self._scrape_cloud()
         if not self.scrape_results: return self.sources
         self.label_settings = label_settings(self.info['scraper_settings'],
                                              self.scrape_provider)
         sources_append = self.sources.append
         for result in self.scrape_results:
             try:
                 file_name = normalize(result['name'])
                 file_dl = result['url_link']
                 size = float(int(result['size'])) / 1073741824
                 video_quality = get_release_quality(file_name)
                 details = get_file_info(file_name)
                 label, multiline_label = build_internal_scrapers_label(
                     self.label_settings, file_name, details, size,
                     video_quality)
                 sources_append({
                     'name': file_name,
                     'label': label,
                     'multiline_label': multiline_label,
                     'title': file_name,
                     'quality': video_quality,
                     'size': size,
                     'url_dl': file_dl,
                     'id': file_dl,
                     'downloads': False,
                     'direct': True,
                     'source': self.scrape_provider,
                     'scrape_provider': self.scrape_provider
                 })
             except:
                 pass
         window.setProperty('ad-cloud_source_results',
                            json.dumps(self.sources))
     except Exception as e:
         from modules.utils import logger
         logger('FEN alldebrid scraper Exception', e)
     return self.sources
示例#5
0
			def _process():
				# Generator: yield one source dict per scraped (name, url[, size])
				# tuple; entries that fail any lookup are skipped silently.
				for entry in self.scrape_results:
					try:
						file_name = normalize(entry[0])
						if self.title_filter and not check_title(self.title, file_name, self.aliases, self.year, self.season, self.episode):
							continue
						file_dl = entry[1]
						URLName = clean_file_name(file_name).replace('html', ' ').replace('+', ' ').replace('-', ' ')
						# Older result tuples may lack a size; probe the file instead.
						try: size = entry[2]
						except: size = self._get_size(file_dl)
						video_quality, details = get_file_info(name_info=release_info_format(file_name))
						yield {'name': file_name,
							'title': file_name,
							'URLName': URLName,
							'quality': video_quality,
							'size': size,
							'size_label': '%.2f GB' % size,
							'extraInfo': details,
							'url_dl': file_dl,
							'id': file_dl,
							self.scrape_provider: True,
							'direct': True,
							'source': self.scraper_name,
							'scrape_provider': 'folders'}
					except: pass
示例#6
0
 def _scrape_directory(self, folder_name):
     """Scan a single directory for matching video files and sub-folders.

     Matching files are appended to self.scrape_results; matching
     sub-folders are handed to self._scraper_worker for recursion.
     """
     subfolders = []
     dirs, files = xbmcvfs.listdir(folder_name)
     for entry in files + dirs:
         ext = os.path.splitext(urlparse(entry).path)[-1]
         entry_title = clean_title(normalize(entry))
         if ext:
             # Anything with an extension is treated as a file.
             if ext not in self.extensions: continue
             matched = False
             if self.db_type == 'movie':
                 matched = self.title_query in entry_title
             elif any(q in entry_title for q in self.file_query):
                 # Outside the download path everything matching the file
                 # query is taken; inside it the title must also match.
                 matched = (folder_name != self.download_path
                            or self.title_query in entry_title)
             if matched:
                 self.scrape_results.append(
                     (entry, self.url_path(folder_name, entry)))
         else:
             # No extension: treat as a folder candidate for recursion.
             if self.title_query in entry_title or any(
                     q in entry_title for q in self.folder_query):
                 subfolders.append(os.path.join(folder_name, entry))
     if not subfolders: return
     return self._scraper_worker(subfolders)
示例#7
0
 def _scrape_cloud(self):
     """Queue matching Real-Debrid cloud torrents, then scrape each queued
     folder on its own thread via self._scrape_folders.

     Returns self.sources when the cloud listing fails or nothing matches.
     """
     try:
         try:
             my_cloud_files = RealDebrid.user_cloud()
         except:
             return self.sources
         for entry in my_cloud_files:
             normalized = normalize(entry['filename'])
             cleaned = clean_title(normalized)
             assigned = self._assigned_content(normalized)
             if assigned:
                 if assigned == self.rootname:
                     self.folder_results.append((normalized, entry['id'], True))
             elif self.folder_query in cleaned or not cleaned:
                 self.folder_results.append((normalized, entry['id'], False))
         if not self.folder_results: return self.sources
         workers = [Thread(target=self._scrape_folders, args=(i, ))
                    for i in self.folder_results]
         for worker in workers: worker.start()
         for worker in workers: worker.join()
     except:
         pass
 def _scrape_directory(self, folder_name):
     """Scan one cloud directory; collect matching video files and recurse
     into matching sub-folders via self._scraper_worker.

     Folders are processed before files, mirroring the listdir split.
     """
     subfolders = []
     dirs, files = xbmcvfs.listdir(folder_name)
     entries = [(name, True) for name in dirs] + [(name, False) for name in files]
     for name, is_folder in entries:
         entry_title = clean_title(normalize(name))
         if is_folder:
             if self.title_query in entry_title or any(
                     q in entry_title for q in self.folder_query):
                 subfolders.append(os.path.join(folder_name, name))
         else:
             ext = os.path.splitext(urlparse(name).path)[-1]
             if ext not in self.extensions: continue
             matched = False
             if self.db_type == 'movie':
                 matched = self.title_query in entry_title
             elif any(q in entry_title for q in self.file_query):
                 # Outside the configured folder path anything matching the
                 # file query is taken; inside it the title must also match.
                 matched = (folder_name not in self.folder_path
                            or self.title_query in entry_title)
             if matched:
                 self.scrape_results.append(
                     (name, self.url_path(folder_name, name)))
     if not subfolders: return
     return self._scraper_worker(subfolders)
示例#9
0
def perona_malik_art(image):
    """Run a learnable Perona-Malik diffusion on *image* for artistic effect.

    The diffusion operator is trainable (learn_operator=True); SGD minimizes
    a hand-crafted loss mixing reconstruction error, operator-gradient
    magnitude, and output energy.  Progress is shown via imshow() every 75
    iterations.  Assumes CUDA is available (pm.cuda()).
    """
    print("Perona-Malik model...")
    image = image_to_variable(image)
    pm = PeronaMalik(image.size()
                     , diffusion_rate=0.2
                     , delta_t=0.2
                     , coefficient='exp'
                     , learn_operator=True)

    lr = 0.000002
    optimizer = optim.SGD(pm.parameters(), lr=lr, momentum=0.9, dampening=0, weight_decay=0.0005)
    pm.cuda()
    original = normalize(image)

    out = pm.forward(original)
    for i in range(100001):
        optimizer.zero_grad()
        # Detach the previous output so each step builds a fresh graph from it.
        out = Variable(out.data, requires_grad=False)
        out = pm.forward(out)
        loss = torch.sum(torch.pow((out - original),2) + torch.abs(pm.gradients) + torch.pow(out,2))
        loss.backward(retain_graph=True)
        optimizer.step()
        if i % 75 == 0:
            print(loss)
            print("iteration %d" % i)
            imshow(squeeze(squeeze(out, 0), 0).cpu().data.numpy())

            # update the learning rate and refresh the operator
            # (fires at i = 300, 675, 1050, ... since 300 % 75 == 0; rebuilding
            # the optimizer also resets its momentum state, and the output is
            # blended back toward the original image)
            if i % 375 == 300:
                pm.laplace = make_operator('laplace', requires_grad=True)
                lr *= 0.995
                optimizer = optim.SGD(pm.parameters(), lr=lr, momentum=0.9, dampening=0, weight_decay=0.0005)
                out = out * 0.6 + original * 0.4
示例#10
0
 def _process():
     """Yield a source dict for each scrape result that passes the filter."""
     for entry in self.scrape_results:
         try:
             file_name = normalize(entry['name'])
             if self.title_filter and not check_title(
                     self.title, file_name, self.aliases, self.year,
                     self.season, self.episode):
                 continue
             URLName = clean_file_name(file_name).replace(
                 'html', ' ').replace('+', ' ').replace('-', ' ')
             # 'path' must exist even though it's unused below - a missing
             # key skips the entry via the bare except, as before.
             path = entry['path']
             file_dl = entry['id']
             size = round(float(entry['size']) / 1073741824, 2)
             video_quality, details = get_file_info(
                 name_info=release_info_format(file_name))
             yield {
                 'name': file_name,
                 'title': file_name,
                 'URLName': URLName,
                 'quality': video_quality,
                 'size': size,
                 'size_label': '%.2f GB' % size,
                 'extraInfo': details,
                 'url_dl': file_dl,
                 'id': file_dl,
                 'downloads': False,
                 'direct': True,
                 'source': self.scrape_provider,
                 'scrape_provider': self.scrape_provider
             }
         except:
             pass
示例#11
0
 def _scrape_downloads(self):
     """Append matching Real-Debrid download items to self.scrape_results.

     Movies are matched against the year query list, episodes against the
     season/episode filter; results are de-duplicated on their 'path'.
     """
     try:
         my_downloads = RealDebrid.downloads()
         my_downloads = [
             i for i in my_downloads
             if i['download'].lower().endswith(tuple(self.extensions))
         ]
         # Hoist loop invariants: the year query list never changes per
         # item, and a set gives O(1) duplicate-path checks instead of
         # rebuilding a list comprehension on every match.
         year_queries = self._year_query_list() if self.db_type == 'movie' else None
         seen_paths = set(d['path'] for d in self.scrape_results)
         append = self.scrape_results.append
         for item in my_downloads:
             match = False
             normalized = normalize(item['filename'])
             filename = clean_title(normalized)
             if self.db_type == 'movie':
                 if any(x in filename for x in year_queries):
                     match = True
             else:
                 if seas_ep_filter(self.season, self.episode, normalized):
                     match = True
             if match and self.folder_query in filename:
                 item = self.make_downloads_item(item)
                 if item['path'] not in seen_paths:
                     seen_paths.add(item['path'])
                     append(item)
     except:
         pass
示例#12
0
 def _builder():
     for count, item in enumerate(my_cloud_files, 1):
         try:
             cm = []
             cm_append = cm.append
             folder_name = item['filename']
             normalized_folder_name = normalize(folder_name)
             string = 'FEN_RD_%s' % normalized_folder_name
             link_folders_add = {
                 'mode': 'link_folders',
                 'service': 'RD',
                 'folder_name': normalized_folder_name,
                 'action': 'add'
             }
             link_folders_remove = {
                 'mode': 'link_folders',
                 'service': 'RD',
                 'folder_name': normalized_folder_name,
                 'action': 'remove'
             }
             current_link = main_cache.get(string)
             if current_link:
                 ending = '[COLOR=limegreen][B][I]\n      %s[/I][/B][/COLOR]' % (
                     linkedto_str % current_link)
             else:
                 ending = ''
             display = '%02d | [B]%s[/B] | [I]%s [/I]%s' % (
                 count, folder_str,
                 clean_file_name(normalized_folder_name).upper(), ending)
             url_params = {
                 'mode': 'real_debrid.browse_rd_cloud',
                 'id': item['id']
             }
             delete_params = {
                 'mode': 'real_debrid.delete',
                 'id': item['id'],
                 'cache_type': 'torrent'
             }
             url = build_url(url_params)
             cm_append(
                 ('[B]%s %s[/B]' % (delete_str, folder_str.capitalize()),
                  'RunPlugin(%s)' % build_url(delete_params)))
             cm_append((addlink_str,
                        'RunPlugin(%s)' % build_url(link_folders_add)))
             cm_append((clearlink_str,
                        'RunPlugin(%s)' % build_url(link_folders_remove)))
             listitem = make_listitem()
             listitem.setLabel(display)
             listitem.addContextMenuItems(cm)
             listitem.setArt({
                 'icon': default_rd_icon,
                 'poster': default_rd_icon,
                 'thumb': default_rd_icon,
                 'fanart': fanart,
                 'banner': default_rd_icon
             })
             yield (url, listitem, True)
         except:
             pass
示例#13
0
	def _scrape_cloud(self):
		"""Queue finished AllDebrid magnets matching this title and scrape each
		queued folder on its own thread.  Returns self.sources when the cloud
		listing fails or nothing matches."""
		try:
			threads = []
			results_append = self.folder_results.append
			append = threads.append
			try: my_cloud_files = AllDebrid.user_cloud()['magnets']
			except: return self.sources
			# statusCode 4 == magnet finished downloading
			my_cloud_files = [i for i in my_cloud_files if i['statusCode'] == 4]
			for item in my_cloud_files:
				# normalize() the filename once instead of up to three times per item
				normalized = normalize(item['filename'])
				folder_name = clean_title(normalized)
				assigned_content = self._assigned_content(normalized)
				if assigned_content:
					if assigned_content == self.rootname:
						results_append((normalized, item, True))
				elif self.folder_query in folder_name or not folder_name:
					results_append((normalized, item, False))
			if not self.folder_results: return self.sources
			for i in self.folder_results: append(Thread(target=self._scrape_folders, args=(i,)))
			[i.start() for i in threads]
			[i.join() for i in threads]
		except: pass
示例#14
0
 def _scrape_history(self):
     """Append matching Real-Debrid download-history items to
     self.scrape_results, de-duplicated on their 'path'."""
     try:
         my_downloads = RealDebrid.downloads()
         my_downloads = [i for i in my_downloads if i['download'].lower().endswith(tuple(self.extensions))]
         # Track seen paths in a set instead of rebuilding a list
         # comprehension over self.scrape_results on every match (O(n^2)).
         seen_paths = set(d['path'] for d in self.scrape_results)
         for item in my_downloads:
             filename = clean_title(normalize(item['filename']))
             if any(x in filename for x in self.query_list):
                 if self.folder_query in filename:
                     item = self.make_history_item(item)
                     if item['path'] not in seen_paths:
                         seen_paths.add(item['path'])
                         self.scrape_results.append(item)
     except: pass
示例#15
0
		def _process(item):
			"""Classify one (name, type) directory entry.

			Matching files are recorded via scrape_results_append; matching
			sub-folders are queued via foler_results_append (sic - presumably a
			typo for 'folder_results_append'; both closures are defined in the
			enclosing scope, outside this view).  Also mutates
			self.assigned_content when inspecting folders.
			"""
			file_type = item[1]
			normalized = normalize(item[0])
			item_name = clean_title(normalized)
			if file_type == 'file':
				ext = os.path.splitext(urlparse(item[0]).path)[-1].lower()
				if ext in self.extensions:
					if self.db_type == 'movie':
						if self.assigned_content or self.title_query in item_name:
							url_path = self.url_path(folder_name, item[0])
							size = self._get_size(url_path)
							scrape_results_append((item[0], url_path, size))
					else:
						if seas_ep_filter(self.season, self.episode, normalized):
							# Assigned content, or a file outside the root folder
							# path, is accepted without an extra title check.
							if self.assigned_content or not folder_name in self.folder_path:
								url_path = self.url_path(folder_name, item[0])
								size = self._get_size(url_path)
								scrape_results_append((item[0], url_path, size))
							elif self.title_query in item_name:
								url_path = self.url_path(folder_name, item[0])
								size = self._get_size(url_path)
								scrape_results_append((item[0], url_path, size))
			elif file_type == 'folder':
				if not assigned_folder:
					self.assigned_content = self._assigned_content(normalize(item[0]))
					if self.assigned_content:
						if self.assigned_content == self.rootname:
							new_folder = os.path.join(folder_name, item[0])
							foler_results_append((new_folder, True))
					elif self.title_query in item_name or any(x in item_name for x in self.folder_query):
						new_folder = os.path.join(folder_name, item[0])
						foler_results_append((new_folder, self.assigned_content))
				elif assigned_folder:
					if any(x in item_name for x in self.folder_query):
						new_folder = os.path.join(folder_name, item[0])
						foler_results_append((new_folder, True))
				# NOTE(review): this final elif is unreachable - the two branches
				# above already cover both truth values of assigned_folder.
				elif self.title_query in item_name or any(x in item_name for x in self.folder_query):
					new_folder = os.path.join(folder_name, item[0])
					foler_results_append((new_folder, self.assigned_content))
示例#16
0
def get_filename_match(title, url, name=None):
    """Best-effort: extract a display filename from *url* matching *title*.

    If *name* is supplied it is simply cleaned and returned.  Otherwise each
    path segment of the unquoted URL is compared against the cleaned title;
    the first matching segment is cleaned up and returned, else None.
    """
    from modules.utils import clean_file_name
    if name: return clean_file_name(name)
    from modules.utils import clean_title, normalize
    title_match = None
    try:
        title = clean_title(normalize(title))
        name_url = unquote(url)
        # This guard also bails out if cleaning the last segment fails.
        try:
            file_name = clean_title(name_url.split('/')[-1])
        except:
            return title_match
        for segment in name_url.split('/'):
            candidate = string(clean_title(normalize(segment)))
            if title not in candidate: continue
            title_match = clean_file_name(string(segment)).replace(
                'html', ' ').replace('+', ' ')
            break
    except:
        pass
    return title_match
def getFileNameMatch(title, url):
    """Legacy variant of get_filename_match that also handles magnet URIs.

    Strips tracker parameters and size/seed suffixes from the matched
    segment before returning it; returns None when nothing matches.
    """
    try:
        from urllib import unquote
    except ImportError:
        from urllib.parse import unquote
    from modules.utils import clean_title, normalize, clean_file_name
    title_match = None
    try:
        title = clean_title(normalize(title))
        if 'magnet' in url:
            url = url.split('&dn=')[1]
        name_url = unquote(url)
        # This guard also bails out if cleaning the last segment fails.
        try:
            file_name = clean_title(name_url.split('/')[-1])
        except:
            return title_match
        for segment in name_url.split('/'):
            candidate = str(clean_title(normalize(segment)))
            if title not in candidate: continue
            cleaned = clean_file_name(str(segment))
            for junk in ('&tr=udp:', '&tr=http:', '&tr=udp', '&tr=http'):
                cleaned = cleaned.replace(junk, '')
            cleaned = cleaned.replace('html', ' ').replace('+', ' ')
            try: cleaned = cleaned.split('&xl=')[0]
            except: pass
            try: cleaned = cleaned.split(' p=')[0]
            except: pass
            title_match = cleaned
            break
    except:
        pass
    return title_match
示例#18
0
	def results(self, info):
		"""Scrape the user-configured source folders for this title.

		Stores the scraped sources on self.sources, publishes them through
		internal_results(), and returns them.  Bails out early when no
		folder path is configured or nothing in the tree matches.
		"""
		try:
			self.info = info
			self.db_type = self.info.get('db_type')
			self.folder_path = source_folders_directory(self.db_type, self.scrape_provider)
			if not self.folder_path: return internal_results(self.scraper_name, self.sources)
			self.title = self.info.get('title')
			self.year = self.info.get('year')
			if self.year: self.rootname = '%s (%s)' % (self.title, self.year)
			else: self.rootname = self.title
			self.season = self.info.get('season')
			self.episode = self.info.get('episode')
			self.title_query = clean_title(normalize(self.title))
			self.folder_query = self._season_query_list() if self.db_type == 'episode' else self._year_query_list()
			# Walks the configured folder tree and fills self.scrape_results.
			self._scrape_directory((self.folder_path, False))
			if not self.scrape_results: return internal_results(self.scraper_name, self.sources)
			self.aliases = get_aliases_titles(info.get('aliases', []))
			def _process():
				# Generator: one source dict per scraped (name, url[, size]) tuple.
				for item in self.scrape_results:
					try:
						file_name = normalize(item[0])
						if self.title_filter:
							if not check_title(self.title, file_name, self.aliases, self.year, self.season, self.episode): continue
						file_dl = item[1]
						URLName = clean_file_name(file_name).replace('html', ' ').replace('+', ' ').replace('-', ' ')
						# Older result tuples may lack a size; probe the file instead.
						try: size = item[2]
						except: size = self._get_size(file_dl)
						video_quality, details = get_file_info(name_info=release_info_format(file_name))
						source_item = {'name': file_name,
										'title': file_name,
										'URLName': URLName,
										'quality': video_quality,
										'size': size,
										'size_label': '%.2f GB' % size,
										'extraInfo': details,
										'url_dl': file_dl,
										'id': file_dl,
										self.scrape_provider : True,
										'direct': True,
										'source': self.scraper_name,
										'scrape_provider': 'folders'}
						yield source_item
					except: pass
			self.sources = list(_process())
		except Exception as e:
			from modules.kodi_utils import logger
			logger('FEN folders scraper Exception', e)
		internal_results(self.scraper_name, self.sources)
		return self.sources
示例#19
0
	def results(self, info):
		"""Scrape the user's AllDebrid cloud for sources matching *info*.

		Stores the scraped sources on self.sources, publishes them through
		internal_results(), and returns them.  Bails out early when the
		'ad' debrid service is disabled or nothing in the cloud matches.
		"""
		try:
			if not enabled_debrids_check('ad'): return internal_results(self.scrape_provider, self.sources)
			self.title_filter = filter_by_name(self.scrape_provider)
			self.sources, self.folder_results, self.scrape_results = [], [], []
			self.db_type = info.get('db_type')
			self.title = info.get('title')
			self.year = info.get('year')
			if self.year: self.rootname = '%s (%s)' % (self.title, self.year)
			else: self.rootname = self.title
			self.season = info.get('season')
			self.episode = info.get('episode')
			if self.db_type == 'episode': self.seas_ep_query_list = seas_ep_query_list(self.season, self.episode)
			self.extensions = supported_video_extensions()
			self.folder_query = clean_title(normalize(self.title))
			# Fills self.scrape_results from the AllDebrid cloud listing.
			self._scrape_cloud()
			if not self.scrape_results: return internal_results(self.scrape_provider, self.sources)
			self.aliases = get_aliases_titles(info.get('aliases', []))
			def _process():
				# Generator: one source dict per matching cloud file.
				for item in self.scrape_results:
					try:
						file_name = normalize(item['filename'])
						# Items coming from an assigned folder skip the title filter.
						if self.title_filter and not 'assigned_folder' in item:
							if not check_title(self.title, file_name, self.aliases, self.year, self.season, self.episode): continue
						file_dl = item['link']
						URLName = clean_file_name(file_name).replace('html', ' ').replace('+', ' ').replace('-', ' ')
						size = round(float(int(item['size']))/1073741824, 2)  # bytes -> GB
						video_quality, details = get_file_info(name_info=release_info_format(file_name))
						source_item = {'name': file_name,
										'title': file_name,
										'URLName': URLName,
										'quality': video_quality,
										'size': size,
										'size_label': '%.2f GB' % size,
										'extraInfo': details,
										'url_dl': file_dl,
										'id': file_dl,
										'downloads': False,
										'direct': True,
										'source': self.scrape_provider,
										'scrape_provider': self.scrape_provider}
						yield source_item
					except: pass
			self.sources = list(_process())
		except Exception as e:
				from modules.kodi_utils import logger
				logger('FEN alldebrid scraper Exception', e)
		internal_results(self.scrape_provider, self.sources)
		return self.sources
示例#20
0
 def _scrape_cloud(self):
     """Collect Premiumize cloud video files whose names match the query."""
     try:
         cloud_files = Premiumize.user_cloud_all()['files']
         videos = [f for f in cloud_files
                   if f['path'].lower().endswith(tuple(self.extensions))]
         videos.sort(key=lambda entry: entry['name'])
     except:
         return self.sources
     for entry in videos:
         cleaned = clean_title(normalize(entry['name']))
         if self.query not in cleaned: continue
         if any(q in cleaned for q in self.file_query):
             self.scrape_results.append(entry)
示例#21
0
 def results(self, info):
     """Search EasyNews and convert the hits into FEN source dicts.

     Honors self.max_results and, when set, self.max_bytes as an upper
     size bound.  The sources are cached on the Kodi window property
     'easynews_source_results' and returned.
     """
     try:
         self.info = info
         search_name = self._search_name()
         files = EasyNews.search(search_name)
         files = files[0:self.max_results]
         self.label_settings = label_settings(self.info['scraper_settings'],
                                              self.scrape_provider)
         # Raw string avoids invalid-escape-sequence warnings; compiled once
         # outside the loop since the pattern never changes.
         size_pattern = re.compile(r'([\d.]+)\s+(.*)')
         for item in files:
             try:
                 if self.max_bytes:
                     match = size_pattern.search(item['size'])
                     if match:
                         size_bytes = self.to_bytes(*match.groups())
                         if size_bytes > self.max_bytes:
                             continue
                 file_name = normalize(item['name'])
                 file_dl = item['url_dl']
                 size = float(int(item['rawSize'])) / 1073741824
                 details = get_file_info(file_name)
                 video_quality = get_release_quality(file_name, file_dl)
                 label, multiline_label = build_internal_scrapers_label(
                     self.label_settings, file_name, details, size,
                     video_quality)
                 self.sources.append({
                     'name': file_name,
                     'label': label,
                     'multiline_label': multiline_label,
                     'quality': video_quality,
                     'size': size,
                     'url_dl': file_dl,
                     'id': file_dl,
                     'local': False,
                     'direct': True,
                     'source': self.scrape_provider,
                     'scrape_provider': self.scrape_provider
                 })
             except:
                 pass
         window.setProperty('easynews_source_results',
                            json.dumps(self.sources))
     except Exception as e:
         from modules.utils import logger
         logger('FEN easynews scraper Exception', e)
     return self.sources
示例#22
0
 def _process():
     """Yield Furk source dicts for cached files, flagging multi-video packages."""
     for cached in cached_files:
         try:
             size = round(float(int(cached['size'])) / 1073741824, 2)
             if self.info.get('db_type') == 'movie':
                 files_num_video = 1
             else:
                 files_num_video = int(cached['files_num_video'])
             # More than 3 videos is treated as a package; its size is
             # averaged over the contained videos.
             package = 'true' if files_num_video > 3 else 'false'
             if package == 'true':
                 size = float(size) / files_num_video
             file_name = normalize(cached['name'])
             if self.title_filter and package == 'false':
                 if not check_title(self.title, file_name,
                                    self.aliases, self.year,
                                    self.season, self.episode):
                     continue
             file_id = cached['id']
             file_dl = cached['url_dl']
             URLName = clean_file_name(file_name).replace(
                 'html', ' ').replace('+', ' ').replace('-', ' ')
             video_quality, details = get_file_info(
                 name_info=release_info_format(file_name))
             yield {
                 'name': file_name,
                 'title': file_name,
                 'URLName': URLName,
                 'quality': video_quality,
                 'size': size,
                 'size_label': '%.2f GB' % size,
                 'extraInfo': details,
                 'url_dl': file_dl,
                 'id': file_id,
                 'local': False,
                 'direct': True,
                 'package': package,
                 'source': self.scrape_provider,
                 'scrape_provider': self.scrape_provider
             }
         except Exception as e:
             from modules.kodi_utils import logger
             logger('FURK ERROR - 65', e)
示例#23
0
 def _scrape_folders(self, folder_info):
     """Scrape one Real-Debrid torrent folder for matching video files.

     folder_info is (folder_name, torrent_id, assigned_folder_flag).
     """
     try:
         assigned_folder = folder_info[2]
         folder_files = RealDebrid.user_cloud_info(folder_info[1])
         videos = [f for f in folder_files['files']
                   if f['path'].lower().endswith(tuple(self.extensions))]
         file_urls = folder_files['links']
         for index, entry in enumerate(videos):
             # Best effort: attach folder name and the matching link by index.
             try: entry.update({'folder_name': folder_info[0], 'url_link': file_urls[index]})
             except: pass
         for entry in sorted(videos, key=lambda f: f['path']):
             filename = clean_title(normalize(entry['path']))
             if not any(q in filename for q in self.query_list): continue
             if assigned_folder or self.folder_query in filename:
                 self.scrape_results.append(entry)
     except: pass
示例#24
0
def perona_malik_vanilla(image):
    """Run vanilla Perona-Malik anisotropic diffusion on `image`,
    displaying the intermediate result every 100 iterations.

    Assumes a CUDA device is available (the model is moved to the GPU).
    """
    print("Perona-Malik model...")
    tensor = image_to_variable(image)

    model = PeronaMalik(
        tensor.size(),
        diffusion_rate=0.2,
        delta_t=0.01,
        coefficient='exp')
    model.cuda()
    out = model.forward(normalize(tensor))

    for step in range(5001):
        model.zero_grad()
        # Re-wrap the data each step so gradients do not accumulate
        # across iterations.
        out = model.forward(Variable(out.data))
        if step % 100 == 0:
            print("iteration %d" % step)
            frame = squeeze(squeeze(out, 0), 0)
            imshow(frame.cpu().data.numpy())
Example #25
0
 def _scrape_folders(self, folder_info):
     """Collect matching video files from an AllDebrid cloud torrent.

     folder_info is a (folder_name, torrent_dict, assigned) triple; each
     matching file is appended to self.scrape_results as a dict with
     'name', 'url_link' and an evenly apportioned 'size'.
     """
     try:
         assigned_folder = folder_info[2]
         torrent_folder = folder_info[1]
         links = torrent_folder['links']
         total_size = torrent_folder['size']
         valid_exts = tuple(self.extensions)
         # Python 3 mappings expose items(); fall back to iteritems()
         # for legacy Python 2 mappings. (Previously both branches were
         # duplicated wholesale; an unused `extensions` local computed
         # via supported_video_extensions() has also been removed —
         # the filter uses self.extensions.)
         try:
             link_items = list(links.items())
         except AttributeError:
             link_items = list(links.iteritems())
         video_links = [(k, v) for k, v in link_items
                        if v.lower().endswith(valid_exts)]
         if not video_links: return
         # The API reports only the torrent's total size, so split it
         # evenly across the video files it contains (hoisted out of
         # the loop — it is loop-invariant).
         size = total_size / len(video_links)
         final_files = [{'name': v, 'url_link': k, 'size': size}
                        for k, v in video_links]
         for item in final_files:
             filename = clean_title(normalize(item['name']))
             if any(x in filename for x in self.query_list):
                 if assigned_folder:
                     self.scrape_results.append(item)
                 elif self.folder_query in filename:
                     self.scrape_results.append(item)
     except:
         return
Example #26
0
 def _scrape_folders(self, folder_info):
     """Gather Real-Debrid cloud files matching the current movie or
     episode query and append them to self.scrape_results.

     folder_info is a (folder_name, folder_id, assigned) triple.
     """
     try:
         folder_name = folder_info[0]
         assigned_folder = folder_info[2]
         cloud_info = RealDebrid.user_cloud_info(folder_info[1])
         valid_exts = tuple(self.extensions)
         videos = [f for f in cloud_info['files']
                   if f['path'].lower().endswith(valid_exts)]
         links = cloud_info['links']
         for idx, entry in enumerate(videos):
             # Best effort: an entry without a matching link is skipped.
             try:
                 entry.update({'folder_name': folder_name,
                               'url_link': links[idx]})
             except:
                 pass
         videos.sort(key=lambda f: f['path'])
         for entry in videos:
             normalized = normalize(entry['path'])
             title = clean_title(normalized)
             if self.db_type == 'movie':
                 # An assigned folder is trusted outright for movies.
                 match = assigned_folder or (
                     any(y in title for y in self._year_query_list())
                     and self.folder_query in title)
             elif assigned_folder:
                 match = any(q in normalized.lower()
                             for q in self.seas_ep_query_list)
             else:
                 match = seas_ep_filter(self.season, self.episode,
                                        normalized)
             if match:
                 if assigned_folder:
                     entry['assigned_folder'] = True
                 self.scrape_results.append(entry)
     except:
         pass
Example #27
0
 def _scrape_cloud(self):
     """Scan the Premiumize cloud for video files matching the current
     query and append the hits to self.scrape_results."""
     try:
         valid_exts = tuple(self.extensions)
         candidates = sorted(
             (f for f in Premiumize.user_cloud_all()['files']
              if f['path'].lower().endswith(valid_exts)),
             key=lambda f: f['name'])
     except:
         # Any cloud/API failure: bail out with the sources we have.
         return self.sources
     is_movie = self.db_type == 'movie'
     for entry in candidates:
         normalized = normalize(entry['name'])
         if self.query not in clean_title(normalized):
             continue
         if is_movie:
             if any(y in entry['name'] for y in self._year_query_list()):
                 self.scrape_results.append(entry)
         elif seas_ep_filter(self.season, self.episode, normalized):
             self.scrape_results.append(entry)
Example #28
0
 def _process():
     """Yield a normalized source dict for every file in `files` that
     survives the optional title and language filters."""
     for entry in files:
         try:
             file_name = normalize(entry['name'])
             # Optional strict title match against the requested item.
             if self.title_filter and not check_title(
                     self.title, file_name, self.aliases, self.year,
                     self.season, self.episode):
                 continue
             # Optional audio-language whitelist.
             if self.filter_lang and not any(
                     lang in self.lang_filters
                     for lang in entry['language']):
                 continue
             URLName = clean_file_name(file_name)
             URLName = URLName.replace('html', ' ').replace('+', ' ').replace('-', ' ')
             url_dl = entry['url_dl']
             # rawSize is in bytes; report GB rounded to two decimals.
             size = round(float(int(entry['rawSize'])) / 1073741824, 2)
             video_quality, details = get_file_info(
                 name_info=release_info_format(file_name))
             yield {
                 'name': file_name,
                 'title': file_name,
                 'URLName': URLName,
                 'quality': video_quality,
                 'size': size,
                 'size_label': '%.2f GB' % size,
                 'extraInfo': details,
                 'url_dl': url_dl,
                 'id': url_dl,
                 'local': False,
                 'direct': True,
                 'source': self.scrape_provider,
                 'scrape_provider': self.scrape_provider
             }
         except Exception as e:
             from modules.kodi_utils import logger
             logger('FEN easynews scraper yield source error',
                    str(e))
Example #29
0
	def _scrape_folders(self, folder_info):
		"""Append torrent-folder video files that match the current movie
		or episode query to self.scrape_results.

		folder_info is a (folder_name, torrent_dict, assigned) triple.
		"""
		try:
			assigned_folder = folder_info[2]
			valid_exts = tuple(self.extensions)
			candidates = [f for f in folder_info[1]['links']
				if f['filename'].lower().endswith(valid_exts)]
			for entry in candidates:
				normalized = normalize(entry['filename'])
				title = clean_title(normalized)
				if self.db_type == 'movie':
					# An assigned folder is trusted outright for movies.
					match = assigned_folder or (
						any(y in title for y in self._year_query_list())
						and self.folder_query in title)
				elif assigned_folder:
					match = any(q in normalized.lower() for q in self.seas_ep_query_list)
				else:
					match = seas_ep_filter(self.season, self.episode, normalized)
				if match:
					if assigned_folder: entry['assigned_folder'] = True
					self.scrape_results.append(entry)
		except: return
Example #30
0
def filter_furk_tlist(t_files, filtering_list=None):
    """Pick a single file from a Furk torrent file list.

    With a filtering_list, choose the first video file whose cleaned
    name contains any filter term (skipping 'furk320'/'sample' files);
    otherwise choose the file flagged 'is_largest'.  Raises IndexError
    when nothing qualifies.  Returns a (name, url_dl, size) tuple.
    """
    from modules.utils import clean_title, normalize
    if filtering_list:
        matches = [
            f for f in t_files
            if 'video' in f['ct']
            and any(term in clean_title(normalize(f['name'])) for term in filtering_list)
            and not any(bad in f['name'].lower() for bad in ['furk320', 'sample'])
        ]
    else:
        matches = [f for f in t_files if 'is_largest' in f]
    chosen = matches[0]
    return chosen['name'], chosen['url_dl'], chosen['size']