def stream_answer_play_list(self, queue_event):
    '''Answer a play-list request for one of this plugin's live streams.

    Looks up the EPG timeline of the provider encoded in the request uri.
    When "now" lies between two EPG entries, a combined MovieInfo (current
    entry + next entry's title as category) is broadcast via
    STREAM_ANSWER_PLAY_LIST.

    Returns the unchanged queue_event when the uri belongs to a different
    plugin (so other handlers can process it), otherwise None.
    '''
    uri_elements = queue_event.data['uri'].split(':')
    source = uri_elements[0]
    if source != self.plugin_names[1]:
        # not our source, hand the event back for other handlers
        return queue_event
    provider = uri_elements[1]
    time_stamp = time.time()
    try:
        epg_list = self.timeline[provider]
    except KeyError:
        # bug fix: previously a bare except around the whole body mapped
        # EVERY error (including bugs in MovieInfo construction) to this
        # message; only a missing provider is expected here
        print('unknown provider', provider)
        return
    nr_of_entries = len(epg_list)
    i = 0
    # advance past all entries that started before now
    while i < nr_of_entries and time_stamp > int(epg_list[i].timestamp):
        i += 1
    if i < nr_of_entries and i > 0:
        # we found an entry: epg_list[i - 1] runs now, epg_list[i] is next
        first_movie_info = epg_list[i - 1].movie_info
        second_movie_info = epg_list[i].movie_info
        combined_movie_info = MovieInfo(
            uri=first_movie_info['uri'],
            title=first_movie_info['title'],
            category=second_movie_info['title'],
            provider=first_movie_info['provider'],
            timestamp=second_movie_info['timestamp'],
            duration=0,
            # description=first_movie_info['description'],
            query=first_movie_info['query'])
        combined_movie_info['recordable'] = True
        self.modref.message_handler.queue_event(
            None, defaults.STREAM_ANSWER_PLAY_LIST, {
                'uri': queue_event.data['uri'],
                'movie_info': combined_movie_info
            })
def query_handler(self, queue_event, max_result_count):
    ''' try to send simulated answers '''
    # only movie-id lookups are answered by this plugin
    if queue_event.type != defaults.QUERY_MOVIE_ID:
        return []
    wanted_uri = queue_event.params
    # 'all': read the whole config
    for record_movie in self.records.read('all', {}).values():
        if record_movie['new_uri'] != wanted_uri:
            continue
        # extract the original provider back out of the uri
        original_provider = record_movie['new_uri'].split(':')[1]
        return [
            MovieInfo(source=self.plugin_names[0],
                      source_type=defaults.MOVIE_TYPE_RECORD,
                      provider=original_provider,
                      category=record_movie['category'],
                      title=record_movie['title'],
                      timestamp=record_movie['timestamp'],
                      duration=record_movie['duration'],
                      description=record_movie['description'],
                      url=record_movie['new_url'],
                      mime=record_movie['mime'])
        ]
    return []
def load_from_xmltv(self, channel_id, day_text):
    '''Load one day of EPG data for a channel from xmltv.se.

    Fetches ``<channel_id>_<day_text>.xml``, turns every <programme>
    element into a Movie (registered in self.movies) plus a MovieInfo and
    returns the list of MovieInfo entries.
    '''
    var_url = urlopen('https://xmltv.xmltv.se/' + channel_id + '_' +
                      day_text + '.xml')
    epg_xml = parse(var_url)
    result = []
    count = 0
    for programme in epg_xml.iterfind('programme'):
        provider = self.get_attrib(programme, 'channel')
        start = self.string_to_timestamp(
            self.get_attrib(programme, 'start'))
        stop = self.string_to_timestamp(self.get_attrib(programme, 'stop'))
        title = self.get_text(programme.find('title'), '')
        desc = self.get_text(programme.find('desc'), '')
        category = self.get_text(programme.find('category'), '')
        episode = programme.find('episode-num')
        episode_num = None
        channel_info = self.search_channel_info(provider)
        url = None
        media_type = None
        if channel_info:
            url = channel_info['url']
            media_type = channel_info['mediatype']
        # bug fix: an ElementTree Element without children is falsy, so
        # 'if episode:' skipped existing <episode-num> tags - test for None
        if episode is not None:
            num_system = self.get_attrib(episode, 'system')
            if num_system == 'xmltv_ns':
                # TODO: episode_num is computed but never attached to the movie
                episode_num = self.get_text(episode)
        count += 1
        plugin_name = self.plugin_names[0]
        self.providers.add(provider)
        self.categories.add(category)
        new_movie = Movie(source=plugin_name,
                          source_type=defaults.MOVIE_TYPE_STREAM,
                          provider=provider,
                          category=category,
                          title=title,
                          timestamp=str(int(start)),
                          duration=stop - start,
                          description=desc,
                          url=url)
        new_movie.add_stream(media_type, '', url)
        if plugin_name not in self.movies:
            self.movies[plugin_name] = {}
        self.movies[plugin_name][new_movie.uri()] = new_movie
        movie_info = MovieInfo.movie_to_movie_info(new_movie, category)
        movie_info['recordable'] = True
        result.append(movie_info)
    print("epg loaded, {0} entries".format(count))
    return result
def create_new_movie_list_item(self, user, quick_search_name, uri,
                               movie_list_uuid, query, record_request):
    '''Create or update an entry in the user's movie list.

    Resolves ``uri`` via a QUERY_MOVIE_ID query first; when found, either
    reuses an existing movielist entry (edit case) or creates a new one
    with a fresh uuid, then persists the whole movielist.

    Returns a tuple ``(movie_list_uuid, movie_uri)`` on success, or None
    when the uri could not be resolved or an endless stream shall be
    recorded.
    '''
    movie_list = self.modref.message_handler.query(
        Query(user, defaults.QUERY_MOVIE_ID, uri))
    if movie_list:
        if not movie_list[0].duration and record_request:
            # if the duration is 0, then we can't record it, as this indicates an endless life stream
            return
        # TODO
        # is it a live movie? Then it gets appended as a live snippet
        # is it a named quick search? Does it exist already or is it new?
        # is it a normal stream?
        # is it a record entry?
        # an existing entry was edited, and it was not a quicksearch
        if movie_list_uuid in self.movielist and not quick_search_name and not record_request:
            print("Movie list Eintrag existiert schon")
            movie_list_entry = self.movielist[movie_list_uuid]
        else:
            movie_list_entry = {
                'clients': {},
            }
            # per-user playback position bookkeeping
            movie_list_entry['clients'][user] = {'current_time': 0}
            # new entry, so it gets its own identifier
            movie_list_uuid = str(uuid.uuid4())
            self.movielist[movie_list_uuid] = movie_list_entry
        if record_request:
            movie_list_entry['type'] = defaults.MOVIE_TYPE_TIMER
        else:
            movie_list_entry['type'] = movie_list[0].source_type
        # we need to make a copy here, because in case of a new created quicksearch item the quicksearch query data and this normal item both points to the same query object,
        # which causes an error (name="") in the quicksearch item when we remove the name here...
        movie_list_entry['query'] = copy.copy(query)
        # as this is not a quicksearch entry anymore, we must make sure that it does not contain a
        # quicksearch name anymore
        movie_list_entry['query']['name'] = ''
        movie_list_entry['movie_info'] = MovieInfo(
            uri,  # uri
            movie_list[0].title,  # title
            movie_list[0].category,  # category
            movie_list[0].provider,  # provider
            movie_list[0].timestamp,  # timestamp
            movie_list[0].duration,  # duration
            movie_list[0].description  # description
        )
        self.modref.store.write_users_value('movielist', self.movielist)
        return movie_list_uuid, movie_list[0].uri()
    else:
        # uri unknown: persist anyway (keeps store in sync) and signal failure
        self.modref.store.write_users_value('movielist', self.movielist)
        return None
def update_movie_list(self, queue_event, record_request=False):
    '''Handle a movielist update request from a client.

    If the request carries a quicksearch name, the matching quicksearch
    template entry of this user is updated (or newly created) first; then
    the normal movielist item is created via create_new_movie_list_item.
    '''
    with self.lock:
        # is it a quick search? Then update the quicksearch data first
        quick_search_name = queue_event.data['query']['name']
        if quick_search_name:
            quick_search_entry = None
            # look for an existing quicksearch of the same user (name match is case-insensitive)
            for movie_list_entry in self.movielist.values():
                if movie_list_entry['query'] and movie_list_entry['query'][
                        'name'].lower() == quick_search_name.lower(
                        ) and queue_event.user in movie_list_entry['clients']:
                    quick_search_entry = movie_list_entry
                    break
            if not quick_search_entry:
                quick_search_entry = {
                    'clients': {},
                }
                quick_search_entry['clients'][queue_event.user] = {
                    'current_time': 0
                }
                # new entry, so it gets its own identifier
                quick_search_entry_id = str(uuid.uuid4())
                self.movielist[quick_search_entry_id] = quick_search_entry
            if record_request:
                quick_search_entry[
                    'type'] = defaults.MOVIE_TYPE_RECORD_TEMPLATE
            else:
                quick_search_entry['type'] = defaults.MOVIE_TYPE_TEMPLATE
            quick_search_entry['query'] = queue_event.data['query']
            # NOTE(review): these positional labels (url, mime, title, ...)
            # disagree with the positional MovieInfo call in
            # create_new_movie_list_item (uri, title, category, ...) -
            # verify against the MovieInfo constructor signature
            quick_search_entry['movie_info'] = MovieInfo(
                '0',  # url
                '',  # mime
                quick_search_name,  # title
                '',  # category
                '',  # source
                '',  # source_type
                '',  # provider
                '',  # timestamp
                '',  # duration
                ''  # description
            )
        return self.create_new_movie_list_item(
            queue_event.user, quick_search_name,
            queue_event.data['movie_uri'], queue_event.data['uuid'],
            queue_event.data['query'], record_request)
def loadChannels(self):
    '''Scrape the channel overview page of every configured server and
    register each found channel as a live-stream MovieInfo.

    Errors are reported per server and do not abort the remaining servers
    (best-effort loading).
    '''
    for server in self.config.read('all'):
        try:
            f = requests.get(server['url'])
            content = f.text
            # NOTE(review): greedy '(.*)' with re.DOTALL captures up to the
            # LAST </ol> on the page - verify this is intended for pages
            # with more than one ordered list
            match = re.search(r'<ol class="items">(.*)</ol>', content,
                              re.DOTALL)
            if match:
                lines = match.group(1).split('\n')
                # one <li> per channel: group(1) = relative link, group(2) = channel name
                item_regex = re.compile(
                    r'<li value=".*"><a href="(.*)" vod tvid=".*">(.*)</a>'
                )
                plugin_name = self.plugin_names[0]
                source_type = defaults.MOVIE_TYPE_STREAM
                with self.lock:
                    for line in lines:
                        item_match = re.search(item_regex, line)
                        if item_match:
                            # resolve the relative link against the server url
                            full_url = urljoin(server['url'],
                                               item_match.group(1))
                            provider = item_match.group(2)
                            self.providers.add(provider)
                            new_movie = MovieInfo(url=full_url,
                                                  mime='video/MP2T',
                                                  title=provider + ' Live',
                                                  category='live',
                                                  source=plugin_name,
                                                  source_type=source_type,
                                                  provider=provider,
                                                  timestamp=0,
                                                  duration=0,
                                                  description='')
                            if not plugin_name in self.movies:
                                self.movies[plugin_name] = {}
                            self.movies[plugin_name][
                                new_movie['uri']] = new_movie
        except Exception as e:
            # best-effort: report and continue with the next server
            print(str(e))
def add_movie(self, provider, full_url):
    '''Register a single live stream of the given provider as a MovieInfo.'''
    source = self.plugin_names[0]
    self.providers.add(provider)
    # build the uri by hand so it does not contain the title crc32 hash
    live_uri = ':'.join([source, provider, '0'])
    movie = MovieInfo(
        url=full_url,
        mime='video/MP2T',
        title=provider + ' Live',
        category='live',
        source=source,
        source_type=defaults.MOVIE_TYPE_STREAM,
        provider=provider,
        timestamp=0,
        duration=0,
        description='',
        uri=live_uri
    )
    # create the per-plugin movie dict on first use, then store the movie
    self.movies.setdefault(source, {})[movie['uri']] = movie
def query_handler(self, queue_event, max_result_count):
    ''' answers with list[] of results
    '''
    # print("mediathek_ard query handler", queue_event.type, queue_event.user, max_result_count)
    if queue_event.type == defaults.QUERY_AVAILABLE_SOURCES:
        return self.plugin_names
    if queue_event.type == defaults.QUERY_AVAILABLE_PROVIDERS:
        res = []
        for plugin_name in self.plugin_names:
            if plugin_name in queue_event.params[
                    'select_source_values']:  # this plugin is one of the wanted
                for provider in self.providers:
                    if max_result_count > 0:
                        res.append(provider)
                        max_result_count -= 1
                    else:
                        return res  # maximal number of results reached
        return res
    if queue_event.type == defaults.QUERY_AVAILABLE_CATEGORIES:
        # just do nothing, the mediathek does not have categories
        pass
    if queue_event.type == defaults.QUERY_MOVIE_ID:
        elements = queue_event.params.split(':')
        try:
            return [self.movies[elements[0]][queue_event.params]]
        except KeyError:
            # bug fix: narrowed from a bare except - an unknown plugin or
            # uri is expected here, anything else should surface
            return []
    if queue_event.type == defaults.QUERY_AVAILABLE_MOVIES:
        res = []
        titles = queue_event.params['select_title'].split()
        # descriptions=queue_event.params['select_description'].split()
        # one whole-word, case-insensitive regex per search word
        description_regexs = [
            re.compile(r'\b{}\b'.format(description), re.IGNORECASE)
            for description in
            queue_event.params['select_description'].split()
        ]
        for plugin_name in self.plugin_names:
            if plugin_name in queue_event.params[
                    'select_source_values']:  # this plugin is one of the wanted
                if plugin_name in self.movies:  # are there any movies stored for this plugin?
                    for movie in self.movies[plugin_name].values():
                        if movie.provider in queue_event.params[
                                'select_provider_values']:
                            # title words match against title OR category
                            if titles:
                                found = False
                                for title in titles:
                                    if title.lower() in movie.title.lower():
                                        found = True
                                    if title.lower() in movie.category.lower():
                                        found = True
                                if not found:
                                    continue
                            if description_regexs:
                                found = False
                                for description_regex in description_regexs:
                                    if re.search(description_regex,
                                                 movie.description):
                                        found = True
                                if not found:
                                    continue
                            if max_result_count > 0:
                                movie_info = MovieInfo.movie_to_movie_info(
                                    movie, '')
                                movie_info['streamable'] = True
                                movie_info['recordable'] = True
                                res.append(movie_info)
                                max_result_count -= 1
                            else:
                                return res  # maximal number of results reached
        return res
    return []
def check_for_updates(self):
    '''Refresh the mediathek film list and (re)build the whoosh index.

    When the cached 'online_filmlist' file is older than 48 hours, the
    current server list is fetched from mediathekview.de, the film list is
    downloaded from the server with the lowest prio, LZMA-unpacked and the
    search index is rebuilt from it.
    '''
    file_name = 'online_filmlist'
    full_file_name = DirectoryMapper.abspath(self.plugin_id, 'tmpfs',
                                             file_name, True)
    try:
        # does the file exist at all already?
        filmlist_time_stamp = DirectoryMapper.getmtime(
            self.plugin_id, 'tmpfs', file_name)
    except:
        # best-effort: a missing file simply forces a download below
        filmlist_time_stamp = 0
    if filmlist_time_stamp < time.time(
    ) - 60 * 60 * 48:  # file is older as 48 hours
        '''
        Bootstrap to read the filmlist:
        1. read the list of actual filmlist URLs from https://res.mediathekview.de/akt.xml
        '''
        self.logger.info("Retrieve film list")
        try:
            var_url = urlopen('https://res.mediathekview.de/akt.xml')
            server_list = parse(var_url)
            # NOTE(review): leftover debug print
            print(server_list)
            url = None
            prio = 999  # dummy start value
            for item in server_list.iterfind('Server'):
                this_prio = int(item.findtext('Prio'))
                if this_prio < prio:  # filter for the server with the lowest prio
                    prio = this_prio
                    url = item.findtext('URL')
            self.logger.info(f'Mediathek filmlist url {url}')
            if url:
                try:
                    urlretrieve(url, full_file_name + '.pack')
                    self.logger.info("filmlist downloaded")
                except Exception as e:
                    self.logger.warning(
                        f'failed filmlist download {str(e)}')
                try:
                    # unpack the LZMA archive chunk-wise into the plain file
                    with DirectoryMapper.open(self.plugin_id, 'tmpfs',
                                              file_name,
                                              'wb') as unpack_file_handle:
                        with lzma.open(
                                DirectoryMapper.open(
                                    self.plugin_id, 'tmpfs',
                                    file_name + '.pack',
                                    'rb')) as archive_file_handle:
                            bytes = archive_file_handle.read(4096)
                            while bytes:
                                unpack_file_handle.write(bytes)
                                bytes = archive_file_handle.read(4096)
                    self.reset_index()  # destroy the existing index
                    self.logger.info('filmlist server list unpacked')
                except Exception as e:
                    print('failed filmlist unpack', str(e))
        except Exception as e:
            print('failed filmlist server list download')
    else:
        if not self.is_empty() and self.providers:
            return  # no need to load, we have already movie data
    # remembers the last non-empty provider/category, because the film list
    # leaves repeated values empty in subsequent rows
    loader_remember_data = {'provider': '', 'category': ''}
    try:
        with DirectoryMapper.open(self.plugin_id, 'tmpfs',
                                  file_name) as data:
            self.reset_index()
            with self.whoosh_ix.writer() as whoosh_writer:
                count = 0
                self.logger.info(f"loading filmlist...")
                # NOTE(review): ('X') is just the string 'X', not a 1-tuple -
                # verify against JsonSlicer's path API
                for liste in JsonSlicer(data, ('X'), path_mode='map_keys'):
                    count += 1
                    data_array = liste[1]
                    # "Sender" 0,
                    # "Thema" 1,
                    # "Titel" 2,
                    # "Datum" 3,
                    # "Zeit" 4,
                    # "Dauer" 5,
                    # "Größe [MB]" 6,
                    # "Beschreibung" 7,
                    # "Url" 8,
                    # "Website" 9,
                    # "Url Untertitel" 10,
                    # "Url RTMP" 11,
                    # "Url Klein" 12,
                    # "Url RTMP Klein" 13,
                    # "Url HD" 14,
                    # "Url RTMP HD" 15,
                    # "DatumL" 16,
                    # "Url History" 17,
                    # "Geo" 18,
                    # "neu" 19
                    provider = data_array[0]
                    category = data_array[1]
                    if provider:
                        loader_remember_data['provider'] = provider
                    else:
                        provider = loader_remember_data['provider']
                    if category:
                        loader_remember_data['category'] = category
                    else:
                        category = loader_remember_data['category']
                    if category == 'Livestream':
                        source_type = defaults.MOVIE_TYPE_STREAM
                        plugin_name = self.plugin_names[1]
                        provider = provider.replace('Livestream',
                                                    '').strip()
                        #print("Livestream")
                    else:
                        plugin_name = self.plugin_names[0]
                        source_type = defaults.MOVIE_TYPE_RECORD
                    self.providers.add(provider)
                    try:  # livestream do not have a duration
                        timestamp = int(data_array[16])
                        timestamp_datetime = datetime.datetime.fromtimestamp(
                            timestamp)
                    except:
                        # fall back to epoch+1 for unparsable timestamps
                        timestamp = 1
                        timestamp_datetime = datetime.datetime.fromtimestamp(
                            timestamp)
                    movie_info = MovieInfo(
                        url=data_array[8],
                        mime='video/mp4',
                        title=data_array[2],
                        category=category,
                        source=plugin_name,
                        source_type=source_type,
                        provider=provider,
                        timestamp=timestamp,
                        duration=self.time_string_to_secs(data_array[5]),
                        description=data_array[7],
                    )
                    # fill the search engine
                    whoosh_writer.update_document(
                        source=plugin_name,
                        source_type=source_type,
                        provider=provider,
                        title=data_array[2],
                        category=category,
                        uri=movie_info['uri'],
                        description=data_array[7],
                        timestamp=timestamp_datetime,
                        url=movie_info['url'],
                        mime=movie_info['mime'],
                        duration=movie_info['duration'])
                    if not plugin_name in self.movies:
                        self.movies[plugin_name] = {}
                    # experimental: Do not save the movies in mem anymore, just in Whoosh
                    #self.movies[plugin_name][movie_info['uri']]=movie_info
            self.provider_storage.write('provider_cache',
                                        list(self.providers))
        self.logger.info(f"filmlist loaded, {count} entries")
    except Exception as err:
        self.logger.warning(f'failed to read filmlist:{err}')
def stream_answer_play_list(self, queue_event):
    '''Answer a play-list request with "now playing / up next" EPG data.

    For known providers the EPG timeline is searched for the entry running
    right now; its progress (in percent, transported in the 'duration'
    field) and the following title are broadcast. Unknown providers are
    registered in all_EPG_Data so the background EPG reader picks them up.

    Returns the unchanged queue_event for foreign sources, otherwise None.
    '''
    uri = queue_event.data['uri']
    uri_elements = uri.split(':')
    source = uri_elements[0]
    if source != self.stream_source:
        # not our source, hand the event back for other handlers
        return queue_event
    provider = uri_elements[1]
    if not provider in self.all_EPG_Data:
        # first contact: resolve the provider's stream url and register it
        # so the EPG grabber knows it is wanted
        movie_info_list = self.modref.message_handler.query(
            Query(None, defaults.QUERY_MOVIE_ID,
                  source + ':' + provider + ':0'))
        if movie_info_list:
            movie_info = movie_info_list[0]
            with self.lock:
                self.all_EPG_Data[provider] = {
                    'requested': True,
                    'url': movie_info['url'],
                    'epg_data': {},
                    'lastmodified': 0
                }
    else:
        self.all_EPG_Data[provider]['requested'] = True
    time_stamp = time.time()
    try:
        epg_list = []
        if provider in self.timeline:
            epg_list = self.timeline[provider]
        nr_of_entries = len(epg_list)
        i = 0
        # advance past all entries that started before now
        while i < nr_of_entries and time_stamp > int(
                epg_list[i].timestamp):
            i += 1
        if i < nr_of_entries and i > 0 and time_stamp < int(
                epg_list[i - 1].timestamp) + int(
                    epg_list[i - 1].movie_info['duration']):
            # we found an entry: epg_list[i - 1] runs now, epg_list[i] is next
            first_movie_info = epg_list[i - 1].movie_info
            second_movie_info = epg_list[i].movie_info
            # NOTE(review): 'duration' carries the elapsed percentage of the
            # running show here, not seconds - verify consumers expect this
            processed_time_percentage = (
                time_stamp - int(first_movie_info['timestamp'])
            ) * 100 / first_movie_info['duration']
            if processed_time_percentage < 0:
                processed_time_percentage = 0
            if processed_time_percentage > 100:
                processed_time_percentage = 100
            combined_movie_info = MovieInfo(
                url=first_movie_info['url'],
                mime=first_movie_info['mime'],
                source=first_movie_info['source'],
                source_type=first_movie_info['source_type'],
                uri=first_movie_info['uri'],
                title=first_movie_info['title'],
                category=first_movie_info['category'],
                next_title=second_movie_info['title'],
                provider=first_movie_info['provider'],
                timestamp=second_movie_info['timestamp'],
                duration=processed_time_percentage,
                # description=first_movie_info['description'],
                query=first_movie_info['query'])
            combined_movie_info['recordable'] = True
        else:
            # no matching EPG entry: answer with a placeholder
            combined_movie_info = MovieInfo(
                url='',
                mime='',
                source='',
                source_type='',
                uri=':'.join([self.stream_source, provider, '0']),
                title='-',
                category='',
                provider=provider,
                timestamp=time_stamp,
                duration=0,
                # description='',
                query=None)
            combined_movie_info['recordable'] = False
            # as we didn't found a matching EPG record, we "rewind" the provider update time by 2 hours to force another epg read
            self.all_EPG_Data[provider]['lastmodified'] = time.time(
            ) - 2 * 60 * 60
        self.modref.message_handler.queue_event(
            None, defaults.STREAM_ANSWER_PLAY_LIST, {
                'uri': queue_event.data['uri'],
                'movie_info': combined_movie_info
            })
    except Exception as e:
        print('unknown provider', provider, str(e))
def get_epg_from_receiver(self, provider, url):
    '''Grab EPG data for a provider by running the external epg_grap.sh.

    The stream url's 'pids' query parameter is reduced to the PIDs that
    carry SDT/EIT tables, the script's JSON output is parsed and every
    programme is registered as a MovieInfo.

    Returns a dict {start_timestamp: MovieInfo}, or None on failure.
    '''
    # reduce the pids to the ones containing SDT (0x11) and EIT (0x12)
    url_st = urlparse(url)
    queries = url_st.query
    new_queries = ""
    if queries:
        # rebuild the query string, replacing only the 'pids' value
        for eq in queries.split("&"):
            key = eq.split("=")[0]
            value = eq.split("=")[1]
            if key == 'pids':
                value = "0,17,18"
            new_queries += key + "=" + value + "&"
        new_queries = new_queries.strip("&")
    url_epd_pids_only = urlunparse((
        url_st.scheme,
        url_st.netloc,
        url_st.path,
        url_st.params,
        new_queries,
        url_st.fragment,
    ))
    attr = [
        os.path.join(self.origin_dir, 'epg_grap.sh'), url_epd_pids_only,
        provider,
        str(self.config.read('epgloops')),
        str(self.config.read('epgtimeout'))
    ]  # process arguments
    self.logger.info("epg_grap started {0} {1} {2}".format(
        provider, url_epd_pids_only, repr(attr)))
    try:
        self.process = subprocess.Popen(attr,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        cleaner = Timer(
            600, self.cleanProcess
        )  # if epg_grap won't exit, try to terminate its process after 600 s (10 minutes)
        cleaner.start()
        epg_out, err = self.process.communicate()
        #self.process.wait() # oops... not needed? harmless!
        cleaner.cancel()
        if err:
            self.logger.warning("epg_grap ended with an error:\n%s" % (err))
        else:
            self.logger.debug("epg_grap' ended")
        epg_json_string = epg_out.decode()
        epg_json = json.loads(epg_json_string)
        result = {}
        count = 0
        for json_movie in epg_json['details'].values():
            start = json_movie['unixTimeBegin']
            stop = json_movie['unixTimeEnd']
            if json_movie['title']:
                title = self.split_text_by_capital_chars(
                    json_movie['title'])[0]
            else:
                title = json_movie['name']
            desc = '\n'.join(
                self.split_text_by_capital_chars(
                    json_movie['description']))
            category = json_movie['name']
            count += 1
            # we'll use the name of the stream source plugin instead the name of the EPG plugin itself
            # plugin_name = self.plugin_names[0]
            plugin_name = self.stream_source
            self.providers.add(provider)
            # EPG has its own special hardwired categories
            #self.categories.add(category)
            new_movie = MovieInfo(url=url,
                                  mime='video/MP2T',
                                  title=title,
                                  category=category,
                                  source=plugin_name,
                                  source_type=defaults.MOVIE_TYPE_STREAM,
                                  provider=provider,
                                  timestamp=int(start),
                                  duration=stop - start,
                                  description=desc)
            if not plugin_name in self.movies:
                self.movies[plugin_name] = {}
            self.movies[plugin_name][new_movie['uri']] = new_movie
            result[start] = new_movie
        for json_provider in epg_json['providers']:
            self.logger.debug(
                "channel found in epg: {0}".format(json_provider))
        self.logger.info("{0} epg loaded, {1} entries".format(
            provider, count))
        return result
    except Exception as ex:
        self.logger.warning("epg_grap could not be started. Error: %s" %
                            (ex))
        return
def query_handler(self, queue_event, max_result_count):
    ''' answers with list[] of results
    '''
    # print("query handler", self.get_plugin_names(), queue_event.type, queue_event.user, max_result_count)
    if queue_event.type == defaults.QUERY_AVAILABLE_SOURCES:
        return self.get_plugin_names()
    if queue_event.type == defaults.QUERY_AVAILABLE_PROVIDERS:
        res = []
        for plugin_name in self.get_plugin_names():
            # this plugin is one of the wanted
            if plugin_name in queue_event.params['select_source_values']:
                for provider in self.providers:
                    if max_result_count > 0:
                        res.append(provider)
                        max_result_count -= 1
                    else:
                        return res  # maximal number of results reached
        return res
    if queue_event.type == defaults.QUERY_AVAILABLE_CATEGORIES:
        res = []
        for plugin_name in self.get_plugin_names():
            # this plugin is one of the wanted
            if plugin_name in queue_event.params['select_source_values']:
                for category in self.get_categories():
                    if max_result_count > 0:
                        res.append(category)
                        max_result_count -= 1
                    else:
                        return res  # maximal number of results reached
        return res
    if queue_event.type == defaults.QUERY_MOVIE_ID:
        elements = queue_event.params.split(
            ':')  # split the uri given in queue_event.params
        res = []
        if not elements[0] in self.get_plugin_names():
            # its not our source
            pass  # return res
        # look the full uri up in the whoosh index
        with self.whoosh_ix.searcher() as searcher:
            qp = QueryParser('uri', schema=self.whoosh_ix.schema)
            q = qp.parse('"' + queue_event.params + '"')
            results = searcher.search(q)
            for result in results:
                try:
                    #movie_info = self.movies[result['source']][result['uri']]
                    movie_info = MovieInfo(
                        url=result['url'],
                        mime=result['mime'],
                        title=result['title'],
                        category=result['category'],
                        source=result['source'],
                        source_type=result['source_type'],
                        provider=result['provider'],
                        # caution: when using timestamp(), it needs to be converted to int, otherways it will be a float
                        timestamp=int(result['timestamp'].timestamp()),
                        duration=result['duration'],
                        description=result['description'],
                        uri=result['uri'])
                    movie_info['streamable'] = self.is_streamable()
                    movie_info['recordable'] = True
                    res.append(movie_info)
                except Exception as e:
                    print('Exception in', self.get_plugin_names(),
                          self.movies.keys(), result['source'],
                          result['uri'], str(e))
        return res
    if queue_event.type == defaults.QUERY_AVAILABLE_MOVIES:
        res = []
        if not self.get_plugin_names(
        )[0] in queue_event.params['select_source_values']:
            return res
        with self.whoosh_ix.searcher() as searcher:
            # qp = QueryParser('title', schema=self.whoosh_ix.schema)
            qp = MultifieldParser(['title', 'category', 'timestamp'],
                                  schema=self.whoosh_ix.schema)
            query_string = ''
            if queue_event.params['select_provider_values']:
                # add a quote around each provider to make e.g. ZDF HD => 'ZDF HD' and add provider: to it
                quoted_values = map(
                    lambda pr: 'provider:\'' + pr + '\'',
                    queue_event.params['select_provider_values'])
                query_string += '(' + ' OR '.join(quoted_values) + ')'
            if queue_event.params['select_category_values']:
                # now we need to build a timestamp query string, which ANDs the different time/day parameters
                timestamp_queries_unsorted = {}
                for category_query in queue_event.params[
                        'select_category_values']:
                    try:
                        json_query_data = json.loads(category_query)
                        timestamp_query_type = json_query_data["type"]
                        if not timestamp_query_type in timestamp_queries_unsorted:
                            timestamp_queries_unsorted[
                                timestamp_query_type] = set()
                        timestamp_queries_unsorted[
                            timestamp_query_type].add(
                                json_query_data["expression"])
                    except:
                        # NOTE(review): logger.warning treats the 2nd positional
                        # arg as a %-format argument, but the message has no
                        # placeholder - use 'received: %s' or an f-string
                        self.get_instance().logger.warning(
                            'malformed category received:', category_query)
                # after we have sorted the expressions, we need to make a proper query string out of it
                timestamp_queries = {}
                for type, expressions in timestamp_queries_unsorted.items(
                ):
                    # first we OR each category for itself
                    timestamp_queries[type] = '(' + ' OR '.join(
                        map(lambda pr: 'timestamp:' + pr,
                            expressions)) + ')'
                # then we AND each category
                timestamp_query = '(' + ' AND '.join(
                    timestamp_queries.values()) + ')'
                if query_string:
                    query_string += ' AND '
                query_string += timestamp_query
            # lets whoosh understand date expressions inside the query
            qp.add_plugin(DateParserPlugin())
            try:
                # if select_searchtext is empty in the browser, it will be not set in the JSON message and can cause an key exeption here, so we have to catch that!
                query_string += ' ' + queue_event.params[
                    'select_searchtext']
            except:
                pass
            q = qp.parse(query_string)
            results = searcher.search(q,
                                      limit=max_result_count,
                                      sortedby='timestamp')
            for result in results:
                try:
                    #movie_info = self.movies[result['source']][result['uri']]
                    movie_info = MovieInfo(
                        url=result['url'],
                        mime=result['mime'],
                        title=result['title'],
                        category=result['category'],
                        source=result['source'],
                        source_type=result['source_type'],
                        provider=result['provider'],
                        # caution: when using timestamp(), it needs to be converted to int, otherways it will be a float
                        timestamp=int(result['timestamp'].timestamp()),
                        duration=result['duration'],
                        description=result['description'],
                        uri=result['uri'])
                    movie_info['streamable'] = self.is_streamable()
                    movie_info['recordable'] = True
                    res.append(movie_info)
                except Exception as e:
                    print('Exception in', self.get_plugin_names(),
                          self.movies.keys(), result['source'],
                          result['uri'], str(e))
        return res
    return []
def query_handler(self, queue_event, max_result_count):
    ''' answers with list[] of results
    '''
    # print("query handler", self.plugin_id, queue_event.type, queue_event.user, max_result_count)
    if queue_event.type == defaults.QUERY_AVAILABLE_SOURCES:
        return self.plugin_names
    if queue_event.type == defaults.QUERY_AVAILABLE_PROVIDERS:
        res = []
        for plugin_name in self.plugin_names:
            # this plugin is one of the wanted
            if plugin_name in queue_event.params['select_source_values']:
                # providers are only reported for the first plugin name
                if plugin_name == self.plugin_names[0]:
                    for provider in self.providers:
                        if max_result_count > 0:
                            res.append(provider)
                            max_result_count -= 1
                        else:
                            return res  # maximal number of results reached
        return res
    if queue_event.type == defaults.QUERY_AVAILABLE_CATEGORIES:
        res = []
        for plugin_name in self.plugin_names:
            # this plugin is one of the wanted
            if plugin_name in queue_event.params['select_source_values']:
                for category in self.categories:
                    if max_result_count > 0:
                        res.append(category)
                        max_result_count -= 1
                    else:
                        return res  # maximal number of results reached
        return res
    if queue_event.type == defaults.QUERY_MOVIE_ID:
        elements = queue_event.params.split(':')
        try:
            return [self.movies[elements[0]][queue_event.params]]
        except:
            # NOTE(review): bare except silently maps ANY error to "not
            # found" - consider narrowing to KeyError
            return []
    if queue_event.type == defaults.QUERY_AVAILABLE_MOVIES:
        res = []
        titles = queue_event.params['select_title'].split()
        # descriptions=queue_event.params['select_description'].split()
        # one whole-word, case-insensitive regex per search word
        description_regexs = [
            re.compile(r'\b{}\b'.format(description), re.IGNORECASE)
            for description in
            queue_event.params['select_description'].split()
        ]
        for plugin_name in self.plugin_names:
            # this plugin is one of the wanted
            if plugin_name in queue_event.params['select_source_values']:
                # now we need to do a dirty trick, because in our movies the entries are not store be the correct plugin name,
                # but the real data source instead, which is slighty confusing,,
                plugin_name = self.stream_source
                if plugin_name in self.movies:
                    # are there any movies stored for this plugin?
                    with self.lock:
                        for movie in self.movies[plugin_name].values():
                            if movie.provider in queue_event.params[
                                    'select_provider_values']:
                                if titles or description_regexs:
                                    # in case any search criteria is given
                                    if titles:
                                        found = False
                                        for title in titles:
                                            if title.lower(
                                            ) in movie.title.lower():
                                                found = True
                                            if title.lower(
                                            ) in movie.category.lower():
                                                found = True
                                        if not found:
                                            continue
                                    if description_regexs:
                                        found = False
                                        for description_regex in description_regexs:
                                            if re.search(
                                                    description_regex,
                                                    movie.description):
                                                found = True
                                        if not found:
                                            continue
                                if max_result_count > 0:
                                    movie_info = MovieInfo.movie_to_movie_info(
                                        movie, '')
                                    movie_info['recordable'] = True
                                    res.append(movie_info)
                                    max_result_count -= 1
                                else:
                                    return res  # maximal number of results reached
        return res
    return []
def stream_answer_play_list(self, queue_event):
    '''Answer a play-list request with the currently running EPG entry.

    Unknown providers are registered in all_EPG_Data so the background EPG
    reader picks them up. When the timeline holds an entry covering "now",
    a combined MovieInfo (current title + next title as category) is
    broadcast, otherwise a non-recordable placeholder.

    Returns the unchanged queue_event for foreign sources, otherwise None.
    '''
    uri = queue_event.data['uri']
    uri_elements = uri.split(':')
    source = uri_elements[0]
    if source != self.stream_source:
        # not our source, hand the event back for other handlers
        return queue_event
    provider = uri_elements[1]
    if not provider in self.all_EPG_Data:
        # first contact: resolve the provider's stream url and register it
        # so the EPG grabber knows it is wanted
        movie_info_list = self.modref.message_handler.query(
            Query(None, defaults.QUERY_MOVIE_ID,
                  source + ':' + provider + ':0'))
        if movie_info_list:
            movie = movie_info_list[0]
            url = movie.url
            with self.lock:
                self.all_EPG_Data[provider] = {
                    'requested': True,
                    'url': url,
                    'epg_data': {},
                    'lastmodified': 0
                }
    else:
        self.all_EPG_Data[provider]['requested'] = True
    time_stamp = time.time()
    try:
        epg_list = []
        if provider in self.timeline:
            epg_list = self.timeline[provider]
        nr_of_entries = len(epg_list)
        i = 0
        # advance past all entries that started before now
        while i < nr_of_entries and time_stamp > int(
                epg_list[i].timestamp):
            i += 1
        if i < nr_of_entries and i > 0 and time_stamp < int(
                epg_list[i - 1].timestamp) + int(
                    epg_list[i - 1].movie_info['duration']):
            # we found an entry: epg_list[i - 1] runs now, epg_list[i] is next
            first_movie_info = epg_list[i - 1].movie_info
            second_movie_info = epg_list[i].movie_info
            combined_movie_info = MovieInfo(
                uri=first_movie_info['uri'],
                title=first_movie_info['title'],
                category=second_movie_info['title'],
                provider=first_movie_info['provider'],
                timestamp=second_movie_info['timestamp'],
                duration=0,
                # description=first_movie_info['description'],
                query=first_movie_info['query'])
            combined_movie_info['recordable'] = True
        else:
            # no matching EPG entry: answer with a placeholder
            combined_movie_info = MovieInfo(
                uri=':'.join([self.stream_source, provider, '0']),
                title='-',
                category='',
                provider=provider,
                timestamp=time_stamp,
                duration=0,
                # description='',
                query=None)
            combined_movie_info['recordable'] = False
        self.modref.message_handler.queue_event(
            None, defaults.STREAM_ANSWER_PLAY_LIST, {
                'uri': queue_event.data['uri'],
                'movie_info': combined_movie_info
            })
    except Exception as e:
        print('unknown provider', provider, str(e))
def get_epg_from_linvdr(self, provider, url):
    '''Grab EPG data for a provider from a linvdr receiver via epg_grap.sh.

    The stream url's 'pids' query parameter is reduced to the PIDs that
    carry SDT/EIT tables, the script's JSON output is parsed and every
    programme is registered as a Movie plus MovieInfo.

    Returns a dict {start_timestamp: movie_info}, or None on failure.
    '''
    # reduce the pids to the ones containing SDT (0x11) and EIT (0x12)
    print('original URL:', url)
    url_st = urlparse(url)
    queries = url_st.query
    new_queries = ""
    if queries:
        # rebuild the query string, replacing only the 'pids' value
        for eq in queries.split("&"):
            key = eq.split("=")[0]
            value = eq.split("=")[1]
            if key == 'pids':
                value = "0,17,18"
            new_queries += key + "=" + value + "&"
        new_queries = new_queries.strip("&")
    url = urlunparse((
        url_st.scheme,
        url_st.netloc,
        url_st.path,
        url_st.params,
        new_queries,
        url_st.fragment,
    ))
    attr = [
        os.path.join(self.origin_dir, 'epg_grap.sh'), url, provider,
        str(self.config.read('epgloops')),
        str(self.config.read('epgtimeout'))
    ]  # process arguments
    print("epg_grap started", provider, url, repr(attr))
    try:
        self.process = subprocess.Popen(attr,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        cleaner = Timer(
            600, self.cleanProcess
        )  # if epg_grap won't exit, try to terminate its process after 600 s (10 minutes)
        cleaner.start()
        epg_out, err = self.process.communicate()
        #self.process.wait() # oops... not needed? harmless!
        cleaner.cancel()
        if err:
            print("epg_grap ended with an error:\n%s" % (err))
        else:
            print("epg_grap' ended")
        epg_json_string = epg_out.decode()
        epg_json = json.loads(epg_json_string)
        result = {}
        count = 0
        for json_movie in epg_json['details'].values():
            start = json_movie['unixTimeBegin']
            stop = json_movie['unixTimeEnd']
            if json_movie['title']:
                title = json_movie['title'] + ' - ' + json_movie['name']
            else:
                title = json_movie['name']
            desc = json_movie['description']
            category = ''
            count += 1
            # we'll use the name of the stream source plugin instead the name of the EPG plugin itself
            # plugin_name = self.plugin_names[0]
            plugin_name = self.stream_source
            self.providers.add(provider)
            self.categories.add(category)
            new_movie = Movie(source=plugin_name,
                              source_type=defaults.MOVIE_TYPE_STREAM,
                              provider=provider,
                              category=category,
                              title=title,
                              timestamp=str(int(start)),
                              duration=stop - start,
                              description=desc,
                              url=url)
            new_movie.add_stream('ts', '', url)
            if not plugin_name in self.movies:
                self.movies[plugin_name] = {}
            self.movies[plugin_name][new_movie.uri()] = new_movie
            movie_info = MovieInfo.movie_to_movie_info(
                new_movie, category)
            movie_info['recordable'] = True
            result[start] = movie_info
        print("epg loaded, {0} entries".format(count))
        return result
    except Exception as ex:
        print("epg_grap could not be started. Error: %s" % (ex))
        return