def get_datatables_media_info(self, section_id=None, section_type=None, rating_key=None, refresh=False, kwargs=None):
    default_return = {'recordsFiltered': 0,
                      'recordsTotal': 0,
                      'draw': 0,
                      'data': 'null',
                      'error': 'Unable to execute database query.'}

    if not session.allow_session_library(section_id):
        return default_return

    if section_id and not str(section_id).isdigit():
        logger.warn(u"PlexPy Libraries :: Datatable media info called but invalid section_id provided.")
        return default_return
    elif rating_key and not str(rating_key).isdigit():
        logger.warn(u"PlexPy Libraries :: Datatable media info called but invalid rating_key provided.")
        return default_return
    elif not section_id and not rating_key:
        logger.warn(u"PlexPy Libraries :: Datatable media info called but no input provided.")
        return default_return

    # Get the library details
    library_details = self.get_details(section_id=section_id)
    if library_details['section_id'] is None:
        logger.debug(u"PlexPy Libraries :: Library section_id %s not found." % section_id)
        return default_return

    if not section_type:
        section_type = library_details['section_type']

    # Get play counts from the database
    monitor_db = database.MonitorDatabase()

    if plexpy.CONFIG.GROUP_HISTORY_TABLES:
        count_by = 'reference_id'
    else:
        count_by = 'id'

    if section_type == 'show' or section_type == 'artist':
        group_by = 'grandparent_rating_key'
    elif section_type == 'season' or section_type == 'album':
        group_by = 'parent_rating_key'
    else:
        group_by = 'rating_key'

    try:
        query = 'SELECT MAX(session_history.started) AS last_played, COUNT(DISTINCT session_history.%s) AS play_count, ' \
                'session_history.rating_key, session_history.parent_rating_key, session_history.grandparent_rating_key ' \
                'FROM session_history ' \
                'JOIN session_history_metadata ON session_history.id = session_history_metadata.id ' \
                'WHERE session_history_metadata.section_id = ? ' \
                'GROUP BY session_history.%s ' % (count_by, group_by)
        result = monitor_db.select(query, args=[section_id])
    except Exception as e:
        logger.warn(u"PlexPy Libraries :: Unable to execute database query for get_datatables_media_info: %s." % e)
        return default_return

    watched_list = {}
    for item in result:
        watched_list[str(item[group_by])] = {'last_played': item['last_played'],
                                             'play_count': item['play_count']}

    rows = []
    # Import media info cache from json file
    if rating_key:
        try:
            inFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'media_info_%s-%s.json' % (section_id, rating_key))
            with open(inFilePath, 'r') as inFile:
                rows = json.load(inFile)
            library_count = len(rows)
        except IOError as e:
            #logger.debug(u"PlexPy Libraries :: No JSON file for rating_key %s." % rating_key)
            #logger.debug(u"PlexPy Libraries :: Refreshing data and creating new JSON file for rating_key %s." % rating_key)
            pass
    elif section_id:
        try:
            inFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'media_info_%s.json' % section_id)
            with open(inFilePath, 'r') as inFile:
                rows = json.load(inFile)
            library_count = len(rows)
        except IOError as e:
            #logger.debug(u"PlexPy Libraries :: No JSON file for library section_id %s." % section_id)
            #logger.debug(u"PlexPy Libraries :: Refreshing data and creating new JSON file for section_id %s." % section_id)
            pass

    # If no cache was imported, get all library children items
    cached_items = {d['rating_key']: d['file_size'] for d in rows} if not refresh else {}

    if refresh or not rows:
        pms_connect = pmsconnect.PmsConnect()

        if rating_key:
            library_children = pms_connect.get_library_children_details(rating_key=rating_key,
                                                                        get_media_info=True)
        elif section_id:
            library_children = pms_connect.get_library_children_details(section_id=section_id,
                                                                        section_type=section_type,
                                                                        get_media_info=True)

        if library_children:
            library_count = library_children['library_count']
            # 'childern_list' (sic) matches the key name returned by get_library_children_details
            children_list = library_children['childern_list']
        else:
            logger.warn(u"PlexPy Libraries :: Unable to get a list of library items.")
            return default_return

        new_rows = []
        for item in children_list:
            cached_file_size = cached_items.get(item['rating_key'], None)
            file_size = cached_file_size if cached_file_size else item.get('file_size', '')

            row = {'section_id': library_details['section_id'],
                   'section_type': library_details['section_type'],
                   'added_at': item['added_at'],
                   'media_type': item['media_type'],
                   'rating_key': item['rating_key'],
                   'parent_rating_key': item['parent_rating_key'],
                   'grandparent_rating_key': item['grandparent_rating_key'],
                   'title': item['title'],
                   'year': item['year'],
                   'media_index': item['media_index'],
                   'parent_media_index': item['parent_media_index'],
                   'thumb': item['thumb'],
                   'container': item.get('container', ''),
                   'bitrate': item.get('bitrate', ''),
                   'video_codec': item.get('video_codec', ''),
                   'video_resolution': item.get('video_resolution', ''),
                   'video_framerate': item.get('video_framerate', ''),
                   'audio_codec': item.get('audio_codec', ''),
                   'audio_channels': item.get('audio_channels', ''),
                   'file_size': file_size
                   }
            new_rows.append(row)

        rows = new_rows
        if not rows:
            return default_return

        # Cache the media info to a json file
        if rating_key:
            try:
                outFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'media_info_%s-%s.json' % (section_id, rating_key))
                with open(outFilePath, 'w') as outFile:
                    json.dump(rows, outFile)
            except IOError as e:
                logger.debug(u"PlexPy Libraries :: Unable to create cache file for rating_key %s." % rating_key)
        elif section_id:
            try:
                outFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'media_info_%s.json' % section_id)
                with open(outFilePath, 'w') as outFile:
                    json.dump(rows, outFile)
            except IOError as e:
                logger.debug(u"PlexPy Libraries :: Unable to create cache file for section_id %s." % section_id)

    # Update the last_played and play_count
    for item in rows:
        watched_item = watched_list.get(item['rating_key'], None)
        if watched_item:
            item['last_played'] = watched_item['last_played']
            item['play_count'] = watched_item['play_count']
        else:
            item['last_played'] = None
            item['play_count'] = None

    results = []

    # Get datatables JSON data
    if kwargs.get('json_data'):
        json_data = helpers.process_json_kwargs(json_kwargs=kwargs.get('json_data'))
        #print json_data

        # Search results
        search_value = json_data['search']['value'].lower()
        if search_value:
            searchable_columns = [d['data'] for d in json_data['columns'] if d['searchable']]
            for row in rows:
                for k, v in row.iteritems():
                    if k in searchable_columns and search_value in v.lower():
                        results.append(row)
                        break
        else:
            results = rows

        filtered_count = len(results)

        # Sort results
        results = sorted(results, key=lambda k: k['title'])
        sort_order = json_data['order']
        for order in reversed(sort_order):
            sort_key = json_data['columns'][int(order['column'])]['data']
            reverse = True if order['dir'] == 'desc' else False
            if rating_key and sort_key == 'title':
                results = sorted(results, key=lambda k: helpers.cast_to_int(k['media_index']), reverse=reverse)
            elif sort_key == 'file_size' or sort_key == 'bitrate':
                results = sorted(results, key=lambda k: helpers.cast_to_int(k[sort_key]), reverse=reverse)
            elif sort_key == 'video_resolution':
                results = sorted(results,
                                 key=lambda k: helpers.cast_to_int(k[sort_key].replace('4k', '2160p').rstrip('p')),
                                 reverse=reverse)
            else:
                results = sorted(results, key=lambda k: k[sort_key], reverse=reverse)

        total_file_size = sum([helpers.cast_to_int(d['file_size']) for d in results])

        # Paginate results
        results = results[json_data['start']:(json_data['start'] + json_data['length'])]

        filtered_file_size = sum([helpers.cast_to_int(d['file_size']) for d in results])

        output = {'recordsFiltered': filtered_count,
                  'recordsTotal': library_count,
                  'data': results,
                  'draw': int(json_data['draw']),
                  'filtered_file_size': filtered_file_size,
                  'total_file_size': total_file_size}

        return output
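# A minimal sketch (not from the original source) of the DataTables request
# payload that `kwargs['json_data']` above is assumed to decode to via
# helpers.process_json_kwargs(). The method only reads the keys shown here;
# the concrete values are hypothetical.
example_json_data = {
    'draw': 1,                                    # request counter echoed back to DataTables
    'start': 0,                                   # pagination offset into the sorted results
    'length': 25,                                 # page size
    'search': {'value': 'office'},                # global search applied to searchable columns
    'order': [{'column': 0, 'dir': 'desc'}],      # sort spec; 'column' indexes into 'columns'
    'columns': [{'data': 'title', 'searchable': True},
                {'data': 'file_size', 'searchable': False}],
}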
def get_datatables_media_info(self, section_id=None, section_type=None, rating_key=None, refresh=False, kwargs=None):
    default_return = {'recordsFiltered': 0,
                      'recordsTotal': 0,
                      'draw': 0,
                      'data': 'null',
                      'error': 'Unable to execute database query.'}

    if not session.allow_session_library(section_id):
        return default_return

    if section_id and not str(section_id).isdigit():
        logger.warn(u"PlexPy Libraries :: Datatable media info called but invalid section_id provided.")
        return default_return
    elif rating_key and not str(rating_key).isdigit():
        logger.warn(u"PlexPy Libraries :: Datatable media info called but invalid rating_key provided.")
        return default_return
    elif not section_id and not rating_key:
        logger.warn(u"PlexPy Libraries :: Datatable media info called but no input provided.")
        return default_return

    # Get the library details
    library_details = self.get_details(section_id=section_id)
    if library_details['section_id'] is None:
        logger.debug(u"PlexPy Libraries :: Library section_id %s not found." % section_id)
        return default_return

    if not section_type:
        section_type = library_details['section_type']

    # Get play counts from the database
    monitor_db = database.MonitorDatabase()

    if plexpy.CONFIG.GROUP_HISTORY_TABLES:
        count_by = 'reference_id'
    else:
        count_by = 'id'

    if section_type == 'show' or section_type == 'artist':
        group_by = 'grandparent_rating_key'
    elif section_type == 'season' or section_type == 'album':
        group_by = 'parent_rating_key'
    else:
        group_by = 'rating_key'

    try:
        query = 'SELECT MAX(session_history.started) AS last_played, COUNT(DISTINCT session_history.%s) AS play_count, ' \
                'session_history.rating_key, session_history.parent_rating_key, session_history.grandparent_rating_key ' \
                'FROM session_history ' \
                'JOIN session_history_metadata ON session_history.id = session_history_metadata.id ' \
                'WHERE session_history_metadata.section_id = ? ' \
                'GROUP BY session_history.%s ' % (count_by, group_by)
        result = monitor_db.select(query, args=[section_id])
    except Exception as e:
        logger.warn(u"PlexPy Libraries :: Unable to execute database query for get_datatables_media_info: %s." % e)
        return default_return

    watched_list = {}
    for item in result:
        watched_list[str(item[group_by])] = {'last_played': item['last_played'],
                                             'play_count': item['play_count']}

    rows = []
    # Import media info cache from json file
    if rating_key:
        try:
            inFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'media_info_%s-%s.json' % (section_id, rating_key))
            with open(inFilePath, 'r') as inFile:
                rows = json.load(inFile)
            library_count = len(rows)
        except IOError as e:
            #logger.debug(u"PlexPy Libraries :: No JSON file for rating_key %s." % rating_key)
            #logger.debug(u"PlexPy Libraries :: Refreshing data and creating new JSON file for rating_key %s." % rating_key)
            pass
    elif section_id:
        try:
            inFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'media_info_%s.json' % section_id)
            with open(inFilePath, 'r') as inFile:
                rows = json.load(inFile)
            library_count = len(rows)
        except IOError as e:
            #logger.debug(u"PlexPy Libraries :: No JSON file for library section_id %s." % section_id)
            #logger.debug(u"PlexPy Libraries :: Refreshing data and creating new JSON file for section_id %s." % section_id)
            pass

    # If no cache was imported, get all library children items
    cached_items = {d['rating_key']: d['file_size'] for d in rows}

    if refresh or not rows:
        pms_connect = pmsconnect.PmsConnect()

        if rating_key:
            library_children = pms_connect.get_library_children_details(rating_key=rating_key,
                                                                        get_media_info=True)
        elif section_id:
            library_children = pms_connect.get_library_children_details(section_id=section_id,
                                                                        section_type=section_type,
                                                                        get_media_info=True)

        if library_children:
            library_count = library_children['library_count']
            # 'childern_list' (sic) matches the key name returned by get_library_children_details
            children_list = library_children['childern_list']
        else:
            logger.warn(u"PlexPy Libraries :: Unable to get a list of library items.")
            return default_return

        new_rows = []
        for item in children_list:
            cached_file_size = cached_items.get(item['rating_key'], None)
            file_size = cached_file_size if cached_file_size else item.get('file_size', '')

            row = {'section_id': library_details['section_id'],
                   'section_type': library_details['section_type'],
                   'added_at': item['added_at'],
                   'media_type': item['media_type'],
                   'rating_key': item['rating_key'],
                   'parent_rating_key': item['parent_rating_key'],
                   'grandparent_rating_key': item['grandparent_rating_key'],
                   'title': item['title'],
                   'year': item['year'],
                   'media_index': item['media_index'],
                   'parent_media_index': item['parent_media_index'],
                   'thumb': item['thumb'],
                   'container': item.get('container', ''),
                   'bitrate': item.get('bitrate', ''),
                   'video_codec': item.get('video_codec', ''),
                   'video_resolution': item.get('video_resolution', ''),
                   'video_framerate': item.get('video_framerate', ''),
                   'audio_codec': item.get('audio_codec', ''),
                   'audio_channels': item.get('audio_channels', ''),
                   'file_size': file_size
                   }
            new_rows.append(row)

        rows = new_rows
        if not rows:
            return default_return

        # Cache the media info to a json file
        if rating_key:
            try:
                outFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'media_info_%s-%s.json' % (section_id, rating_key))
                with open(outFilePath, 'w') as outFile:
                    json.dump(rows, outFile)
            except IOError as e:
                logger.debug(u"PlexPy Libraries :: Unable to create cache file for rating_key %s." % rating_key)
        elif section_id:
            try:
                outFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'media_info_%s.json' % section_id)
                with open(outFilePath, 'w') as outFile:
                    json.dump(rows, outFile)
            except IOError as e:
                logger.debug(u"PlexPy Libraries :: Unable to create cache file for section_id %s." % section_id)

    # Update the last_played and play_count
    for item in rows:
        watched_item = watched_list.get(item['rating_key'], None)
        if watched_item:
            item['last_played'] = watched_item['last_played']
            item['play_count'] = watched_item['play_count']
        else:
            item['last_played'] = None
            item['play_count'] = None

    results = []

    # Get datatables JSON data
    if kwargs.get('json_data'):
        json_data = helpers.process_json_kwargs(json_kwargs=kwargs.get('json_data'))
        #print json_data

        # Search results
        search_value = json_data['search']['value'].lower()
        if search_value:
            searchable_columns = [d['data'] for d in json_data['columns'] if d['searchable']]
            for row in rows:
                for k, v in row.iteritems():
                    if k in searchable_columns and search_value in v.lower():
                        results.append(row)
                        break
        else:
            results = rows

        filtered_count = len(results)

        # Sort results
        results = sorted(results, key=lambda k: k['title'])
        sort_order = json_data['order']
        for order in reversed(sort_order):
            sort_key = json_data['columns'][int(order['column'])]['data']
            reverse = True if order['dir'] == 'desc' else False
            if rating_key and sort_key == 'title':
                results = sorted(results, key=lambda k: helpers.cast_to_int(k['media_index']), reverse=reverse)
            elif sort_key == 'file_size' or sort_key == 'bitrate':
                results = sorted(results, key=lambda k: helpers.cast_to_int(k[sort_key]), reverse=reverse)
            else:
                results = sorted(results, key=lambda k: k[sort_key], reverse=reverse)

        total_file_size = sum([helpers.cast_to_int(d['file_size']) for d in results])

        # Paginate results
        results = results[json_data['start']:(json_data['start'] + json_data['length'])]

        filtered_file_size = sum([helpers.cast_to_int(d['file_size']) for d in results])

        output = {'recordsFiltered': filtered_count,
                  'recordsTotal': library_count,
                  'data': results,
                  'draw': int(json_data['draw']),
                  'filtered_file_size': filtered_file_size,
                  'total_file_size': total_file_size}

        return output
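# Hedged usage sketch, not from the original source: `libraries` below stands
# for a hypothetical instance of the class these methods belong to, and
# `raw_json` for a serialised DataTables request (see the example_json_data
# sketch above). Per the code, the call returns a DataTables-style dict with
# the keys 'recordsFiltered', 'recordsTotal', 'data', 'draw',
# 'filtered_file_size' and 'total_file_size'.
#
#     media_info = libraries.get_datatables_media_info(section_id=1,
#                                                      refresh=True,  # bypass the JSON cache, requery the PMS
#                                                      kwargs={'json_data': raw_json})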
def ssp_query(self, table_name=None, columns=[], custom_where=[], group_by=[],
              join_types=[], join_tables=[], join_evals=[], kwargs=None):
    if not table_name:
        logger.error('PlexPy DataTables :: No table name received.')
        return None

    # Set default variable values
    parameters = {}
    args = []
    group = ''
    order = ''
    where = ''
    join = ''
    c_where = ''

    # Fetch all our parameters
    if kwargs.get('json_data'):
        parameters = helpers.process_json_kwargs(json_kwargs=kwargs.get('json_data'))
    else:
        logger.error('PlexPy DataTables :: Parameters for Datatables must be sent as a serialised json object '
                     'named json_data.')
        return None

    dt_columns = parameters['columns']
    extracted_columns = self.extract_columns(columns=columns)

    # Build grouping
    if group_by:
        for g in group_by:
            group += g + ', '

    if group:
        grouping = True
        group = 'GROUP BY ' + group.rstrip(', ')
    else:
        grouping = False

    # Build join parameters
    if join_types:
        counter = 0
        for join_type in join_types:
            if join_type.upper() == 'LEFT OUTER JOIN':
                join_item = 'LEFT OUTER JOIN %s ON %s = %s ' % \
                            (join_tables[counter], join_evals[counter][0], join_evals[counter][1])
            elif join_type.upper() == 'JOIN' or join_type.upper() == 'INNER JOIN':
                join_item = 'JOIN %s ON %s = %s ' % \
                            (join_tables[counter], join_evals[counter][0], join_evals[counter][1])
            else:
                join_item = ''
            counter += 1
            join += join_item

    # Build custom where parameters
    if custom_where:
        for w in custom_where:
            if isinstance(w[1], (list, tuple)) and len(w[1]):
                c_where += '('
                for w_ in w[1]:
                    if w_ is None:
                        c_where += w[0] + ' IS NULL OR '
                    else:
                        c_where += w[0] + ' = ? OR '
                        args.append(w_)
                c_where = c_where.rstrip(' OR ') + ') AND '
            else:
                if w[1] is None:
                    c_where += w[0] + ' IS NULL AND '
                else:
                    c_where += w[0] + ' = ? AND '
                    args.append(w[1])

        if c_where:
            c_where = 'WHERE ' + c_where.rstrip(' AND ')

    # Build ordering
    for o in parameters['order']:
        sort_order = ' COLLATE NOCASE'
        if o['dir'] == 'desc':
            sort_order = ' COLLATE NOCASE DESC'
        # We first see if a name was sent through for the column sort.
        if dt_columns[int(o['column'])]['data']:
            # We have a name, now check if it's a valid column name for our query
            # so we don't just inject a random value
            if any(d.lower() == dt_columns[int(o['column'])]['data'].lower()
                   for d in extracted_columns['column_named']):
                order += dt_columns[int(o['column'])]['data'] + sort_order + ', '
            # If we receive a bogus name, rather not sort at all.
        # If no name exists for the column, just use the column index to sort
        else:
            order += extracted_columns['column_named'][int(o['column'])] + ', '

    order = order.rstrip(', ')
    if order:
        order = 'ORDER BY ' + order

    # Build where parameters
    if parameters['search']['value']:
        counter = 0
        for s in parameters['columns']:
            if s['searchable']:
                # We first see if a name was sent through for the column search.
                if s['data']:
                    # We have a name, now check if it's a valid column name for our query
                    # so we don't just inject a random value
                    if any(d.lower() == s['data'].lower() for d in extracted_columns['column_named']):
                        where += s['data'] + ' LIKE ? OR '
                        args.append('%' + parameters['search']['value'] + '%')
                    # If we receive a bogus name, rather not search at all.
                # If no name exists for the column, just use the column index to search
                else:
                    where += extracted_columns['column_named'][counter] + ' LIKE ? OR '
                    args.append('%' + parameters['search']['value'] + '%')
            counter += 1

        if where:
            where = 'WHERE ' + where.rstrip(' OR ')

    # Build our queries
    if grouping:
        if c_where == '':
            query = 'SELECT * FROM (SELECT %s FROM %s %s %s) %s %s' \
                    % (extracted_columns['column_string'], table_name, join, group, where, order)
        else:
            query = 'SELECT * FROM (SELECT %s FROM %s %s %s %s) %s %s' \
                    % (extracted_columns['column_string'], table_name, join, c_where, group, where, order)
    else:
        if c_where == '':
            query = 'SELECT %s FROM %s %s %s %s' \
                    % (extracted_columns['column_string'], table_name, join, where, order)
        else:
            query = 'SELECT * FROM (SELECT %s FROM %s %s %s %s) %s' \
                    % (extracted_columns['column_string'], table_name, join, where, order, c_where)

    # logger.debug(u"Query: %s" % query)

    # Execute the query
    filtered = self.ssp_db.select(query, args=args)

    # Build grand totals
    totalcount = self.ssp_db.select('SELECT COUNT(id) as total_count from %s' % table_name)[0]['total_count']

    # Get draw counter
    draw_counter = int(parameters['draw'])

    # Paginate results
    result = filtered[parameters['start']:(parameters['start'] + parameters['length'])]

    # Sanitize on the way out
    result = [{k: helpers.sanitize(v) if isinstance(v, basestring) else v for k, v in row.iteritems()}
              for row in result]

    output = {'result': result,
              'draw': draw_counter,
              'filteredCount': len(filtered),
              'totalCount': totalcount}

    return output
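# A worked sketch (column names and values are assumed, not from the source)
# of how the custom-where builder above expands its input: a scalar value
# pairs with '= ?', None becomes 'IS NULL', and a list/tuple value becomes a
# parenthesised OR group.
custom_where_example = [['user_id', 123],
                        ['media_type', ('movie', 'episode')],
                        ['deleted_at', None]]
# Feeding this through the loop above yields:
#   c_where -> "WHERE user_id = ? AND (media_type = ? OR media_type = ?) AND deleted_at IS NULL"
#   args    -> [123, 'movie', 'episode']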
def ssp_query(self, table_name=None, table_name_union=None,
              columns=[], columns_union=[],
              custom_where=[], custom_where_union=[],
              group_by=[], group_by_union=[],
              join_types=[], join_tables=[], join_evals=[],
              kwargs=None):
    if not table_name:
        logger.error('Tautulli DataTables :: No table name received.')
        return None

    # Fetch all our parameters
    if kwargs.get('json_data'):
        parameters = helpers.process_json_kwargs(json_kwargs=kwargs.get('json_data'))
    else:
        logger.error('Tautulli DataTables :: Parameters for Datatables must be sent as a serialised json object '
                     'named json_data.')
        return None

    extracted_columns = self.extract_columns(columns=columns)
    join = self.build_join(join_types, join_tables, join_evals)
    group = self.build_grouping(group_by)
    c_where, cw_args = self.build_custom_where(custom_where)
    order = self.build_order(parameters['order'],
                             extracted_columns['column_named'],
                             parameters['columns'])
    where, w_args = self.build_where(parameters['search']['value'],
                                     extracted_columns['column_named'],
                                     parameters['columns'])

    # Build union parameters
    if table_name_union:
        extracted_columns_union = self.extract_columns(columns=columns_union)
        group_u = self.build_grouping(group_by_union)
        c_where_u, cwu_args = self.build_custom_where(custom_where_union)
        union = 'UNION SELECT %s FROM %s %s %s' % (extracted_columns_union['column_string'],
                                                   table_name_union, c_where_u, group_u)
    else:
        union = ''
        cwu_args = []

    args = cw_args + cwu_args + w_args

    # Build the query
    query = 'SELECT * FROM (SELECT %s FROM %s %s %s %s %s) %s %s' \
            % (extracted_columns['column_string'], table_name, join, c_where, group, union, where, order)

    # logger.debug(u"Query: %s" % query)

    # Execute the query
    filtered = self.ssp_db.select(query, args=args)

    # Remove NULL rows
    filtered = [row for row in filtered if not all(v is None for v in row.values())]

    # Build grand totals
    totalcount = self.ssp_db.select('SELECT COUNT(id) as total_count from %s' % table_name)[0]['total_count']

    # Get draw counter
    draw_counter = int(parameters['draw'])

    # Paginate results
    result = filtered[parameters['start']:(parameters['start'] + parameters['length'])]

    # Sanitize on the way out
    result = [{k: helpers.sanitize(v) if isinstance(v, basestring) else v for k, v in row.iteritems()}
              for row in result]

    output = {'result': result,
              'draw': draw_counter,
              'filteredCount': len(filtered),
              'totalCount': totalcount}

    return output
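# Hedged sketch of the union path above, with an assumed second table name
# ('session_history' appears in the queries earlier in this file; the
# '..._archive' name is hypothetical). Passing table_name='session_history'
# and table_name_union='session_history_archive' with matching column lists
# would produce a query shaped like:
#
#   SELECT * FROM (
#       SELECT <columns> FROM session_history <join> <custom where> <group by>
#       UNION SELECT <union columns> FROM session_history_archive <union custom where> <union group by>
#   ) <search where> <order by>
#
# so the DataTables search and ordering apply to the combined rows, while
# each arm of the union keeps its own custom WHERE and GROUP BY, exactly as
# the query string is assembled above.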