def platform_encode(p):
    """Return *p* as Unicode text.

    Try the default decode first; on any failure fall back to the system
    encoding with lossy replacement, swapping the replacement '?' for '!'.
    """
    try:
        result = decode_str(p)
    except (BaseException, Exception):
        result = decode_str(p, sickbeard.SYS_ENCODING, errors='replace')
        result = result.replace('?', '!')
    return result
def resolve(_code):
    """ Transform a twocc or fourcc code into a name.  Returns a 2-tuple of (cc, codec)
        where both are strings and cc is a string in the form '0xXX' if it's a twocc,
        or 'ABCD' if it's a fourcc.  If the given code is not a known twocc or fourcc,
        the return value will be (None, 'Unknown'), unless the code is otherwise
        a printable string in which case it will be returned as the codec.
    """
    unknown = u'Unknown'
    if isinstance(_code, integer_types):
        # plain integers are always treated as twocc ids
        return hex(_code), TWOCC.get(_code, unknown)
    if not isinstance(_code, string_types):
        return None, unknown

    codec = unknown
    # twocc supplied as a hex literal, e.g. '0x55'
    if re.match(r'^0x[\da-f]{1,4}$', _code, re.I):
        return _code, TWOCC.get(int(_code, 16), codec)
    # twocc supplied in decimal form
    if _code.isdigit() and 0 <= int(_code) <= 0xff:
        num = int(_code)
        return hex(num), TWOCC.get(num, codec)
    # twocc supplied as a packed 2-byte value
    if 2 == len(_code):
        num = struct.unpack('H', _code)[0]
        return hex(num), TWOCC.get(num, codec)
    # printable but not fourcc-length: remember it as the codec fallback
    if 4 != len(_code) and all(x in string.printable for x in _code):
        codec = decode_str(_code)
    # strip a Microsoft 'MS' prefix when the remainder is a known fourcc
    if 'MS' == _code[:2] and _code[2:].upper() in FOURCC:
        _code = _code[2:]
    if _code.upper() in FOURCC:
        return _code.upper(), decode_str(FOURCC[_code.upper()])
    return None, codec
def _discover_server(self):
    # Locate an Emby/MediaBrowser/Jellyfin server on the LAN using its UDP
    # discovery protocol; return the reported address string, or '' if none replies.
    cs = socket(AF_INET, SOCK_DGRAM)
    mb_listen_port = 7359  # fixed MediaBrowser/Emby discovery port
    cs.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    cs.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
    cs.settimeout(10)
    result, sock_issue = '', None
    # probe each product name until one answers
    for server in ('EmbyServer', 'MediaBrowserServer', 'JellyfinServer'):
        bufr = 'who is %s?' % server
        try:
            # sendto returns bytes sent; a short send means the datagram was truncated
            assert len(bufr) == cs.sendto(decode_bytes(bufr), ('255.255.255.255', mb_listen_port)), \
                'Not all data sent through the socket'
            message, host = cs.recvfrom(1024)
            if message:
                message = decode_str(message)
                self._log('%s found at %s: udp query response (%s)' % (server, host[0], message))
                # older servers answer 'id|address'; newer ones answer JSON with an Address key
                result = ('{"Address":' not in message and message.split('|')[1]
                          or json.loads(message).get('Address', ''))
                if result:
                    break
        except AssertionError:
            # socket is in a dubious state; skip the shutdown below
            sock_issue = True
        except (BaseException, Exception):
            pass
    if not sock_issue:
        try:
            cs.shutdown(SHUT_RDWR)
        except (BaseException, Exception):
            pass
    return result
def build_config(**kwargs):
    """
    kwargs is filtered for settings that enable updates to Trakt

    :param kwargs: kwargs to be filtered for settings that enable updates to Trakt
    :return: dict of parsed config kwargs where k is Trakt account id, v is a list of parent locations
    """
    config = {}

    root_dirs = []
    if sickbeard.ROOT_DIRS:
        # ROOT_DIRS is '|'-separated with the default-dir index first; skip it
        root_pieces = sickbeard.ROOT_DIRS.split('|')
        root_dirs = root_pieces[1:]

    for item in [re.findall(r'update-trakt-(\d+)-(.*)', k)
                 for k, v in iteritems(kwargs) if k.startswith('update-trakt-')]:
        for account_id, location in item:
            # convert once up front (was previously re-converted on every root dir pass)
            account_id = try_int(account_id, None)
            if not account_id:
                continue
            for cur_dir in root_dirs:
                # locations arrive base64 encoded; encode the candidate dir the same
                # way for comparison. decode_bytes is needed on PY3 because
                # urlsafe_b64encode requires a bytes-like argument, not str.
                if decode_str(base64.urlsafe_b64encode(decode_bytes(cur_dir))) == location:
                    if isinstance(config.get(account_id), list):
                        config[account_id] += [cur_dir]
                    else:
                        config[account_id] = [cur_dir]
    return config
def __unicode__(self):
    """Render this parse result as a readable unicode summary string."""
    fragments = []
    if None is not self.series_name:
        fragments.append(self.series_name + u' - ')
    if None is not self.season_number:
        fragments.append('S' + str(self.season_number))
    for ep_num in (self.episode_numbers or []):
        fragments.append('E' + str(ep_num))
    if self.is_air_by_date:
        fragments.append(str(self.air_date))
    if self.ab_episode_numbers:
        fragments.append(' [ABS: %s]' % str(self.ab_episode_numbers))
    if self.is_anime and self.version:
        fragments.append(' [ANIME VER: %s]' % str(self.version))
    if self.release_group:
        fragments.append(' [GROUP: %s]' % self.release_group)
    # trailing diagnostic flags are always appended
    fragments.append(' [ABD: %s]' % str(self.is_air_by_date))
    fragments.append(' [ANIME: %s]' % str(self.is_anime))
    fragments.append(' [whichReg: %s]' % str(self.which_regex))
    return decode_str(u''.join(fragments), errors='xmlcharrefreplace')
def to_unicode(data):
    """Convert a basestring to unicode

    :param basestring data: data to decode
    :return: data as unicode
    :rtype: unicode
    :raises ValueError: when data is not a string type at all
    """
    if not isinstance(data, string_types):
        raise ValueError('Basestring expected')
    if isinstance(data, text_type):
        # already unicode, nothing to do
        return data
    result = None
    for codec in ('utf-8', 'latin-1'):
        try:
            result = decode_str(data, codec)
            break
        except UnicodeDecodeError:
            continue
    if result is None:
        # neither codec fit cleanly; decode lossily with replacement chars
        result = decode_str(data, 'utf-8', 'replace')
    return result
def _send_to_xbmc(self, command, host=None, username=None, password=None):
    """Handles communication to XBMC servers via HTTP API

    Args:
        command: Dictionary of field/data pairs, encoded via urllib and passed to the XBMC API via HTTP
        host: XBMC webserver host:port
        username: XBMC webserver username
        password: XBMC webserver password

    Returns:
        Returns response.result for successful commands or False if there was an error
    """
    if not host:
        self._log_debug(u'No host passed, aborting update')
        return False

    # fall back to configured credentials when none are passed
    username = self._choose(username, sickbeard.XBMC_USERNAME)
    password = self._choose(password, sickbeard.XBMC_PASSWORD)

    for key in command:
        # on PY3 everything is utf-8 encoded; on PY2 only unicode values are
        if not PY2 or type(command[key]) == text_type:
            command[key] = command[key].encode('utf-8')

    enc_command = urlencode(command)
    self._log_debug(u'Encoded API command: ' + enc_command)

    # legacy xbmcCmds HTTP API endpoint
    url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command)
    try:
        req = urllib.request.Request(url)
        # if we have a password, use authentication
        if password:
            req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password)))
            self._log_debug(u'Contacting (with auth header) via url: ' + fixStupidEncodings(url))
        else:
            self._log_debug(u'Contacting via url: ' + fixStupidEncodings(url))

        http_response_obj = urllib.request.urlopen(req)  # PY2 http_response_obj has no `with` context manager
        result = decode_str(http_response_obj.read(), sickbeard.SYS_ENCODING)
        http_response_obj.close()

        self._log_debug(u'HTTP response: ' + result.replace('\n', ''))
        return result

    except (urllib.error.URLError, IOError) as e:
        self._log_warning(u'Couldn\'t contact HTTP at %s %s' % (fixStupidEncodings(url), ex(e)))
        return False
def _send_to_plex(self, command, host, username=None, password=None):
    """Handles communication to Plex hosts via HTTP API

    Args:
        command: Dictionary of field/data pairs, encoded via urllib and passed to the legacy xbmcCmds HTTP API
        host: Plex host:port
        username: Plex API username
        password: Plex API password

    Returns:
        Returns True for successful commands or False if there was an error
    """
    if not host:
        self._log_error(u'No host specified, check your settings')
        return False

    for key in command:
        # on PY3 everything is utf-8 encoded; on PY2 only unicode values are
        if not PY2 or type(command[key]) == text_type:
            command[key] = command[key].encode('utf-8')

    enc_command = urlencode(command)
    self._log_debug(u'Encoded API command: ' + enc_command)

    # Plex exposes the same legacy xbmcCmds endpoint as XBMC
    url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command)
    try:
        req = urllib.request.Request(url)
        # optional basic auth when a password is configured
        if password:
            req.add_header('Authorization', 'Basic %s' % b64encodestring('%s:%s' % (username, password)))
            self._log_debug(u'Contacting (with auth header) via url: ' + url)
        else:
            self._log_debug(u'Contacting via url: ' + url)

        http_response_obj = urllib.request.urlopen(req)  # PY2 http_response_obj has no `with` context manager
        result = decode_str(http_response_obj.read(), sickbeard.SYS_ENCODING)
        http_response_obj.close()

        self._log_debug(u'HTTP response: ' + result.replace('\n', ''))
        # unlike the XBMC variant, only success/failure is reported
        return True

    except (urllib.error.URLError, IOError) as e:
        self._log_warning(u'Couldn\'t contact Plex at ' + fixStupidEncodings(url) + ' ' + ex(e))
        return False
def write_error(self, status_code, **kwargs):
    """Log details of a failed request, then delegate to the base error writer.

    Any 'exc_info' kwarg is logged at debug level and removed so that no
    traceback is emitted to the client.
    """
    request_body = ''
    try:
        if self.request.body:
            request_body = '\nRequest body: %s' % decode_str(self.request.body)
    except (BaseException, Exception):
        pass
    logger.log('Sent %s error response to a `%s` request for `%s` with headers:\n%s%s' % (
        status_code, self.request.method, self.request.path, self.request.headers, request_body),
        logger.WARNING)
    # suppress traceback by removing 'exc_info' kwarg
    if 'exc_info' in kwargs:
        logger.log('Gracefully handled exception text:\n%s'
                   % traceback.format_exception(*kwargs.pop('exc_info')),
                   logger.DEBUG)
    return super(LegacyBase, self).write_error(status_code, **kwargs)
def logged_in(self, y):
    # Return True when the session looks authenticated: the page `y` (or an
    # empty/None response) contains a 'logout' link AND a 'remember_web_*'
    # cookie is present in the session.
    if all([
        None is y or 'logout' in y,
        bool(filter_list(lambda c: 'remember_web_' in c, iterkeys(self.session.cookies)))
    ]):
        if None is not y:
            # harvest the site's show id -> name mapping from its <option> list
            # for later name sanitising/searching
            self.shows = dict(re.findall(r'<option value="(\d+)">(.*?)</option>', y))
            for k, v in iteritems(self.shows):
                self.shows[k] = sanitize_scene_name(html_unescape(unidecode(decode_str(v))))
        return True
    return False
def _notify(self, title, body, access_token=None, device_iden=None, **kwargs):
    """
    Sends a pushbullet notification based on the provided info or SG config

    title: The title of the notification to send
    body: The body string to send
    access_token: The access token to grant access
    device_iden: The iden of a specific target, if none provided send to all devices
    """
    # fall back to configured values when not explicitly passed
    access_token = self._choose(access_token, sickbeard.PUSHBULLET_ACCESS_TOKEN)
    device_iden = self._choose(device_iden, sickbeard.PUSHBULLET_DEVICE_IDEN)

    # send the request to Pushbullet
    result = None
    try:
        # Pushbullet uses basic auth with the token as username and empty password
        headers = {'Authorization': 'Basic %s' % b64encodestring('%s:%s' % (access_token, '')),
                   'Content-Type': 'application/json'}
        resp = requests.post(PUSHAPI_ENDPOINT, headers=headers,
                             data=json.dumps(dict(
                                 type='note', title=title, body=decode_str(body.strip()),
                                 device_iden=device_iden)))
        resp.raise_for_status()
    except (BaseException, Exception):
        try:
            # resp may be unbound if requests.post itself raised
            # noinspection PyUnboundLocalVariable
            result = resp.json()['error']['message']
        except (BaseException, Exception):
            result = 'no response'
        self._log_warning(u'%s' % result)

    # returns True on success, otherwise a failure message string
    return self._choose((True, 'Failed to send notification: %s' % result)[bool(result)], not bool(result))
def _fill(self, dataline):
    """Populate instance attributes from a mapping of raw values.

    Values containing single quotes are split into lists with numeric parts
    converted to int; otherwise each value is stored as int when possible,
    falling back to the raw value unchanged.
    """
    for key in dataline:
        try:
            tmp_list = decode_str(dataline[key]).split("'")
            if 1 < len(tmp_list):
                # quoted value: store as a list, int-ifying the numeric parts
                new_list = []
                for item in tmp_list:
                    try:
                        new_list.append(int(item))
                    except (TypeError, ValueError):
                        # narrowed from a bare `except:` that masked real errors
                        new_list.append(item)
                self.__dict__[key] = new_list
                continue
        except (BaseException, Exception):
            # value not decodable/splittable; fall through to scalar handling
            pass
        try:
            self.__dict__[key] = int(dataline[key])
        except (TypeError, ValueError):
            self.__dict__[key] = dataline[key]
        # removed dead statement `key = property(lambda x: dataline[key])`:
        # rebinding the loop variable to a property object had no effect
def __unicode__(self): result = u'' # print normal attributes lists = [] for key in self._keys: value = getattr(self, key, None) if value is None or key == 'url': continue if isinstance(value, list): if not value: continue elif isinstance(value[0], string_types): # Just a list of strings (keywords?), so don't treat it specially. value = u', '.join(value) else: lists.append((key, value)) continue elif isinstance(value, dict): # Tables or tags treated separately. continue if key in UNPRINTABLE_KEYS: value = '<unprintable data, size=%d>' % len(value) result += u'| %10s: %s\n' % (decode_str(key), decode_str(value)) # print tags (recursively, to support nested tags). def print_tags(tags, suffix, show_label): result = '' for n, (name, tag) in enumerate(tags.items()): result += u'| %12s%s%s = ' % ( u'tags: ' if n == 0 and show_label else '', suffix, name) if isinstance(tag, list): # TODO: doesn't support lists/dicts within lists. result += u'%s\n' % ', '.join(subtag.value for subtag in tag) else: result += u'%s\n' % (tag.value or '') if isinstance(tag, dict): result += print_tags(tag, ' ', False) return result result += print_tags(self.tags, '', True) # print lists for key, l in lists: for n, item in enumerate(l): label = '+-- ' + key.rstrip('s').capitalize() if key not in ['tracks', 'subtitles', 'chapters']: label += ' Track' result += u'%s #%d\n' % (label, n + 1) result += '| ' + re.sub(r'\n(.)', r'\n| \1', decode_str(item)) # print tables # FIXME: WTH? # if log.level >= 10: # for name, table in self.tables.items(): # result += '+-- Table %s\n' % str(name) # for key, value in table.items(): # try: # value = unicode(value) # if len(value) > 50: # value = u'<unprintable data, size=%d>' % len(value) # except (UnicodeDecodeError, TypeError): # try: # value = u'<unprintable data, size=%d>' % len(value) # except AttributeError: # value = u'<unprintable data>' # result += u'| | %s: %s\n' % (unicode(key), value) return result
def _notify(self, title, body, user_key=None, api_key=None, priority=None, device=None, sound=None, **kwargs):
    """
    Sends a pushover notification to the address provided

    title: The title of the message
    body: The message to send (unicode)
    user_key: The pushover user id to send the message to (or to subscribe with)

    returns: True if the message succeeded, False otherwise
    """
    # fall back to configured values when not explicitly passed
    user_key = self._choose(user_key, sickbeard.PUSHOVER_USERKEY)
    api_key = self._choose(api_key, sickbeard.PUSHOVER_APIKEY)
    priority = self._choose(priority, sickbeard.PUSHOVER_PRIORITY)
    device = self._choose(device, sickbeard.PUSHOVER_DEVICE)
    sound = self._choose(sound, sickbeard.PUSHOVER_SOUND)

    # build up the URL and parameters
    params = dict(title=title, message=decode_str(body.strip()), user=user_key, timestamp=int(time.time()))
    if api_key:
        params.update(token=api_key)
    if priority:
        params.update(priority=priority)
    # bugfix: the optional device/sound parameters were previously added only
    # when *unset* (`if not device:`), which sent device=None/sound=None and
    # dropped any configured value; send them only when actually set.
    if device:
        params.update(device=device)
    if sound:
        params.update(sound=sound)

    # send the request to pushover
    result = None
    try:
        req = urllib.request.Request(API_URL)
        # PY2 http_response_obj has no `with` context manager
        http_response_obj = urllib.request.urlopen(req, decode_bytes(urlencode(params)))
        http_response_obj.close()

    except urllib.error.HTTPError as e:
        # HTTP status 404 if the provided email address isn't a Pushover user.
        if 404 == e.code:
            result = 'Username is wrong/not a Pushover email. Pushover will send an email to it'
            self._log_warning(result)
        # For HTTP status code 401's, it is because you are passing in either an invalid token,
        # or the user has not added your service.
        elif 401 == e.code:
            # HTTP status 401 if the user doesn't have the service added
            subscribe_note = self._notify(title, body, user_key)
            if subscribe_note:
                self._log_debug('Subscription sent')
                # return True
            else:
                result = 'Subscription could not be sent'
                self._log_error(result)
        else:
            # If you receive an HTTP status code of 400, it is because you failed to send the proper parameters
            if 400 == e.code:
                result = 'Wrong data sent to Pushover'
            # If you receive a HTTP status code of 429,
            # it is because the message limit has been reached (free limit is 7,500)
            elif 429 == e.code:
                result = 'API message limit reached - try a different API key'
            # If you receive a HTTP status code of 500, service is unavailable
            elif 500 == e.code:
                result = 'Unable to connect to API, service unavailable'
            self._log_error(result)

    # returns True on success, otherwise a failure message string
    return self._choose((True, 'Failed to send notification: %s' % result)[bool(result)], not bool(result))
def __str__(self):
    # Render this instance via the project's decode helper.
    # NOTE(review): passes `self` directly to decode_str, so this presumably
    # relies on the class being bytes/str-like — confirm against decode_str's
    # accepted input types.
    return decode_str(self)
def get_str(self):
    # Decode the raw entity payload as ASCII, substituting replacement
    # characters for any non-ASCII bytes rather than raising.
    return decode_str(self.entity_data, 'ascii', 'replace')
def _notify(self, title, body, access_token=None, sound=None, **kwargs):
    """
    Sends a boxcar2 notification to the address provided

    title: The title of the message
    body: The message to send
    access_token: To send to this device
    sound: Sound profile to use

    returns: True if the message succeeded, False otherwise
    """
    # fall back to configured values when not explicitly passed
    access_token = self._choose(access_token, sickbeard.BOXCAR2_ACCESSTOKEN)
    sound = self._choose(sound, sickbeard.BOXCAR2_SOUND)

    # build up the URL and parameters
    # more info goes here -
    # https://boxcar.uservoice.com/knowledgebase/articles/306788-how-to-send-your-boxcar-account-a-notification
    body = decode_str(body.strip())

    data = urlencode({
        'user_credentials': access_token,
        'notification[title]': '%s - %s' % (title, body),
        'notification[long_message]': body,
        'notification[sound]': sound,
        'notification[source_name]': 'SickGear',
        'notification[icon_url]': self._sg_logo_url
    })

    # send the request to boxcar2
    result = None
    try:
        req = urllib.request.Request('https://new.boxcar.io/api/notifications')
        # PY2 http_response_obj has no `with` context manager
        http_response_obj = urllib.request.urlopen(req, decode_bytes(data))
        http_response_obj.close()

    except urllib.error.HTTPError as e:
        if not hasattr(e, 'code'):
            self._log_error(u'Notification failed: %s' % ex(e))
        else:
            result = 'Notification failed. Error code: %s' % e.code
            self._log_error(result)

            # map known HTTP status codes to friendlier failure messages
            if 503 == e.code:
                result = 'Server too busy to handle the request at this time'
                self._log_warning(result)
            else:
                if 404 == e.code:
                    result = 'Access token is wrong/not associated to a device'
                    self._log_error(result)
                elif 401 == e.code:
                    result = 'Access token not recognized'
                    self._log_error(result)
                elif 400 == e.code:
                    result = 'Wrong data sent to Boxcar'
                    self._log_error(result)

    except urllib.error.URLError as e:
        self._log_error(u'Notification failed: %s' % ex(e))

    # returns True on success, otherwise a failure message string
    return self._choose((True, 'Failed to send notification: %s' % result)[bool(result)], not bool(result))
def _getnextheader(self, s):
    # Parse one top-level ASF object from buffer `s` and populate media
    # attributes accordingly; recurses for header-extension sub-objects.
    # Returns the unpacked (guid_bytes, object_size) tuple so the caller can
    # advance past this object.
    r = struct.unpack('<16sQ', s[:24])
    (guidstr, objsize) = r
    guid = self._parseguid(guidstr)

    if guid == GUIDS['ASF_File_Properties_Object']:
        log.debug(u'File Properties Object')
        val = struct.unpack('<16s6Q4I', s[24:24 + 80])
        (fileid, size, date, packetcount, duration, senddur,
         preroll, flags, minpack, maxpack, maxbr) = val
        # FIXME: parse date to timestamp
        # duration is expressed in 100ns units
        self.length = duration / 10000000.0

    elif guid == GUIDS['ASF_Stream_Properties_Object']:
        log.debug(u'Stream Properties Object [%d]' % objsize)
        streamtype = self._parseguid(s[24:40])
        # errortype = self._parseguid(s[40:56])
        offset, typelen, errorlen, flags = struct.unpack('<QIIH', s[56:74])
        strno = flags & 0x7f  # low 7 bits carry the stream number
        encrypted = flags >> 15  # top bit flags content encryption
        if encrypted:
            self._set('encrypted', True)
        if streamtype == GUIDS['ASF_Video_Media']:
            vi = core.VideoStream()
            vi.width, vi.height, depth, codec, = struct.unpack('<4xII2xH4s', s[89:89 + 20])
            vi.codec = codec
            vi.id = strno
            self.video.append(vi)
        elif streamtype == GUIDS['ASF_Audio_Media']:
            ai = core.AudioStream()
            twocc, ai.channels, ai.samplerate, bitrate, block, ai.samplebits, = \
                struct.unpack('<HHIIHH', s[78:78 + 16])
            ai.bitrate = 8 * bitrate  # bytes/sec -> bits/sec
            ai.codec = twocc
            ai.id = strno
            self.audio.append(ai)
        self._apply_extinfo(strno)

    elif guid == GUIDS['ASF_Extended_Stream_Properties_Object']:
        streamid, langid, frametime = struct.unpack('<HHQ', s[72:84])
        (bitrate, ) = struct.unpack('<I', s[40:40 + 4])
        if streamid not in self._extinfo:
            self._extinfo[streamid] = [None, None, None, {}]
        if frametime == 0:
            # Problaby VFR, report as 1000fps (which is what MPlayer does)
            frametime = 10000.0
        self._extinfo[streamid][:3] = [bitrate, 10000000.0 / frametime, langid]
        self._apply_extinfo(streamid)

    elif guid == GUIDS['ASF_Header_Extension_Object']:
        log.debug(u'ASF_Header_Extension_Object %d' % objsize)
        size = struct.unpack('<I', s[42:46])[0]
        data = s[46:46 + size]
        # recursively parse each contained sub-object
        while len(data):
            log.debug(u'Sub:')
            h = self._getnextheader(data)
            data = data[h[1]:]

    elif guid == GUIDS['ASF_Codec_List_Object']:
        log.debug(u'List Object')
        pass

    elif guid == GUIDS['ASF_Error_Correction_Object']:
        log.debug(u'Error Correction')
        pass

    elif guid == GUIDS['ASF_Content_Description_Object']:
        log.debug(u'Content Description Object')
        # five consecutive length-prefixed UTF-16 strings
        val = struct.unpack('<5H', s[24:24 + 10])
        pos = 34
        strings = []
        for i in val:
            ss = s[pos:pos + i].replace('\0', '').lstrip().rstrip()
            strings.append(ss)
            pos += i
        # Set empty strings to None
        strings = [x or None for x in strings]
        self.title, self.artist, self.copyright, self.caption, rating = strings

    elif guid == GUIDS['ASF_Extended_Content_Description_Object']:
        (count, ) = struct.unpack('<H', s[24:26])
        pos = 26
        descriptor = {}
        for i in range(0, count):
            # Read additional content descriptors
            d = self._parsekv(s[pos:])
            pos += d[0]
            descriptor[d[1]] = d[2]
        self._appendtable('ASFDESCRIPTOR', descriptor)

    elif guid == GUIDS['ASF_Metadata_Object']:
        (count, ) = struct.unpack('<H', s[24:26])
        pos = 26
        streams = {}
        for i in range(0, count):
            # Read additional content descriptors
            size, key, value, strno = self._parsekv2(s[pos:])
            if strno not in streams:
                streams[strno] = {}
            streams[strno][key] = value
            pos += size
        # merge per-stream metadata into the extended-info table
        for strno, metadata in streams.items():
            if strno not in self._extinfo:
                self._extinfo[strno] = [None, None, None, {}]
            self._extinfo[strno][3].update(metadata)
            self._apply_extinfo(strno)

    elif guid == GUIDS['ASF_Language_List_Object']:
        count = struct.unpack('<H', s[24:26])[0]
        pos = 26
        for i in range(0, count):
            # each entry is a byte length followed by a UTF-16 language id
            idlen = struct.unpack('<B', s[pos:pos + 1])[0]
            idstring = s[pos + 1:pos + 1 + idlen]
            idstring = decode_str(idstring, 'utf-16').replace('\0', '')
            log.debug(u'Language: %d/%d: %r' % (i + 1, count, idstring))
            self._languages.append(idstring)
            pos += 1 + idlen

    elif guid == GUIDS['ASF_Stream_Bitrate_Properties_Object']:
        # This record contains stream bitrate with payload overhead. For
        # audio streams, we should have the average bitrate from
        # ASF_Stream_Properties_Object. For video streams, we get it from
        # ASF_Extended_Stream_Properties_Object. So this record is not
        # used.
        pass

    elif guid == GUIDS['ASF_Content_Encryption_Object'] or \
            guid == GUIDS['ASF_Extended_Content_Encryption_Object']:
        self._set('encrypted', True)
    else:
        # Just print the type:
        for h in GUIDS.keys():
            if GUIDS[h] == guid:
                log.debug(u'Unparsed %r [%d]' % (h, objsize))
                break
        else:
            u = "%.8X-%.4X-%.4X-%.2X%.2X-%s" % guid
            log.debug(u'unknown: len=%d [%d]' % (len(u), objsize))
    return r
def _extractHeaderString(self, header):
    """Read one length-prefixed UTF-8 string from a header buffer.

    :param header: buffer beginning with a little-endian uint32 byte length
    :return: 2-tuple of (bytes consumed including the prefix, decoded text
        or None when decoding fails)
    """
    # renamed from `len`, which shadowed the builtin of the same name
    str_len = struct.unpack('<I', header[:4])[0]
    try:
        return (str_len + 4, decode_str(header[4:4 + str_len], 'utf-8'))
    except (KeyError, IndexError, UnicodeDecodeError):
        return (str_len + 4, None)
def _update(self, host=None, show_name=None):
    """ Handle updating Kodi host via HTTP API

    Update the video library for a specific tv show if passed, otherwise update the
    whole library if option enabled.

    Args:
        show_name: Name of a TV show to target for a library update

    Return:
        True or False
    """
    if not host:
        self._log_warning(u'No host specified, aborting update')
        return False

    self._log_debug(u'Updating library via HTTP method for host: %s' % host)

    # if we're doing per-show
    if show_name:
        self._log_debug(u'Updating library via HTTP method for show %s' % show_name)

        # query Kodi's own video db for the on-disk path(s) of this show
        # noinspection SqlResolve
        path_sql = 'SELECT path.strPath' \
                   ' FROM path, tvshow, tvshowlinkpath' \
                   ' WHERE tvshow.c00 = "%s"' % show_name \
                   + ' AND tvshowlinkpath.idShow = tvshow.idShow' \
                     ' AND tvshowlinkpath.idPath = path.idPath'

        # set xml response format, if this fails then don't bother with the rest
        if not self._send(
                host,
                {'command': 'SetResponseFormat(webheader;false;webfooter;false;header;<xml>;footer;</xml>;'
                            'opentag;<tag>;closetag;</tag>;closefinaltag;false)'}):
            return False

        # sql used to grab path(s)
        response = self._send(host, {'command': 'QueryVideoDatabase(%s)' % path_sql})
        if not response:
            self._log_debug(u'Invalid response for %s on %s' % (show_name, host))
            return False

        try:
            et = XmlEtree.fromstring(quote(response, ':\\/<>'))
        except SyntaxError as e:
            self._log_error(u'Unable to parse XML in response: %s' % ex(e))
            return False

        paths = et.findall('.//field')
        if not paths:
            self._log_debug(u'No valid path found for %s on %s' % (show_name, host))
            return False

        for path in paths:
            # we do not need it double-encoded, gawd this is dumb
            un_enc_path = decode_str(unquote(path.text), sickbeard.SYS_ENCODING)
            self._log_debug(u'Updating %s on %s at %s' % (show_name, host, un_enc_path))

            if not self._send(
                    host,
                    dict(command='ExecBuiltIn',
                         parameter='Kodi.updatelibrary(video, %s)' % un_enc_path)):
                self._log_error(u'Update of show directory failed for %s on %s at %s'
                                % (show_name, host, un_enc_path))
                return False

            # sleep for a few seconds just to be sure kodi has a chance to finish each directory
            if 1 < len(paths):
                time.sleep(5)
    # do a full update if requested
    else:
        self._log_debug(u'Full library update on host: %s' % host)

        if not self._send(host, dict(command='ExecBuiltIn', parameter='Kodi.updatelibrary(video)')):
            self._log_error(u'Failed full library update on: %s' % host)
            return False

    return True
def _parse_string(self, name):
    # type: (AnyStr) -> Optional[ParseResult]
    """Run all compiled regex sets against `name` and return the best ParseResult.

    :param name: name to parse
    :type name: AnyStr
    :return: best scoring parse result, or None when nothing matches
    :rtype: ParseResult or None
    """
    if not name:
        return

    matches = []
    initial_best_result = None
    # outer key selects a regex set (e.g. normal vs anime); inner entries are ordered by priority
    for reg_ex in self.compiled_regexes:
        for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes[reg_ex]:
            new_name = helpers.remove_non_release_groups(name, 'anime' in cur_regex_name)
            match = cur_regex.match(new_name)
            if not match:
                continue
            if 'garbage_name' == cur_regex_name:
                # known-junk pattern: bail out entirely
                return

            result = ParseResult(new_name)
            result.which_regex = [cur_regex_name]
            # lower-priority regexes start with a worse base score
            result.score = 0 - cur_regex_num

            named_groups = list_keys(match.groupdict())

            if 'series_name' in named_groups:
                result.series_name = match.group('series_name')
                if result.series_name:
                    result.series_name = self.clean_series_name(result.series_name)
                    # split a trailing 'part x'/'pt x' off the series name into extra_info
                    name_parts = re.match(r'(?i)(.*)[ -]((?:part|pt)[ -]?\w+)$', result.series_name)
                    try:
                        result.series_name = name_parts.group(1)
                        result.extra_info = name_parts.group(2)
                    except (AttributeError, IndexError):
                        pass
                    result.score += 1

            if 'anime' in cur_regex_name and not (self.show_obj and self.show_obj.is_anime):
                # anime regex matched but show isn't known anime; verify via a show lookup
                p_show_obj = helpers.get_show(result.series_name, True)
                if p_show_obj and self.show_obj and not (p_show_obj.tvid == self.show_obj.tvid and
                                                         p_show_obj.prodid == self.show_obj.prodid):
                    p_show_obj = None
                if not p_show_obj and self.show_obj:
                    p_show_obj = self.show_obj
                if p_show_obj and not p_show_obj.is_anime:
                    continue

            if 'series_num' in named_groups and match.group('series_num'):
                result.score += 1

            if 'season_num' in named_groups:
                tmp_season = int(match.group('season_num'))
                if 'bare' == cur_regex_name and tmp_season in (19, 20):
                    # bare 19xx/20xx is almost certainly a year, not a season
                    continue
                result.season_number = tmp_season
                result.score += 1

            def _process_epnum(captures, capture_names, grp_name, extra_grp_name, ep_numbers, parse_result):
                # Resolve single or ranged episode numbers for regular ('') or
                # absolute ('ab_') numbering into parse_result.
                ep_num = self._convert_number(captures.group(grp_name))
                extra_grp_name = 'extra_%s' % extra_grp_name
                ep_numbers = '%sepisode_numbers' % ep_numbers

                if extra_grp_name in capture_names and captures.group(extra_grp_name):
                    try:
                        if hasattr(self.show_obj, 'get_episode'):
                            _ep_obj = self.show_obj.get_episode(parse_result.season_number, ep_num)
                        else:
                            tmp_show_obj = helpers.get_show(parse_result.series_name, True)
                            if tmp_show_obj and hasattr(tmp_show_obj, 'get_episode'):
                                _ep_obj = tmp_show_obj.get_episode(parse_result.season_number, ep_num)
                            else:
                                _ep_obj = None
                    except (BaseException, Exception):
                        _ep_obj = None
                    # leading digits of the episode title, used to spot mislabelled ranges
                    en = _ep_obj and _ep_obj.name and re.match(r'^\W*(\d+)', _ep_obj.name) or None
                    es = en and en.group(1) or None

                    extra_grp_num = captures.group(extra_grp_name)
                    extra_ep_num = self._convert_number(extra_grp_num)
                    # accept the range only when plausible (ascending, fewer than 10 eps)
                    parse_result.__dict__[ep_numbers] = list_range(ep_num, extra_ep_num + 1) if not (
                        _ep_obj and es and es != extra_grp_num) and (
                        0 < extra_ep_num - ep_num < 10) else [ep_num]
                    parse_result.score += 1
                else:
                    parse_result.__dict__[ep_numbers] = [ep_num]
                parse_result.score += 1
                return parse_result

            if 'ep_num' in named_groups:
                result = _process_epnum(match, named_groups, 'ep_num', 'ep_num', '', result)

            if 'ep_ab_num' in named_groups:
                result = _process_epnum(match, named_groups, 'ep_ab_num', 'ab_ep_num', 'ab_', result)

            if 'air_year' in named_groups and 'air_month' in named_groups and 'air_day' in named_groups:
                year = int(match.group('air_year'))
                try:
                    month = int(match.group('air_month'))
                except ValueError:
                    try:
                        # month may be a name, e.g. 'Jan'
                        month = time.strptime(match.group('air_month')[0:3], '%b').tm_mon
                    except ValueError as e:
                        raise InvalidNameException(ex(e))
                day = int(match.group('air_day'))
                # make an attempt to detect YYYY-DD-MM formats
                if 12 < month:
                    tmp_month = month
                    month = day
                    day = tmp_month
                try:
                    # expand 2-digit years: <28 -> 20xx, otherwise 19xx
                    result.air_date = datetime.date(
                        year + ((1900, 2000)[0 < year < 28], 0)[1900 < year], month, day)
                except ValueError as e:
                    raise InvalidNameException(ex(e))

            if 'extra_info' in named_groups:
                tmp_extra_info = match.group('extra_info')
                # Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
                if tmp_extra_info and 'season_only' == cur_regex_name and re.search(
                        r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
                    continue
                if tmp_extra_info:
                    if result.extra_info:
                        tmp_extra_info = '%s %s' % (result.extra_info, tmp_extra_info)
                    result.extra_info = tmp_extra_info
                result.score += 1

            if 'release_group' in named_groups:
                result.release_group = match.group('release_group')
                result.score += 1

            if 'version' in named_groups:
                # assigns version to anime file if detected using anime regex. Non-anime regex receives -1
                version = match.group('version')
                if version:
                    result.version = helpers.try_int(version)
                else:
                    result.version = 1
            else:
                result.version = -1

            if None is result.season_number and result.episode_numbers and not result.air_date and \
                    cur_regex_name in ['no_season', 'no_season_general', 'no_season_multi_ep'] and \
                    re.search(r'(?i)\bpart.?\d{1,2}\b', result.original_name):
                result.season_number = 1

            matches.append(result)

        if len(matches):
            # pick best match with highest score based on placement
            best_result = max(sorted(matches, reverse=True, key=lambda x: x.which_regex),
                              key=lambda x: x.score)

            show_obj = None
            if not self.naming_pattern:
                # try and create a show object for this result
                show_obj = helpers.get_show(best_result.series_name, self.try_scene_exceptions)

            # confirm passed in show object tvid_prodid matches result show object tvid_prodid
            if show_obj and not self.testing:
                if self.show_obj and show_obj.tvid_prodid != self.show_obj.tvid_prodid:
                    show_obj = None
            elif not show_obj and self.show_obj:
                show_obj = self.show_obj
            best_result.show_obj = show_obj
            if not best_result.series_name and getattr(show_obj, 'name', None):
                best_result.series_name = show_obj.name

            # anime shows get a second pass with the anime regex set
            if show_obj and show_obj.is_anime and 1 < len(self.compiled_regexes[1]) and 1 != reg_ex:
                continue

            # if this is a naming pattern test then return best result
            if not show_obj or self.naming_pattern:
                if not show_obj and not self.naming_pattern and not self.testing:
                    # ensure anime regex test but use initial best if show still not found
                    if 0 == reg_ex:
                        initial_best_result = best_result
                        matches = []  # clear non-anime match scores
                        continue
                    return initial_best_result
                return best_result

            # get quality
            new_name = helpers.remove_non_release_groups(name, show_obj.is_anime)
            best_result.quality = common.Quality.nameQuality(new_name, show_obj.is_anime)

            new_episode_numbers = []
            new_season_numbers = []
            new_absolute_numbers = []

            # if we have an air-by-date show then get the real season/episode numbers
            if best_result.is_air_by_date:
                season_number, episode_numbers = None, []

                airdate = best_result.air_date.toordinal()
                my_db = db.DBConnection()
                sql_result = my_db.select(
                    'SELECT season, episode, name'
                    ' FROM tv_episodes'
                    ' WHERE indexer = ? AND showid = ?'
                    ' AND airdate = ?',
                    [show_obj.tvid, show_obj.prodid, airdate])

                if sql_result:
                    season_number = int(sql_result[0]['season'])
                    episode_numbers = [int(sql_result[0]['episode'])]

                    if 1 < len(sql_result):
                        # multi-eps broadcast on this day
                        nums = {'1': 'one', '2': 'two', '3': 'three', '4': 'four', '5': 'five',
                                '6': 'six', '7': 'seven', '8': 'eight', '9': 'nine', '10': 'ten'}
                        patt = '(?i)(?:e(?:p(?:isode)?)?|part|pt)[. _-]?(%s)'
                        try:
                            # match digit or word part numbers against episode titles
                            src_num = str(re.findall(patt % r'\w+', best_result.extra_info)[0])
                            alt_num = nums.get(src_num) or list(iterkeys(nums))[
                                list(itervalues(nums)).index(src_num)]
                            re_partnum = re.compile(patt % ('%s|%s' % (src_num, alt_num)))
                            for ep_details in sql_result:
                                if re_partnum.search(ep_details['name']):
                                    season_number = int(ep_details['season'])
                                    episode_numbers = [int(ep_details['episode'])]
                                    break
                        except (BaseException, Exception):
                            pass

                if self.indexer_lookup and not season_number or not len(episode_numbers):
                    # local db had no answer; ask the TV info source by air date
                    try:
                        tvinfo_config = sickbeard.TVInfoAPI(show_obj.tvid).api_params.copy()

                        if show_obj.lang:
                            tvinfo_config['language'] = show_obj.lang

                        t = sickbeard.TVInfoAPI(show_obj.tvid).setup(**tvinfo_config)

                        ep_obj = t[show_obj.prodid].aired_on(best_result.air_date)[0]

                        season_number = int(ep_obj['seasonnumber'])
                        episode_numbers = [int(ep_obj['episodenumber'])]
                    except BaseTVinfoEpisodenotfound as e:
                        logger.log(u'Unable to find episode with date ' + str(best_result.air_date)
                                   + ' for show ' + show_obj.name + ', skipping', logger.WARNING)
                        episode_numbers = []
                    except BaseTVinfoError as e:
                        logger.log(u'Unable to contact ' + sickbeard.TVInfoAPI(show_obj.tvid).name
                                   + ': ' + ex(e), logger.WARNING)
                        episode_numbers = []

                for epNo in episode_numbers:
                    s = season_number
                    e = epNo

                    if self.convert and show_obj.is_scene:
                        (s, e) = scene_numbering.get_indexer_numbering(
                            show_obj.tvid, show_obj.prodid, season_number, epNo)
                    new_episode_numbers.append(e)
                    new_season_numbers.append(s)

            elif show_obj.is_anime and len(best_result.ab_episode_numbers) and not self.testing:
                # anime: map absolute numbers to season/episode pairs
                scene_season = scene_exceptions.get_scene_exception_by_name(best_result.series_name)[2]
                for epAbsNo in best_result.ab_episode_numbers:
                    a = epAbsNo

                    if self.convert and show_obj.is_scene:
                        a = scene_numbering.get_indexer_absolute_numbering(
                            show_obj.tvid, show_obj.prodid, epAbsNo, True, scene_season)

                    (s, e) = helpers.get_all_episodes_from_absolute_number(show_obj, [a])

                    new_absolute_numbers.append(a)
                    new_episode_numbers.extend(e)
                    new_season_numbers.append(s)

            elif best_result.season_number and len(best_result.episode_numbers) and not self.testing:
                for epNo in best_result.episode_numbers:
                    s = best_result.season_number
                    e = epNo

                    if self.convert and show_obj.is_scene:
                        (s, e) = scene_numbering.get_indexer_numbering(
                            show_obj.tvid, show_obj.prodid, best_result.season_number, epNo)
                    if show_obj.is_anime:
                        a = helpers.get_absolute_number_from_season_and_episode(show_obj, s, e)
                        if a:
                            new_absolute_numbers.append(a)

                    new_episode_numbers.append(e)
                    new_season_numbers.append(s)

            # need to do a quick sanity check here.  It's possible that we now have episodes
            # from more than one season (by tvdb numbering), and this is just too much, so flag it.
            new_season_numbers = list(set(new_season_numbers))  # remove duplicates
            if 1 < len(new_season_numbers):
                raise InvalidNameException('Scene numbering results episodes from '
                                           'seasons %s, (i.e. more than one) and '
                                           'SickGear does not support this.  '
                                           'Sorry.' % (str(new_season_numbers)))

            # I guess it's possible that we'd have duplicate episodes too, so lets
            # eliminate them
            new_episode_numbers = list(set(new_episode_numbers))
            new_episode_numbers.sort()

            # maybe even duplicate absolute numbers so why not do them as well
            new_absolute_numbers = list(set(new_absolute_numbers))
            new_absolute_numbers.sort()

            if len(new_absolute_numbers):
                best_result.ab_episode_numbers = new_absolute_numbers

            if len(new_season_numbers) and len(new_episode_numbers):
                best_result.episode_numbers = new_episode_numbers
                best_result.season_number = new_season_numbers[0]

            if self.convert and show_obj.is_scene:
                logger.log(u'Converted parsed result %s into %s'
                           % (best_result.original_name,
                              decode_str(str(best_result), errors='xmlcharrefreplace')),
                           logger.DEBUG)

            helpers.cpu_sleep()

            return best_result
def parse(self, name, cache_result=True):
    """Parse a release/file name into a ParseResult.

    Parses both the file name (extension stripped when self.file_name is set)
    and its direct parent directory name, then merges the two partial results
    field by field.

    :param name: release or file name (optionally with a directory component)
    :type name: AnyStr
    :param cache_result: cache the parsed result for later identical calls;
        forced off when a custom naming_pattern is in use
    :type cache_result: bool
    :return: merged parse result
    :rtype: ParseResult
    :raises InvalidShowException: no show could be matched (unless self.testing)
    :raises InvalidNameException: nothing useful could be parsed from the name
    """
    name = self._unicodify(name)

    # custom naming patterns are ad-hoc, so never cache those results
    if self.naming_pattern:
        cache_result = False

    cached = name_parser_cache.get(name)
    if cached:
        return cached

    # break it into parts if there are any (dirname, file name, extension)
    dir_name, file_name = ek.ek(os.path.split, name)

    if self.file_name:
        base_file_name = helpers.remove_extension(file_name)
    else:
        base_file_name = file_name

    # set up a result to use
    final_result = ParseResult(name)

    # try parsing the file name
    file_name_result = self._parse_string(base_file_name)

    # use only the direct parent dir
    dir_name = ek.ek(os.path.basename, dir_name)

    # parse the dirname for extra info if needed
    dir_name_result = self._parse_string(dir_name)

    # build the ParseResult object
    # numbers/dates: the file name takes precedence over the dir name
    final_result.air_date = self._combine_results(file_name_result, dir_name_result, 'air_date')

    # anime absolute numbers
    final_result.ab_episode_numbers = self._combine_results(
        file_name_result, dir_name_result, 'ab_episode_numbers')

    # season and episode numbers
    final_result.season_number = self._combine_results(
        file_name_result, dir_name_result, 'season_number')
    final_result.episode_numbers = self._combine_results(
        file_name_result, dir_name_result, 'episode_numbers')

    # if the dirname has a release group/show name I believe it over the filename
    final_result.series_name = self._combine_results(
        dir_name_result, file_name_result, 'series_name')
    final_result.extra_info = self._combine_results(
        dir_name_result, file_name_result, 'extra_info')
    final_result.release_group = self._combine_results(
        dir_name_result, file_name_result, 'release_group')
    final_result.version = self._combine_results(dir_name_result, file_name_result, 'version')

    # record which regex produced the merged result; when the merge matches
    # neither source exactly, collect the regexes from both
    final_result.which_regex = []
    if final_result == file_name_result:
        final_result.which_regex = file_name_result.which_regex
    elif final_result == dir_name_result:
        final_result.which_regex = dir_name_result.which_regex
    else:
        if file_name_result:
            final_result.which_regex += file_name_result.which_regex
        if dir_name_result:
            final_result.which_regex += dir_name_result.which_regex

    final_result.show_obj = self._combine_results(file_name_result, dir_name_result, 'show_obj')
    final_result.quality = self._combine_results(file_name_result, dir_name_result, 'quality')

    if not final_result.show_obj:
        if self.testing:
            pass
        else:
            # NOTE(review): on py3, name.encode(...) yields bytes, so the message
            # renders as "b'...'" — confirm intended interpreter target
            raise InvalidShowException(
                'Unable to parse %s' % name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))

    # if there's no useful info in it then raise an exception
    if None is final_result.season_number and not final_result.episode_numbers and None is final_result.air_date \
            and not final_result.ab_episode_numbers and not final_result.series_name:
        raise InvalidNameException(
            'Unable to parse %s' % name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))

    if cache_result:
        name_parser_cache.add(name, final_result)

    logger.log(
        u'Parsed %s into %s' % (name, decode_str(str(final_result), errors='xmlcharrefreplace')),
        logger.DEBUG)
    return final_result
def _update_library_http(self, host=None, show_name=None):
    """Handles updating XBMC host via HTTP API

    Attempts to update the XBMC video library for a specific tv show if passed,
    otherwise update the whole library if enabled.

    Args:
        host: XBMC webserver host:port
        show_name: Name of a TV show to specifically target the library update for

    Returns:
        Returns True or False
    """
    if not host:
        self._log_debug(u'No host passed, aborting update')
        return False

    self._log_debug(u'Updating XMBC library via HTTP method for host: ' + host)

    # if we're doing per-show
    if show_name:
        self._log_debug(u'Updating library via HTTP method for show ' + show_name)

        # FIX: escape the string delimiter by doubling it, otherwise a show
        # title containing a double quote breaks the query (XBMC's video db is
        # SQLite, where "" inside a quoted token is a literal quote character).
        # NOTE(review): the legacy HTTP API offers no parameter binding, so the
        # name is still interpolated into SQL; this remains best-effort only.
        # noinspection SqlResolve
        path_sql = 'select path.strPath' \
                   ' from path, tvshow, tvshowlinkpath' \
                   ' where tvshow.c00 = "%s"' \
                   ' and tvshowlinkpath.idShow = tvshow.idShow' \
                   ' and tvshowlinkpath.idPath = path.idPath' % show_name.replace('"', '""')

        # use this to get xml back for the path lookups
        xml_command = dict(
            command='SetResponseFormat(webheader;false;webfooter;false;header;<xml>;footer;</xml>;'
                    'opentag;<tag>;closetag;</tag>;closefinaltag;false)')
        # sql used to grab path(s)
        sql_command = dict(command='QueryVideoDatabase(%s)' % path_sql)
        # set output back to default
        reset_command = dict(command='SetResponseFormat()')

        # set xml response format, if this fails then don't bother with the rest
        request = self._send_to_xbmc(xml_command, host)
        if not request:
            return False

        sql_xml = self._send_to_xbmc(sql_command, host)
        # always restore the default response format, even if the query failed
        self._send_to_xbmc(reset_command, host)

        if not sql_xml:
            self._log_debug(u'Invalid response for ' + show_name + ' on ' + host)
            return False

        # percent-encode everything except the characters XML itself needs
        enc_sql_xml = quote(sql_xml, ':\\/<>')
        try:
            et = XmlEtree.fromstring(enc_sql_xml)
        except SyntaxError as e:
            self._log_error(u'Unable to parse XML response: ' + ex(e))
            return False

        paths = et.findall('.//field')
        if not paths:
            self._log_debug(u'No valid paths found for ' + show_name + ' on ' + host)
            return False

        for path in paths:
            # we do not need it double-encoded, gawd this is dumb
            un_enc_path = decode_str(unquote(path.text), sickbeard.SYS_ENCODING)
            self._log_debug(u'Updating ' + show_name + ' on ' + host + ' at ' + un_enc_path)

            update_command = dict(command='ExecBuiltIn',
                                  parameter='XBMC.updatelibrary(video, %s)' % un_enc_path)
            request = self._send_to_xbmc(update_command, host)
            if not request:
                self._log_error(u'Update of show directory failed on ' + show_name
                                + ' on ' + host + ' at ' + un_enc_path)
                return False

            # sleep for a few seconds just to be sure xbmc has a chance to finish each directory
            if 1 < len(paths):
                time.sleep(5)

    # do a full update if requested
    else:
        self._log(u'Doing full library update on host: ' + host)

        update_command = {'command': 'ExecBuiltIn', 'parameter': 'XBMC.updatelibrary(video)'}
        request = self._send_to_xbmc(update_command, host)
        if not request:
            self._log_error(u'Full Library update failed on: ' + host)
            return False

    return True
def _readatom(self, file):
    """Read and dispatch one QuickTime atom from `file`.

    Reads the 8-byte atom header (big-endian 32-bit size + 4-char type),
    then parses the known atom types (udta, trak, mvhd, cmov, moov, mdat,
    rmra, rmda) and skips everything else. Populates self.video/audio/
    subtitles, self.length, self.volume, self.timestamp and
    self._references as side effects.

    Returns the atom size (callers loop `while self._readatom(f): pass`),
    or 0 at end of data / nonsense input.

    NOTE(review): atom types are compared against native str literals while
    struct.unpack returns bytes on py3 — looks py2-oriented; confirm the
    intended interpreter (StringIO.StringIO use points the same way).
    """
    s = file.read(8)
    if len(s) < 8:
        # truncated header: end of parseable data
        return 0

    atomsize, atomtype = struct.unpack('>I4s', s)
    if not decode_str(atomtype, 'latin1').isalnum():
        # stop at nonsense data
        return 0

    log.debug(u'%r [%X]' % (atomtype, atomsize))

    if atomtype == 'udta':
        # Userdata (Metadata)
        pos = 0
        tabl = {}
        i18ntabl = {}
        atomdata = file.read(atomsize - 8)
        while pos < atomsize - 12:
            (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
            if byte2int(datatype) == 169:
                # 0xA9 ('©') prefix marks i18n Metadata...
                mypos = 8 + pos
                while mypos + 4 < datasize + pos:
                    # first 4 Bytes are i18n header: payload length + language code
                    (tlen, lang) = struct.unpack('>HH', atomdata[mypos:mypos + 4])
                    i18ntabl[lang] = i18ntabl.get(lang, {})
                    length = atomdata[mypos + 4:mypos + tlen + 4]
                    # key by the type without the 0xA9 prefix byte
                    i18ntabl[lang][datatype[1:]] = length
                    mypos += tlen + 4
            elif datatype == 'WLOC':
                # Drop Window Location
                pass
            else:
                if byte2int(atomdata[pos + 8:pos + datasize]) > 1:
                    tabl[datatype] = atomdata[pos + 8:pos + datasize]
            pos += datasize
        if len(i18ntabl.keys()) > 0:
            # prefer the English language table when present
            for k in i18ntabl.keys():
                if k in QTLANGUAGES and QTLANGUAGES[k] == 'en':
                    self._appendtable('QTUDTA', i18ntabl[k])
                    self._appendtable('QTUDTA', tabl)
        else:
            log.debug(u'NO i18')
            self._appendtable('QTUDTA', tabl)
    elif atomtype == 'trak':
        # a single media track: collect its properties then build a stream object
        atomdata = file.read(atomsize - 8)
        pos = 0
        trackinfo = {}
        tracktype = None
        while pos < atomsize - 8:
            (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])

            if datatype == 'tkhd':
                # track header: fixed-point 16.16 width/height, track id
                tkhd = struct.unpack('>6I8x4H36xII', atomdata[pos + 8:pos + datasize])
                trackinfo['width'] = tkhd[10] >> 16
                trackinfo['height'] = tkhd[11] >> 16
                trackinfo['id'] = tkhd[3]

                try:
                    # XXX Timestamp of Seconds is since January 1st 1904!
                    # XXX 2082844800 is the difference between Unix and
                    # XXX Apple time. FIXME to work on Apple, too
                    self.timestamp = int(tkhd[1]) - 2082844800
                except (BaseException, Exception):
                    log.exception(u'There was trouble extracting timestamp')
            elif datatype == 'mdia':
                # media atom: step inside and walk its child atoms in place
                pos += 8
                datasize -= 8
                log.debug(u'--> mdia information')

                while datasize:
                    mdia = struct.unpack('>I4s', atomdata[pos:pos + 8])
                    if mdia[1] == 'mdhd':
                        # Parse based on version of mdhd header. See
                        # http://wiki.multimedia.cx/index.php?title=QuickTime_container#mdhd
                        ver = indexbytes(atomdata, pos + 8)
                        if ver == 0:
                            mdhd = struct.unpack('>IIIIIhh', atomdata[pos + 8:pos + 8 + 24])
                        elif ver == 1:
                            # v1 uses 64-bit creation/modification/duration fields
                            mdhd = struct.unpack('>IQQIQhh', atomdata[pos + 8:pos + 8 + 36])
                        else:
                            mdhd = None

                        if mdhd:
                            # duration / time scale
                            trackinfo['length'] = mdhd[4] / mdhd[3]
                            if mdhd[5] in QTLANGUAGES:
                                trackinfo['language'] = QTLANGUAGES[mdhd[5]]
                            elif mdhd[5] == 0x7FF:
                                # 0x7FF == unspecified language
                                trackinfo['language'] = 'und'
                            elif mdhd[5] >= 0x400:
                                # language code detected as explained in:
                                # https://developer.apple.com/library/mac/documentation/QuickTime/qtff/QTFFChap4/qtff4.html#//apple_ref/doc/uid/TP40000939-CH206-35103
                                # three 5-bit values, each offset by 0x60, pack an ISO 639-2 code
                                language = bytearray([
                                    ((mdhd[5] & 0x7C00) >> 10) + 0x60,
                                    ((mdhd[5] & 0x3E0) >> 5) + 0x60,
                                    (mdhd[5] & 0x1F) + 0x60])
                                trackinfo['language'] = str(language)
                            # mdhd[6] == quality
                            self.length = max(self.length, mdhd[4] / mdhd[3])
                    elif mdia[1] == 'minf':
                        # minf has only atoms inside: rewind past its header so the
                        # loop descends into its children
                        pos -= (mdia[0] - 8)
                        datasize += (mdia[0] - 8)
                    elif mdia[1] == 'stbl':
                        # stbl has only atoms inside
                        pos -= (mdia[0] - 8)
                        datasize += (mdia[0] - 8)
                    elif mdia[1] == 'hdlr':
                        # handler reference: determines the track type
                        hdlr = struct.unpack('>I4s4s', atomdata[pos + 8:pos + 8 + 12])
                        if hdlr[1] == 'mhlr' or hdlr[1] == '\0\0\0\0':
                            if hdlr[2] == 'vide':
                                tracktype = 'video'
                            if hdlr[2] == 'soun':
                                tracktype = 'audio'
                            if hdlr[2] == 'subt' or hdlr[2] == 'sbtl' or hdlr[2] == 'subp' or hdlr[2] == 'text':
                                tracktype = 'subtitle'
                    elif mdia[1] == 'stsd':
                        # sample description: first entry carries the codec fourcc
                        stsd = struct.unpack('>2I', atomdata[pos + 8:pos + 8 + 8])
                        if stsd[1] > 0:
                            codec = atomdata[pos + 16:pos + 16 + 8]
                            codec = struct.unpack('>I4s', codec)
                            trackinfo['codec'] = codec[1]
                            if codec[1] == 'jpeg':
                                tracktype = 'image'
                    elif mdia[1] == 'dinf':
                        # data information: enumerate data references (unused)
                        dref = struct.unpack('>I4s', atomdata[pos + 8:pos + 8 + 8])
                        log.debug(u' --> %r, %r (useless)' % mdia)
                        if dref[1] == 'dref':
                            num = struct.unpack('>I', atomdata[pos + 20:pos + 20 + 4])[0]
                            rpos = pos + 20 + 4
                            for ref in range(num):
                                # FIXME: do somthing if this references
                                ref = struct.unpack('>I3s', atomdata[rpos:rpos + 7])
                                data = atomdata[rpos + 7:rpos + ref[0]]
                                rpos += ref[0]
                    else:
                        if mdia[1].startswith('st'):
                            log.debug(u' --> %r, %r (sample)' % mdia)
                        elif mdia[1] == 'vmhd' and not tracktype:
                            # indicates that this track is video
                            tracktype = 'video'
                        elif mdia[1] in ['vmhd', 'smhd'] and not tracktype:
                            # indicates that this track is audio
                            tracktype = 'audio'
                        else:
                            log.debug(u' --> %r, %r (unknown)' % mdia)

                    pos += mdia[0]
                    datasize -= mdia[0]
            elif datatype == 'udta':
                log.debug(u'udta: %r' % struct.unpack('>I4s', atomdata[:8]))
            else:
                if datatype == 'edts':
                    log.debug(u'--> %r [%d] (edit list)' % (datatype, datasize))
                else:
                    log.debug(u'--> %r [%d] (unknown)' % (datatype, datasize))
            pos += datasize

        # materialize the collected track info as the right stream object
        info = None
        if tracktype == 'video':
            info = core.VideoStream()
            self.video.append(info)
        if tracktype == 'audio':
            info = core.AudioStream()
            self.audio.append(info)
        if tracktype == 'subtitle':
            info = core.Subtitle()
            self.subtitles.append(info)
        if info:
            for key, value in trackinfo.items():
                setattr(info, key, value)
    elif atomtype == 'mvhd':
        # movie header: overall duration (duration / time scale) and volume
        mvhd = struct.unpack('>6I2h', file.read(28))
        self.length = max(self.length, mvhd[4] / mvhd[3])
        self.volume = mvhd[6]
        file.seek(atomsize - 8 - 28, 1)
    elif atomtype == 'cmov':
        # compressed movie: dcom (method) followed by cmvd (payload)
        datasize, atomtype = struct.unpack('>I4s', file.read(8))
        if not atomtype == 'dcom':
            return atomsize

        method = struct.unpack('>4s', file.read(datasize - 8))[0]

        datasize, atomtype = struct.unpack('>I4s', file.read(8))
        if not atomtype == 'cmvd':
            return atomsize

        if method == 'zlib':
            data = file.read(datasize - 8)
            try:
                decompressed = zlib.decompress(data)
            except Exception:
                try:
                    # some files prepend a 4-byte uncompressed-size field
                    decompressed = zlib.decompress(data[4:])
                except Exception:
                    log.exception(u'There was a proble decompressiong atom')
                    return atomsize
            # recurse into the decompressed movie header atoms
            decompressedIO = StringIO.StringIO(decompressed)
            while self._readatom(decompressedIO):
                pass
        else:
            log.info(u'unknown compression %r' % method)
            # unknown compression method
            file.seek(datasize - 8, 1)
    elif atomtype == 'moov':
        # decompressed movie info
        while self._readatom(file):
            pass
    elif atomtype == 'mdat':
        pos = file.tell() + atomsize - 8
        # maybe there is data inside the mdat
        log.info(u'parsing mdat')
        while self._readatom(file):
            pass
        log.info(u'end of mdat')
        file.seek(pos, 0)
    elif atomtype == 'rmra':
        # reference list
        while self._readatom(file):
            pass
    elif atomtype == 'rmda':
        # reference
        atomdata = file.read(atomsize - 8)
        pos = 0
        url = ''
        quality = 0
        datarate = 0
        while pos < atomsize - 8:
            (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
            if datatype == 'rdrf':
                rflags, rtype, rlen = struct.unpack('>I4sI', atomdata[pos + 8:pos + 20])
                if rtype == 'url ':
                    url = atomdata[pos + 20:pos + 20 + rlen]
                    # trim at the first NUL terminator
                    if url.find('\0') > 0:
                        url = url[:url.find('\0')]
            elif datatype == 'rmqu':
                quality = struct.unpack('>I', atomdata[pos + 8:pos + 12])[0]
            elif datatype == 'rmdr':
                datarate = struct.unpack('>I', atomdata[pos + 12:pos + 16])[0]
            pos += datasize
        if url:
            self._references.append((url, quality, datarate))
    else:
        if atomtype not in ['wide', 'free']:
            log.info(u'unhandled base atom %r' % atomtype)

        # Skip unknown atoms
        try:
            file.seek(atomsize - 8, 1)
        except IOError:
            return 0

    return atomsize
def _show_data(self, show_obj):
    # type: (sickbeard.tv.TVShow) -> Optional[Union[bool, etree.Element]]
    """
    Creates an elementTree XML structure for a Kodi-style tvshow.nfo and
    returns the resulting data object.

    show_obj: a TVShow instance to create the NFO for

    Returns None when the show fails validation, False when required info
    (title, id, or any known unique id) is missing, otherwise the nfo XML
    as a string.

    Raises the TVInfo show-not-found / provider errors through to the caller.
    """
    show_ID = show_obj.prodid

    show_lang = show_obj.lang
    tvinfo_config = sickbeard.TVInfoAPI(show_obj.tvid).api_params.copy()

    # actor info is wanted in the nfo output
    tvinfo_config['actors'] = True

    if show_lang and not 'en' == show_lang:
        tvinfo_config['language'] = show_lang

    if 0 != show_obj.dvdorder:
        tvinfo_config['dvdorder'] = True

    t = sickbeard.TVInfoAPI(show_obj.tvid).setup(**tvinfo_config)

    tv_node = etree.Element('tvshow')

    try:
        show_info = t[int(show_ID)]
    except BaseTVinfoShownotfound as e:
        logger.log(
            'Unable to find show with id %s on %s, skipping it' %
            (show_ID, sickbeard.TVInfoAPI(show_obj.tvid).name), logger.ERROR)
        raise e
    except BaseTVinfoError as e:
        logger.log(
            '%s is down, can\'t use its data to add this show' %
            sickbeard.TVInfoAPI(show_obj.tvid).name, logger.ERROR)
        raise e

    if not self._valid_show(show_info, show_obj):
        return

    # check for title and id
    if None is getattr(show_info, 'seriesname', None) or None is getattr(
            show_info, 'id', None):
        logger.log(
            'Incomplete info for show with id %s on %s, skipping it' %
            (show_ID, sickbeard.TVInfoAPI(show_obj.tvid).name), logger.ERROR)
        return False

    title = etree.SubElement(tv_node, 'title')
    if None is not getattr(show_info, 'seriesname', None):
        title.text = '%s' % show_info['seriesname']

    # year = etree.SubElement(tv_node, 'year')
    premiered = etree.SubElement(tv_node, 'premiered')
    premiered_text = self.get_show_year(show_obj, show_info, year_only=False)
    if premiered_text:
        premiered.text = '%s' % premiered_text

    # emit one <uniqueid> per info source that has a Kodi slug and a mapped id;
    # the TVDB id is marked default and remembered for the episode guide url
    has_id = False
    tvdb_id = None
    for tvid, slug in map_iter(
            lambda _tvid: (_tvid, sickbeard.TVInfoAPI(_tvid).config.get('kodi_slug')),
            list(sickbeard.TVInfoAPI().all_sources)):
        mid = slug and show_obj.ids[tvid].get('id')
        if mid:
            has_id = True
            kwargs = dict(type=slug)
            if TVINFO_TVDB == tvid:
                kwargs.update(dict(default='true'))
                tvdb_id = str(mid)
            uniqueid = etree.SubElement(tv_node, 'uniqueid', **kwargs)
            # IMDb ids carry their 'tt' prefix
            uniqueid.text = '%s%s' % (('', 'tt')[TVINFO_IMDB == tvid], mid)
    if not has_id:
        logger.log(
            'Incomplete info for show with id %s on %s, skipping it' %
            (show_ID, sickbeard.TVInfoAPI(show_obj.tvid).name), logger.ERROR)
        return False

    ratings = etree.SubElement(tv_node, 'ratings')
    if None is not getattr(show_info, 'rating', None):
        # todo: name dynamic depending on source
        rating = etree.SubElement(ratings, 'rating', name='thetvdb', max='10')
        rating_value = etree.SubElement(rating, 'value')
        rating_value.text = '%s' % show_info['rating']
        if None is not getattr(show_info, 'siteratingcount', None):
            ratings_votes = etree.SubElement(rating, 'votes')
            ratings_votes.text = '%s' % show_info['siteratingcount']

    plot = etree.SubElement(tv_node, 'plot')
    if None is not getattr(show_info, 'overview', None):
        plot.text = '%s' % show_info['overview']

    episodeguide = etree.SubElement(tv_node, 'episodeguide')
    episodeguideurl = etree.SubElement(episodeguide, 'url', post='yes', cache='auth.json')
    if tvdb_id:
        episodeguideurl.text = sickbeard.TVInfoAPI(
            TVINFO_TVDB).config['epg_url'].replace('{MID}', tvdb_id)

    mpaa = etree.SubElement(tv_node, 'mpaa')
    if None is not getattr(show_info, 'contentrating', None):
        mpaa.text = '%s' % show_info['contentrating']

    genre = etree.SubElement(tv_node, 'genre')
    if None is not getattr(show_info, 'genre', None):
        if isinstance(show_info['genre'], string_types):
            genre.text = ' / '.join([
                x.strip() for x in show_info['genre'].split('|') if x.strip()
            ])

    studio = etree.SubElement(tv_node, 'studio')
    if None is not getattr(show_info, 'network', None):
        studio.text = '%s' % show_info['network']

    self.add_actor_element(show_info, etree, tv_node)

    # Make it purdy
    sg_helpers.indent_xml(tv_node)
    # output valid xml
    # data = etree.ElementTree(tv_node)

    # output non valid xml that Kodi accepts
    # (un-escape the quotes inside the episodeguide element only)
    data = decode_str(etree.tostring(tv_node))
    parts = data.split('episodeguide')
    if 3 == len(parts):
        data = 'episodeguide'.join(
            [parts[0], parts[1].replace('&quot;', '"'), parts[2]])
    return data
def get_utf8(self):
    """Decode and return this entity's raw data as UTF-8 text.

    Undecodable byte sequences are replaced rather than raising.
    """
    raw = self.entity_data
    return decode_str(raw, 'utf-8', 'replace')
def __unicode__(self):
    """Return the stored value coerced to text (py2 unicode protocol hook)."""
    text = decode_str(self.value)
    return text
def _notify(self, title, body, hosts=None, username=None, password=None, **kwargs):
    """Internal wrapper for the notify_snatch and notify_download functions

    Detects JSON-RPC version then branches the logic for either the JSON-RPC
    or legacy HTTP API methods.

    Args:
        title: Title of the notice to send
        body: Message body of the notice to send
        hosts: XBMC webserver host:port (comma-separated list accepted)
        username: XBMC webserver username
        password: XBMC webserver password

    Returns:
        Returns a list results in the format of host:ip:result
        The result will either be 'OK' or False, this is used to be parsed
        by the calling function.
    """
    # fall back to the configured defaults for anything not supplied
    hosts = self._choose(hosts, sickbeard.XBMC_HOST)
    username = self._choose(username, sickbeard.XBMC_USERNAME)
    password = self._choose(password, sickbeard.XBMC_PASSWORD)

    success = False
    result = []
    for cur_host in [x.strip() for x in hosts.split(',')]:
        cur_host = unquote_plus(cur_host)

        self._log(u'Sending notification to "%s"' % cur_host)

        xbmcapi = self._get_xbmc_version(cur_host, username, password)
        if xbmcapi:
            if 4 >= xbmcapi:
                # legacy HTTP API path (XBMC <= v11)
                self._log_debug(u'Detected version <= 11, using HTTP API')
                # NOTE(review): str + title.encode('utf-8') concatenates str with
                # bytes — a TypeError on py3; looks py2-only, confirm target
                command = dict(command='ExecBuiltIn',
                               parameter='Notification(' + title.encode('utf-8')
                                         + ',' + body.encode('utf-8') + ')')
                notify_result = self._send_to_xbmc(command, cur_host, username, password)
                if notify_result:
                    result += [cur_host + ':' + str(notify_result)]
                    success |= 'OK' in notify_result or success
            else:
                # JSON-RPC path (XBMC/Kodi >= v12)
                self._log_debug(u'Detected version >= 12, using JSON API')
                # NOTE(review): %s of bytes renders "b'...'" on py3 — same caveat
                command = '{"jsonrpc":"2.0","method":"GUI.ShowNotification",' \
                          '"params":{"title":"%s","message":"%s", "image": "%s"},"id":1}' % \
                          (title.encode('utf-8'), body.encode('utf-8'), self._sg_logo_url)
                notify_result = self._send_to_xbmc_json(command, cur_host, username, password)
                if notify_result.get('result'):
                    result += [
                        cur_host + ':' +
                        decode_str(notify_result['result'], sickbeard.SYS_ENCODING)
                    ]
                    success |= 'OK' in notify_result or success
        else:
            # no version response: only log an error when the host is expected up
            if sickbeard.XBMC_ALWAYS_ON or self._testing:
                self._log_error(
                    u'Failed to detect version for "%s", check configuration and try again'
                    % cur_host)
            result += [cur_host + ':No response']
            success = False

    # (summary-text, per-host details) selected by overall success flag
    return self._choose(('Success, all hosts tested', '<br />\n'.join(result))[not bool(success)],
                        bool(success))