def deserialize_data(value):
    try:
        return pickle.loads(value)
    except pickle.UnpicklingError as exc:
        LOG.error(
            'It was not possible to deserialize the cache data, try purge cache from expert settings menu'
        )
        raise CacheMiss from exc
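For context, a minimal caller sketch of how the CacheMiss raised above is meant to be handled: a failed deserialization is treated like an ordinary cache miss. The cache_get, cache_add and serialize_data helpers here are hypothetical, they are not shown on this page.

def get_or_fetch(bucket, identifier, fetch_func):
    # Sketch only (assumed helper names): fall back to a fresh fetch
    # whenever the cached value cannot be deserialized
    try:
        return deserialize_data(cache_get(bucket, identifier))
    except CacheMiss:
        value = fetch_func()
        cache_add(bucket, identifier, serialize_data(value))
        return value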
Example #2
 def call(self, data):
     """In memory reference for the target func"""
     try:
         _data = pickle.loads(b64decode(data))
         _call(self._func, _data)
     except Exception:  # pylint: disable=broad-except
         import traceback
         LOG.error(traceback.format_exc())
Example #3
 def _add_db(self, row_data):
     try:
         cursor = self.conn.cursor()
         query = ('REPLACE INTO cache_data (bucket, identifier, value, expires, last_modified) '
                  'VALUES(?, ?, ?, ?, ?)')
         cursor.execute(query, row_data)
     except sql.Error as exc:
         LOG.error('SQLite error {}:', exc.args[0])
         raise_from(DBSQLiteError, exc)
Example #4
def _set_kodi_settings(system):
    """Method for self-configuring Kodi settings"""
    if system == 'android':
        # Media Codec hardware acceleration is mandatory, otherwise only the audio stream is played
        try:
            json_rpc('Settings.SetSettingValue', {'setting': 'videoplayer.usemediacodecsurface', 'value': True})
            json_rpc('Settings.SetSettingValue', {'setting': 'videoplayer.usemediacodec', 'value': True})
        except IOError as exc:
            LOG.error('Changing Kodi settings caused the following error: {}', exc)
Example #5
 def _add_db(self, bucket_name, identifier, data, expires):
     try:
         cursor = self.conn.cursor()
         query = ('REPLACE INTO cache_data (bucket, identifier, value, expires, last_modified) '
                  'VALUES(?, ?, ?, ?, ?)')
         cursor.execute(query, (bucket_name, identifier, sql.Binary(data), expires, int(time())))
     except sql.Error as exc:
         LOG.error('SQLite error {}:', exc.args[0])
         raise DBSQLiteError
Example #6
 def callback_event_video_queue(self, data=None):
     """Callback to add a video event"""
     try:
         self.add_event_to_queue(data['event_type'], data['event_data'], data['player_state'])
     except Exception as exc:  # pylint: disable=broad-except
         import traceback
         from resources.lib.kodi.ui import show_addon_error_info
         LOG.error(traceback.format_exc())
         show_addon_error_info(exc)
Example #7
 def onNotification(self, sender, method, data):  # pylint: disable=unused-argument
     """
     Callback for Kodi notifications that handles and dispatches playback events
     """
     # WARNING: Do not get playerid from 'data',
     # because when the Up Next add-on plays a video while we are inside the Netflix add-on
     # (and not externally, e.g. from the Kodi library) the playerid becomes -1, an id that does not exist
     if not self.tracking or not method.startswith('Player.'):
         return
     try:
         if method == 'Player.OnPlay':
             if self.init_count > 0:
                 # In this case the user has chosen to play another video while one is already playing,
                 # so we send the missing Stop event for the current video
                 self._on_playback_stopped()
             self._initialize_am()
         elif method == 'Player.OnAVStart':
             self._on_playback_started()
             self.tracking_tick = True
         elif method == 'Player.OnSeek':
             self._on_playback_seek(json.loads(data)['player']['time'])
         elif method == 'Player.OnPause':
             self._is_pause_called = True
             self._on_playback_pause()
         elif method == 'Player.OnResume':
             # Kodi fires this event instead of the "Player.OnStop" event when you try to play a video
             # while another one is already playing (also if the current video is paused) (does not happen on RPi devices)
             # It can be one of the following cases:
             # - When you use the ctx menu "Play From Here", this happens when you click the next button
             # - When you use the Up Next add-on
             # - When you play a non-Netflix video while a Netflix video is playing in the background
             # - When you play a video over another one already playing (back in the menus)
             if not self._is_pause_called:
                 return
             if self.init_count == 0:
                 # This should never happen; we have to ignore this event when you try to play a video
                 # while another non-Netflix video is playing
                 return
             self._is_pause_called = False
             self._on_playback_resume()
         elif method == 'Player.OnStop':
             self.tracking = False
             if self.active_player_id is None:
                 # If playback does not start due to an error in stream initialization,
                 # the OnAVStart notification will not be fired, so active_player_id will be None
                 LOG.debug('ActionController: Player.OnStop event has been ignored')
                 LOG.warn('ActionController: Action managers disabled due to a playback initialization error')
                 self.action_managers = None
                 self.init_count -= 1
                 return
             self._on_playback_stopped()
     except Exception:  # pylint: disable=broad-except
         import traceback
         LOG.error(traceback.format_exc())
         self.tracking = False
         self.tracking_tick = False
         self.init_count = 0
Example #8
 def _tick_and_wait_for_abort(self):
     try:
         self.library_updater.on_service_tick()
         G.CACHE_MANAGEMENT.on_service_tick()
     except Exception as exc:  # pylint: disable=broad-except
         import traceback
         from resources.lib.kodi.ui import show_notification
         LOG.error(traceback.format_exc())
         show_notification(': '.join((exc.__class__.__name__, str(exc))))
     return G.SETTINGS_MONITOR.waitForAbort(1)
Example #9
def _process_json_response(response):
    """Processes the response data by returning header and payloads in JSON format and check for possible MSL error"""
    try:
        data = json.loads('[' + response.replace('}{', '},{') + ']')
        # In the 'data' list, the first dict is always the header or the error
        payloads = [msg_part for msg_part in data if 'payload' in msg_part]
        return _raise_if_error(data[0]), payloads
    except ValueError as exc:
        LOG.error('Unable to load json data {}', response)
        raise MSLError('Unable to load json data') from exc
Example #10
def _perform_ipc_return_call(func, data, func_name=None):
    try:
        result = _call(func, data)
    except Exception as exc:  # pylint: disable=broad-except
        if exc.__class__.__name__ not in ['CacheMiss', 'MetadataNotAvailable']:
            LOG.error('IPC callback raised exception: {exc}', exc=exc)
            import traceback
            LOG.error(G.py2_decode(traceback.format_exc(), 'latin-1'))
        result = ipc_convert_exc_to_json(exc)
    return _execute_addonsignals_return_call(result, func_name)
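As a rough illustration (an assumption, the real helper is not shown on this page), ipc_convert_exc_to_json presumably wraps the exception into a plain serializable dict along these lines:

def ipc_convert_exc_to_json_sketch(exc):
    # Sketch only: expose the exception class name and message so the
    # calling side can rebuild and re-raise the error after the IPC call
    return {
        'error': exc.__class__.__name__,  # e.g. 'CacheMiss'
        'message': str(exc),
    }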
Example #11
 def _delete_expired_db(self, bucket_names, timestamp):
     query = 'DELETE FROM cache_data WHERE ('
     query += ' OR '.join(['bucket = ?'] * len(bucket_names))
     query += ') AND expires < ?'
     bucket_names.append(timestamp)
     try:
         cursor = self.conn.cursor()
         cursor.execute(query, bucket_names)
     except sql.Error as exc:
         LOG.error('SQLite error {}:', exc.args[0])
         raise_from(DBSQLiteError, exc)
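For illustration, with two hypothetical bucket names the snippet above builds the following parametrized statement (note that the timestamp is appended to bucket_names in place):

bucket_names = ['bucket_a', 'bucket_b']  # hypothetical values
query = 'DELETE FROM cache_data WHERE ('
query += ' OR '.join(['bucket = ?'] * len(bucket_names))
query += ') AND expires < ?'
# query == 'DELETE FROM cache_data WHERE (bucket = ? OR bucket = ?) AND expires < ?'
# and it is executed with ['bucket_a', 'bucket_b', timestamp] as parameters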
Example #12
 def _tick_and_wait_for_abort(self):
     try:
         self.controller.on_service_tick()
         self.library_updater.on_service_tick()
         G.CACHE_MANAGEMENT.on_service_tick()
     except Exception as exc:  # pylint: disable=broad-except
         import traceback
         from resources.lib.kodi.ui import show_notification
         LOG.error(G.py2_decode(traceback.format_exc(), 'latin-1'))
         show_notification(': '.join((exc.__class__.__name__, unicode(exc))))
     return self.controller.waitForAbort(1)
Example #13
def save(account_hash, cookie_jar, log_output=True):
    """Save a cookie jar to file and in-memory storage"""
    if log_output:
        log_cookie(cookie_jar)
    cookie_file = xbmcvfs.File(cookie_file_path(account_hash), 'wb')
    try:
        # pickle.dump(cookie_jar, cookie_file)
        cookie_file.write(bytearray(pickle.dumps(cookie_jar)))
    except Exception as exc:  # pylint: disable=broad-except
        LOG.error('Failed to save cookies to file: {exc}', exc=exc)
    finally:
        cookie_file.close()
Example #14
 def _delete_db(self, bucket_name, identifier, including_suffixes):
     try:
         cursor = self.conn.cursor()
         if including_suffixes:
             identifier += '%'
             query = 'DELETE FROM cache_data WHERE bucket = ? AND identifier LIKE ?'
         else:
             query = 'DELETE FROM cache_data WHERE bucket = ? AND identifier = ?'
         cursor.execute(query, (bucket_name, identifier))
     except sql.Error as exc:
         LOG.error('SQLite error {}:', exc.args[0])
         raise_from(DBSQLiteError, exc)
Example #15
 def _clear_db(self, bucket=None):
     try:
         cursor = self.conn.cursor()
         if bucket is None:
             query = 'DELETE FROM cache_data'
             cursor.execute(query)
         else:
             query = 'DELETE FROM cache_data WHERE bucket = ?'
             cursor.execute(query, (bucket['name'], ))
     except sql.Error as exc:
         LOG.error('SQLite error {}:', exc.args[0])
         raise_from(DBSQLiteError, exc)
Example #16
def delete_cache_folder():
    # Delete the cache folder in the add-on userdata (no longer needed with the new cache management)
    cache_path = os.path.join(G.DATA_PATH, 'cache')
    if not os.path.exists(G.py2_decode(xbmc.translatePath(cache_path))):
        return
    LOG.debug('Deleting the cache folder from add-on userdata folder')
    try:
        delete_folder_contents(cache_path, True)
        xbmc.sleep(80)
        xbmcvfs.rmdir(cache_path)
    except Exception:  # pylint: disable=broad-except
        import traceback
        LOG.error(G.py2_decode(traceback.format_exc(), 'latin-1'))
Example #17
 def call(self, data):
     """Routes the call to the function associated to the class"""
     try:
         result = _call(self._func, data)
     except Exception as exc:  # pylint: disable=broad-except
         if exc.__class__.__name__ not in [
                 'CacheMiss', 'MetadataNotAvailable'
         ]:
             LOG.error('IPC callback raised exception: {exc}', exc=exc)
             import traceback
             LOG.error(traceback.format_exc())
         result = ipc_convert_exc_to_json(exc)
     return _execute_addonsignals_return_call(result, self._func.__name__)
Example #18
 def onSettingsChanged(self):
     # This method is called when the user changes the add-on settings or every time ADDON.setSetting...() is called
     if self.ignore_n_events > 0:
         self.ignore_n_events -= 1
         LOG.debug('SettingsMonitor: onSettingsChanged event ignored (remaining {})'.format(self.ignore_n_events))
         return
     try:
         self._on_change()
     except Exception as exc:  # pylint: disable=broad-except
         # If settings.xml is read/written at the same time, G.ADDON.getSetting...() could throw a TypeError
         LOG.error('SettingsMonitor: Checks failed due to an error ({})', exc)
         import traceback
         LOG.error(traceback.format_exc())
Example #19
 def website_extract_session_data(self, content, **kwargs):
     """Extract session data and handle errors"""
     try:
         return website.extract_session_data(content, **kwargs)
     except WebsiteParsingError as exc:
         LOG.error('An error occurs in extract session data: {}', exc)
         raise
     except (LoginValidateError, MbrStatusAnonymousError) as exc:
         LOG.warn('The session data is not more valid ({})', type(exc).__name__)
         common.purge_credentials()
         self.session.cookies.clear()
         common.send_signal(signal=common.Signals.CLEAR_USER_ID_TOKENS)
         raise_from(NotLoggedInError, exc)
Example #20
def handle_cache_request(server, func_name, data):
    try:
        ret_data = _call_instance_func(G.CACHE_MANAGEMENT, func_name, pickle.loads(data))
        server.send_response(200)
        server.end_headers()
        if ret_data:
            server.wfile.write(ret_data)
    except Exception as exc:  # pylint: disable=broad-except
        if not isinstance(exc, (CacheMiss, MetadataNotAvailable)):
            LOG.error('IPC callback raised exception: {exc}', exc=exc)
            import traceback
            LOG.error(traceback.format_exc())
        server.send_error(500, json.dumps(common.ipc_convert_exc_to_json(exc)))
Example #21
def handle_cache_request(server, func_name, data):
    server.send_response(200)
    server.end_headers()
    try:
        ret_data = _call_instance_func(G.CACHE_MANAGEMENT, func_name, pickle.loads(data))
    except Exception as exc:  # pylint: disable=broad-except
        if not isinstance(exc, (CacheMiss, MetadataNotAvailable)):
            LOG.error('IPC callback raised exception: {exc}', exc=exc)
            import traceback
            LOG.error(traceback.format_exc())
        ret_data = exc
    if ret_data:
        server.wfile.write(pickle.dumps(ret_data, protocol=pickle.HIGHEST_PROTOCOL))
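On the receiving side of this variant (a hypothetical sketch, the client code is not shown on this page), the caller would unpickle the response body and re-raise anything that came back as an exception:

def read_cache_response_sketch(raw_body):
    # Sketch only: mirrors the handler above, which pickles either the
    # return value or the raised exception into the HTTP response body
    ret_data = pickle.loads(raw_body)
    if isinstance(ret_data, Exception):
        raise ret_data
    return ret_data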
Example #22
def deserialize_data(value):
    try:
        if G.PY_IS_VER2:
            # On python 2 pickle.loads wants str
            from base64 import standard_b64decode
            return pickle.loads(standard_b64decode(value))
        # On python 3 pickle.loads wants bytes
        return pickle.loads(value)
    except (pickle.UnpicklingError, TypeError, EOFError):
        # TypeError/EOFError happen when standard_b64decode fails
        # This should only happen if the database data has been manually tampered with
        LOG.error('It was not possible to deserialize the cache data, try purge cache from expert settings menu')
        raise CacheMiss()
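A sketch of the matching serializer (an assumption, it is not shown on this page): the pickled data would be base64-encoded on Python 2 so it can be stored as str, and kept as raw bytes on Python 3:

def serialize_data_sketch(value):
    # Sketch only, mirroring deserialize_data() above
    if G.PY_IS_VER2:
        from base64 import standard_b64encode
        return standard_b64encode(pickle.dumps(value, protocol=2))
    return pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL)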
Example #23
def _get_player_id():
    try:
        retry = 10
        while retry:
            result = common.json_rpc('Player.GetActivePlayers')
            if result:
                return result[0]['playerid']
            time.sleep(0.1)
            retry -= 1
        LOG.warn('Player ID not obtained, fallback to ID 1')
    except IOError:
        LOG.error('Player ID not obtained, fallback to ID 1')
    return 1
Example #24
def _notify_managers(manager, notification, data):
    notify_method = getattr(manager, notification.__name__)
    try:
        if data is not None:
            notify_method(data)
        else:
            notify_method()
    except Exception as exc:  # pylint: disable=broad-except
        manager.enabled = False
        msg = '{} disabled due to exception: {}'.format(manager.name, exc)
        import traceback
        LOG.error(traceback.format_exc())
        ui.show_notification(title=common.get_local_string(30105), msg=msg)
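To illustrate the getattr() dispatch above (all names below are hypothetical): the notification argument is a callable whose __name__ selects the method to invoke on the manager:

class ExampleManager:  # hypothetical action manager
    name = 'ExampleManager'
    enabled = True

    def on_playback_started(self, data):
        print('playback started with', data)

def on_playback_started():
    """Marker callable; its __name__ picks the manager method."""

_notify_managers(ExampleManager(), on_playback_started, {'videoid': 12345})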
Example #25
 def _request(self, method, endpoint, session_refreshed, **kwargs):
     endpoint_conf = ENDPOINTS[endpoint]
     url = (_api_url(endpoint_conf['address'])
            if endpoint_conf['is_api_call'] else _document_url(
                endpoint_conf['address'], kwargs))
     data, headers, params = self._prepare_request_properties(
         endpoint_conf, kwargs)
     retry = 1
     while True:
         try:
             LOG.debug('Executing {verb} request to {url}',
                       verb='GET' if method == self.session.get else 'POST',
                       url=url)
             start = time.perf_counter()
             response = method(url=url,
                               verify=self.verify_ssl,
                               headers=headers,
                               params=params,
                               data=data,
                               timeout=8)
             LOG.debug('Request took {}s', time.perf_counter() - start)
             LOG.debug('Request returned status code {}',
                       response.status_code)
             break
         except (req_exceptions.ConnectionError,
                 req_exceptions.ReadTimeout) as exc:
             # Info on PR: https://github.com/CastagnaIT/plugin.video.netflix/pull/1046
             LOG.error('HTTP request error: {}', exc)
             if retry == 3:
                 raise
             retry += 1
             LOG.warn('Another attempt will be performed ({})', retry)
     # for redirect in response.history:
     #     LOG.warn('Redirected to: [{}] {}', redirect.status_code, redirect.url)
     if not session_refreshed:
         # We refresh the session when one of the following happens:
         # Error 404: happens when Netflix updates the build_identifier version, which causes the api address to change
         # Error 401: a generic error that can happen when the http request has failed for some reason;
         #   we allow the refresh only for the shakti endpoint, because sometimes, for unknown reasons,
         #   it is necessary to refresh the session for the request to succeed
         if response.status_code == 404 or (response.status_code == 401
                                            and endpoint == 'shakti'):
             LOG.warn('Attempt to refresh the session due to HTTP error {}',
                      response.status_code)
             if self.try_refresh_session_data():
                 return self._request(method, endpoint, True, **kwargs)
     if response.status_code == 401:
         raise HttpError401
     response.raise_for_status()
     return (_raise_api_error(response.json() if response.content else {})
             if endpoint_conf['is_api_call'] else response.content)
Example #26
def convert_to_string(value):
    if value is None:
        return None
    data_type = type(value)
    if data_type == str:
        return value
    if data_type in (int, float, bool, tuple, datetime.datetime):
        converter = _conv_standard_to_string
    elif data_type in (list, dict, OrderedDict):
        converter = _conv_json_to_string
    else:
        LOG.error('convert_to_string: Data type {} not mapped', data_type)
        raise DataTypeNotMapped
    return converter(value)
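Rough sketches of the two converters referenced above (assumptions, the real helpers are not shown on this page): standard types would go through str(), while list/dict data would be serialized to a JSON string:

import json

def _conv_standard_to_string_sketch(value):
    # Sketch only: int/float/bool/tuple/datetime rendered via str()
    return str(value)

def _conv_json_to_string_sketch(value):
    # Sketch only: list/dict/OrderedDict serialized as JSON text
    return json.dumps(value)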
Example #27
 def _execute_job(self, job_handler, job_data, list_errors):
     if not job_data:  # No metadata or unexpected job case
         return
     try:
         job_handler(job_data, get_library_path())
     except Exception as exc:  # pylint: disable=broad-except
         import traceback
         LOG.error(traceback.format_exc())
         LOG.error('{} of {} ({}) failed', job_handler.__name__,
                   job_data['videoid'], job_data['title'])
         list_errors.append({
             'title': job_data['title'],
             'error': f'{type(exc).__name__}: {exc}'
         })
Example #28
    def __init__(self):
        super().__init__()
        self.crypto_session = None
        self.keyset_id = None
        self.key_id = None
        self.hmac_key_id = None
        try:
            self.crypto_session = xbmcdrm.CryptoSession(
                'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed', 'AES/CBC/NoPadding', 'HmacSHA256')
            LOG.debug('Widevine CryptoSession successful constructed')
        except Exception as exc:  # pylint: disable=broad-except
            import traceback
            LOG.error(traceback.format_exc())
            raise MSLError('Failed to construct Widevine CryptoSession') from exc

        drm_info = {
            'version': self.crypto_session.GetPropertyString('version'),
            'system_id': self.crypto_session.GetPropertyString('systemId'),
            #  'device_unique_id': self.crypto_session.GetPropertyByteArray('deviceUniqueId')
            'hdcp_level': self.crypto_session.GetPropertyString('hdcpLevel'),
            'hdcp_level_max': self.crypto_session.GetPropertyString('maxHdcpLevel'),
            'security_level': self.crypto_session.GetPropertyString('securityLevel')
        }

        if not drm_info['version']:
            # Possible cases where no data is obtained:
            # - Device with custom ROM or without Widevine support
            # - Using a Kodi debug build with an InputStream Adaptive release build (yes, users do it)
            raise MSLError('It was not possible to get the data from Widevine CryptoSession.\r\n'
                           'Your system is not Widevine certified or you have a wrong Kodi version installed.')

        G.LOCAL_DB.set_value('drm_system_id', drm_info['system_id'], TABLE_SESSION)
        G.LOCAL_DB.set_value('drm_security_level', drm_info['security_level'], TABLE_SESSION)
        G.LOCAL_DB.set_value('drm_hdcp_level', drm_info['hdcp_level'], TABLE_SESSION)

        LOG.debug('Widevine version: {}', drm_info['version'])
        if drm_info['system_id']:
            LOG.debug('Widevine CryptoSession system id: {}', drm_info['system_id'])
        else:
            LOG.warn('Widevine CryptoSession system id not obtained!')
        LOG.debug('Widevine CryptoSession security level: {}', drm_info['security_level'])
        wv_force_sec_lev = G.LOCAL_DB.get_value('widevine_force_seclev',
                                                WidevineForceSecLev.DISABLED,
                                                table=TABLE_SESSION)
        if wv_force_sec_lev != WidevineForceSecLev.DISABLED:
            LOG.warn('Widevine security level is forced to {} by user settings!', wv_force_sec_lev)
        LOG.debug('Widevine CryptoSession current hdcp level: {}', drm_info['hdcp_level'])
        LOG.debug('Widevine CryptoSession max hdcp level supported: {}', drm_info['hdcp_level_max'])
        LOG.debug('Widevine CryptoSession algorithms: {}', self.crypto_session.GetPropertyString('algorithms'))
Example #29
def _raise_if_error(decoded_response):
    raise_error = False
    # Catch a manifest/chunk error
    if any(key in decoded_response for key in ['error', 'errordata']):
        raise_error = True
    # Catch a license error
    if 'result' in decoded_response and isinstance(
            decoded_response.get('result'), list):
        if 'error' in decoded_response['result'][0]:
            raise_error = True
    if raise_error:
        LOG.error('Full MSL error information:')
        LOG.error(json.dumps(decoded_response))
        raise MSLError(_get_error_details(decoded_response))
    return decoded_response
Example #30
def convert_to_string(value):
    if value is None:
        return None
    data_type = type(value)
    if data_type in (str, unicode):
        return value
    converter = None
    if data_type in (int, float, bool, tuple, datetime.datetime):
        converter = _conv_standard_to_string
    if data_type in (list, dict, OrderedDict):
        converter = _conv_json_to_string
    if not converter:
        LOG.error('convert_to_string: Data type {} not mapped'.format(data_type))
        raise DataTypeNotMapped
    return converter(value)