Exemplo n.º 1
0
        def _download_callback(self, document_list):
            """
            Invoked by download_<something>() as soon as data arrives
            from the web service. Note that document_list may be None
            when nothing was delivered.
            """
            if document_list is None:
                has_more = 0
            else:
                has_more = document_list.has_more()
            # stash more data?
            if document_list is not None and has_more:
                # download() will be called externally
                self._offset += len(document_list)

            if const_debug_enabled():
                const_debug_write(
                    __name__,
                    "MetadataDownloader._download_callback: %s, more: %s" %
                    (document_list, has_more))
                if document_list is not None:
                    const_debug_write(
                        __name__,
                        "MetadataDownloader._download_callback: "
                        "has_more: %s, offset: %s" %
                        (document_list.has_more(), document_list.offset()))

            self._callback(self, self._app, document_list, has_more)
Exemplo n.º 2
0
        def _download_callback(self, document_list):
            """
            Hook called by download_<something>() when the web service
            response is available. document_list can be None.
            """
            has_more = 0 if document_list is None else document_list.has_more()
            # stash more data?
            if has_more and (document_list is not None):
                # download() will be called externally
                self._offset += len(document_list)

            if const_debug_enabled():
                const_debug_write(
                    __name__,
                    "MetadataDownloader._download_callback: %s, more: %s" % (
                        document_list, has_more))
                if document_list is not None:
                    const_debug_write(
                        __name__,
                        "MetadataDownloader._download_callback: "
                        "has_more: %s, offset: %s" % (
                            document_list.has_more(),
                            document_list.offset()))

            self._callback(self, self._app, document_list, has_more)
Exemplo n.º 3
0
 def _set_cached(self, cache_key, data):
     """
     Persist a single cache entry to disk. Write failures are only
     logged when debugging is enabled (best-effort persistence).
     """
     with self._cache_dir_lock:
         try:
             return self._cacher.save(
                 cache_key, data, cache_dir=WebService.CACHE_DIR)
         except IOError as err:
             # IOError is raised when cache cannot be written to disk
             if const_debug_enabled():
                 const_debug_write(
                     __name__,
                     "WebService._set_cached(%s) = cache store error: %s" % (
                         cache_key, repr(err),))
Exemplo n.º 4
0
 def _set_cached(self, cache_key, data):
     """
     Save one cache item to disk under WebService.CACHE_DIR, holding
     the cache directory lock for the duration of the write.
     """
     with self._cache_dir_lock:
         try:
             return self._cacher.save(cache_key, data,
                                      cache_dir=WebService.CACHE_DIR)
         except IOError as err:
             # IOError is raised when cache cannot be written to disk
             if const_debug_enabled():
                 const_debug_write(
                     __name__,
                     "WebService._set_cached(%s) = cache store error: %s" %
                     (cache_key, repr(err),))
Exemplo n.º 5
0
def handle_exception(exc_class, exc_instance, exc_tb):
    """
    Global exception handler: print user-friendly diagnostics for the
    crash and, for unexpected failures, offer to submit an UGC error
    report.

    @param exc_class: exception class being handled
    @param exc_instance: exception instance
    @param exc_tb: traceback object
    """

    # restore original exception handler, to avoid loops
    uninstall_exception_handler()

    _text = TextInterface()

    # corrupted installed-packages repository: fatal, dedicated exit code
    if exc_class is SystemDatabaseError:
        _text.output(
            darkred(_("Installed packages repository corrupted. "
              "Please re-generate it")),
            importance=1,
            level="error")
        os._exit(101)

    # NOTE(review): tuple membership via "in" matches these classes
    # exactly; subclasses fall through to the generic report path below
    generic_exc_classes = (OnlineMirrorError, RepositoryError,
        PermissionDenied, FileNotFound, SPMError, SystemError)
    if exc_class in generic_exc_classes:
        _text.output(
            "%s: %s" % (exc_instance, darkred(_("Cannot continue")),),
            importance=1,
            level="error")
        os._exit(1)

    if exc_class is SystemExit:
        return

    # a broken pipe (e.g. output piped into a closed pager) is not a crash
    if issubclass(exc_class, IOError): # in Python 3.3+ it's BrokenPipeError
        if exc_instance.errno == errno.EPIPE:
            return

    if exc_class is KeyboardInterrupt:
        os._exit(1)

    t_back = entropy.tools.get_traceback(tb_obj = exc_tb)
    if const_debug_enabled():
        # debug mode: restore the real standard streams and drop into pdb
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        sys.stdin = sys.__stdin__
        entropy.tools.print_exception(tb_data = exc_tb)
        pdb.set_trace()

    # disk full / out of memory get a traceback plus a dedicated message
    if exc_class in (IOError, OSError):
        if exc_instance.errno == errno.ENOSPC:
            print_generic(t_back)
            _text.output(
                "%s: %s" % (
                    exc_instance,
                    darkred(_("Your hard drive is full! Your fault!")),),
                importance=1,
                level="error")
            os._exit(5)
        elif exc_instance.errno == errno.ENOMEM:
            print_generic(t_back)
            _text.output(
                "%s: %s" % (
                    exc_instance,
                    darkred(_("No more memory dude! Your fault!")),),
                importance=1,
                level="error")
            os._exit(5)

    # from here on the crash is unexpected: walk the user through reporting
    _text.output(
        darkred(_("Hi. My name is Bug Reporter. "
          "I am sorry to inform you that the program crashed. "
          "Well, you know, shit happens.")),
        importance=1,
        level="error")
    _text.output(
        darkred(_("But there's something you could "
                  "do to help me to be a better application.")),
        importance=1,
        level="error")
    _text.output(
        darkred(
            _("-- BUT, DO NOT SUBMIT THE SAME REPORT MORE THAN ONCE --")),
        importance=1,
        level="error")
    _text.output(
        darkred(
            _("Now I am showing you what happened. "
              "Don't panic, I'm here to help you.")),
        importance=1,
        level="error")

    entropy.tools.print_exception(tb_data = exc_tb)

    # collect the full frame data silently for the report payload
    exception_data = entropy.tools.print_exception(silent = True,
        tb_data = exc_tb, all_frame_data = True)
    exception_tback_raw = const_convert_to_rawstring(t_back)

    # dump the report to a temporary file so it survives even if the
    # network submission below fails
    error_fd, error_file = None, None
    try:
        error_fd, error_file = const_mkstemp(
            prefix="entropy.error.report.",
            suffix=".txt")

        with os.fdopen(error_fd, "wb") as ferror:
            ferror.write(
                const_convert_to_rawstring(
                    "\nRevision: %s\n\n" % (
                        etpConst['entropyversion'],))
                )
            ferror.write(
                exception_tback_raw)
            ferror.write(
                const_convert_to_rawstring("\n\n"))
            ferror.write(
                const_convert_to_rawstring(''.join(exception_data)))
            ferror.write(
                const_convert_to_rawstring("\n"))

    except (OSError, IOError) as err:
        _text.output(
            "%s: %s" % (
                err,
                darkred(
                    _("Oh well, I cannot even write to TMPDIR. "
                      "So, please copy the error and "
                      "mail [email protected]."))),
            importance=1,
            level="error")
        os._exit(1)
    finally:
        # NOTE(review): os.fdopen() takes ownership of error_fd, so this
        # close is normally a no-op kept for the failure path — confirm
        if error_fd is not None:
            try:
                os.close(error_fd)
            except OSError:
                pass

    _text.output("", level="error")

    # ask for consent before sending anything over the network
    ask_msg = _("Erm... Can I send the error, "
                "along with some other information\nabout your "
                "hardware to my creators so they can fix me? "
                "(Your IP will be logged)")
    rc = _text.ask_question(ask_msg)
    if rc == _("No"):
        _text.output(
            darkgreen(_("Ok, ok ok ok... Sorry!")),
            level="error")
        os._exit(2)

    _text.output(
        darkgreen(
            _("If you want to be contacted back "
              "(and actively supported), also answer "
              "the questions below:")
            ),
        level="error")

    # optional contact details; EOF (Ctrl+D) aborts the submission
    try:
        name = readtext(_("Your Full name:"))
        email = readtext(_("Your E-Mail address:"))
        description = readtext(_("What you were doing:"))
    except EOFError:
        os._exit(2)

    # build the error report object against the default repository;
    # any failure here degrades to the "cannot send" branch below
    try:
        from entropy.client.interfaces.qa import UGCErrorReport
        from entropy.core.settings.base import SystemSettings
        _settings = SystemSettings()
        repository_id = _settings['repositories']['default_repository']
        error = UGCErrorReport(repository_id)
    except (OnlineMirrorError, AttributeError, ImportError,):
        error = None

    result = None
    if error is not None:
        error.prepare(exception_tback_raw, name, email,
            '\n'.join([x for x in exception_data]), description)
        result = error.submit()

    if result:
        _text.output(
            darkgreen(
                _("Thank you very much. The error has been "
                  "reported and hopefully, the problem will "
                  "be solved as soon as possible.")),
            level="error")
    else:
        # submission failed: point the user at the on-disk report file
        _text.output(
            darkred(_("Ugh. Cannot send the report. "
                      "Please mail the file below "
                      "to [email protected].")),
            level="error")
        _text.output("", level="error")
        _text.output("==> %s" % (error_file,), level="error")
        _text.output("", level="error")
Exemplo n.º 6
0
def handle_exception(exc_class, exc_instance, exc_tb):
    """
    Global exception handler: print user-friendly diagnostics for the
    crash and, for unexpected failures, offer to submit an UGC error
    report.

    @param exc_class: exception class being handled
    @param exc_instance: exception instance
    @param exc_tb: traceback object
    """

    # restore original exception handler, to avoid loops
    uninstall_exception_handler()

    _text = TextInterface()

    # corrupted installed-packages repository: fatal, dedicated exit code
    if exc_class is SystemDatabaseError:
        _text.output(darkred(
            _("Installed packages repository corrupted. "
              "Please re-generate it")),
                     importance=1,
                     level="error")
        os._exit(101)

    # NOTE(review): tuple membership via "in" matches these classes
    # exactly; subclasses fall through to the generic report path below
    generic_exc_classes = (OnlineMirrorError, RepositoryError,
                           PermissionDenied, FileNotFound, SPMError,
                           SystemError)
    if exc_class in generic_exc_classes:
        _text.output("%s: %s" % (
            exc_instance,
            darkred(_("Cannot continue")),
        ),
                     importance=1,
                     level="error")
        os._exit(1)

    if exc_class is SystemExit:
        return

    # a broken pipe (e.g. output piped into a closed pager) is not a crash
    if issubclass(exc_class, IOError):  # in Python 3.3+ it's BrokenPipeError
        if exc_instance.errno == errno.EPIPE:
            return

    if exc_class is KeyboardInterrupt:
        os._exit(1)

    t_back = entropy.tools.get_traceback(tb_obj=exc_tb)
    if const_debug_enabled():
        # debug mode: restore the real standard streams and drop into pdb
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        sys.stdin = sys.__stdin__
        entropy.tools.print_exception(tb_data=exc_tb)
        pdb.set_trace()

    # disk full / out of memory get a traceback plus a dedicated message
    if exc_class in (IOError, OSError):
        if exc_instance.errno == errno.ENOSPC:
            print_generic(t_back)
            _text.output("%s: %s" % (
                exc_instance,
                darkred(_("Your hard drive is full! Your fault!")),
            ),
                         importance=1,
                         level="error")
            os._exit(5)
        elif exc_instance.errno == errno.ENOMEM:
            print_generic(t_back)
            _text.output("%s: %s" % (
                exc_instance,
                darkred(_("No more memory dude! Your fault!")),
            ),
                         importance=1,
                         level="error")
            os._exit(5)

    # from here on the crash is unexpected: walk the user through reporting
    _text.output(darkred(
        _("Hi. My name is Bug Reporter. "
          "I am sorry to inform you that the program crashed. "
          "Well, you know, shit happens.")),
                 importance=1,
                 level="error")
    _text.output(darkred(
        _("But there's something you could "
          "do to help me to be a better application.")),
                 importance=1,
                 level="error")
    _text.output(darkred(
        _("-- BUT, DO NOT SUBMIT THE SAME REPORT MORE THAN ONCE --")),
                 importance=1,
                 level="error")
    _text.output(darkred(
        _("Now I am showing you what happened. "
          "Don't panic, I'm here to help you.")),
                 importance=1,
                 level="error")

    entropy.tools.print_exception(tb_data=exc_tb)

    # collect the full frame data silently for the report payload
    exception_data = entropy.tools.print_exception(silent=True,
                                                   tb_data=exc_tb,
                                                   all_frame_data=True)
    exception_tback_raw = const_convert_to_rawstring(t_back)

    # dump the report to a temporary file so it survives even if the
    # network submission below fails
    error_fd, error_file = None, None
    try:
        error_fd, error_file = const_mkstemp(prefix="entropy.error.report.",
                                             suffix=".txt")

        with os.fdopen(error_fd, "wb") as ferror:
            ferror.write(
                const_convert_to_rawstring("\nRevision: %s\n\n" %
                                           (etpConst['entropyversion'], )))
            ferror.write(exception_tback_raw)
            ferror.write(const_convert_to_rawstring("\n\n"))
            ferror.write(const_convert_to_rawstring(''.join(exception_data)))
            ferror.write(const_convert_to_rawstring("\n"))

    except (OSError, IOError) as err:
        _text.output("%s: %s" %
                     (err,
                      darkred(
                          _("Oh well, I cannot even write to TMPDIR. "
                            "So, please copy the error and "
                            "mail [email protected]."))),
                     importance=1,
                     level="error")
        os._exit(1)
    finally:
        # NOTE(review): os.fdopen() takes ownership of error_fd, so this
        # close is normally a no-op kept for the failure path — confirm
        if error_fd is not None:
            try:
                os.close(error_fd)
            except OSError:
                pass

    _text.output("", level="error")

    # ask for consent before sending anything over the network
    ask_msg = _("Erm... Can I send the error, "
                "along with some other information\nabout your "
                "hardware to my creators so they can fix me? "
                "(Your IP will be logged)")
    rc = _text.ask_question(ask_msg)
    if rc == _("No"):
        _text.output(darkgreen(_("Ok, ok ok ok... Sorry!")), level="error")
        os._exit(2)

    _text.output(darkgreen(
        _("If you want to be contacted back "
          "(and actively supported), also answer "
          "the questions below:")),
                 level="error")

    # optional contact details; EOF (Ctrl+D) aborts the submission
    try:
        name = readtext(_("Your Full name:"))
        email = readtext(_("Your E-Mail address:"))
        description = readtext(_("What you were doing:"))
    except EOFError:
        os._exit(2)

    # build the error report object against the default repository;
    # any failure here degrades to the "cannot send" branch below
    try:
        from entropy.client.interfaces.qa import UGCErrorReport
        from entropy.core.settings.base import SystemSettings
        _settings = SystemSettings()
        repository_id = _settings['repositories']['default_repository']
        error = UGCErrorReport(repository_id)
    except (
            OnlineMirrorError,
            AttributeError,
            ImportError,
    ):
        error = None

    result = None
    if error is not None:
        error.prepare(exception_tback_raw, name, email,
                      '\n'.join([x for x in exception_data]), description)
        result = error.submit()

    if result:
        _text.output(darkgreen(
            _("Thank you very much. The error has been "
              "reported and hopefully, the problem will "
              "be solved as soon as possible.")),
                     level="error")
    else:
        # submission failed: point the user at the on-disk report file
        _text.output(darkred(
            _("Ugh. Cannot send the report. "
              "Please mail the file below "
              "to [email protected].")),
                     level="error")
        _text.output("", level="error")
        _text.output("==> %s" % (error_file, ), level="error")
        _text.output("", level="error")
Exemplo n.º 7
0
    def _download_file(self,
                       url,
                       download_path,
                       digest=None,
                       resume=True,
                       package_id=None,
                       repository_id=None):
        """
        Internal method. Try to download the package file.

        @param url: package file URL
        @param download_path: destination path on disk
        @keyword digest: expected md5 checksum, or None to skip validation
        @keyword resume: whether to resume a partial download
        @keyword package_id: package identifier; together with
            repository_id it enables the differential download setup
        @keyword repository_id: repository identifier
        @return: tuple composed by (exit status, data transfer rate,
            resumed flag). Exit status is 0 on success, -1 on generic
            error, -2 on checksum mismatch, -3 (not found) or -4
            (timeout) when no usable local copy exists, -100 on user
            interruption.
        """
        def do_stfu_rm(xpath):
            # best-effort removal, errors are deliberately ignored
            try:
                os.remove(xpath)
            except OSError:
                pass

        def do_get_md5sum(path):
            # return the md5 of path, or None if it cannot be read
            try:
                return entropy.tools.md5sum(path)
            except IOError:
                return None
            except OSError:
                return None

        download_path_dir = os.path.dirname(download_path)
        try:
            os.makedirs(download_path_dir, 0o755)
        except OSError as err:
            # an already-existing directory is fine, anything else is fatal
            if err.errno != errno.EEXIST:
                const_debug_write(
                    __name__, "_download_file.makedirs, %s, error: %s" %
                    (download_path_dir, err))
                return -1, 0, False

        fetch_abort_function = self._meta.get('fetch_abort_function')
        # os.path.isfile() already implies existence; the extra
        # os.path.exists() call was redundant
        existed_before = os.path.isfile(download_path)

        avail_data = self._settings['repositories']['available']
        repo_data = avail_data[self._repository_id]

        basic_user = repo_data.get('username')
        basic_pwd = repo_data.get('password')
        # validate TLS certificates unless explicitly disabled ("false")
        https_validate_cert = repo_data.get(
            'https_validate_cert') != "false"

        fetch_intf = self._entropy._url_fetcher(
            url,
            download_path,
            resume=resume,
            abort_check_func=fetch_abort_function,
            http_basic_user=basic_user,
            http_basic_pwd=basic_pwd,
            https_validate_cert=https_validate_cert)

        if (package_id is not None) and (repository_id is not None):
            self._setup_differential_download(self._entropy._url_fetcher, url,
                                              resume, download_path,
                                              repository_id, package_id)

        data_transfer = 0
        resumed = False
        try:
            # make sure that we don't need to abort already
            # doing the check here avoids timeouts
            if fetch_abort_function is not None:
                fetch_abort_function()

            fetch_checksum = fetch_intf.download()
            data_transfer = fetch_intf.get_transfer_rate()
            resumed = fetch_intf.is_resumed()
        except (KeyboardInterrupt, InterruptError):
            return -100, data_transfer, resumed

        except NameError:
            raise

        except:
            # NOTE: deliberately broad, any other fetcher failure maps
            # to the generic download error status
            if const_debug_enabled():
                self._entropy.output("fetch_file:",
                                     importance=1,
                                     level="warning",
                                     header=red("   ## "))
                entropy.tools.print_traceback()
            if (not existed_before) or (not resume):
                do_stfu_rm(download_path)
            return -1, data_transfer, resumed

        if fetch_checksum == UrlFetcher.GENERIC_FETCH_ERROR:
            # !! not found
            # maybe we already have it?
            # this handles the case where network is unavailable
            # but file is already downloaded
            fetch_checksum = do_get_md5sum(download_path)
            if (fetch_checksum != digest) or fetch_checksum is None:
                return -3, data_transfer, resumed

        elif fetch_checksum == UrlFetcher.TIMEOUT_FETCH_ERROR:
            # maybe we already have it?
            # this handles the case where network is unavailable
            # but file is already downloaded
            fetch_checksum = do_get_md5sum(download_path)
            if (fetch_checksum != digest) or fetch_checksum is None:
                return -4, data_transfer, resumed

        if digest and (fetch_checksum != digest):
            # not properly downloaded
            if (not existed_before) or (not resume):
                do_stfu_rm(download_path)
            return -2, data_transfer, resumed

        return 0, data_transfer, resumed
Exemplo n.º 8
0
 def append_many(self, opaque_list):
     """
     Append a batch of opaque objects to the backing store and emit
     the "view-filled" signal once, after all of them are in.
     """
     store_append = self._store.append
     for opaque in opaque_list:
         store_append([opaque])
     if const_debug_enabled():
         const_debug_write(__name__, "AVC: emitting view-filled")
     self.emit("view-filled")
Exemplo n.º 9
0
 def clear(self):
     """
     Empty the store (without per-item notifications), then emit the
     "view-cleared" signal to listeners.
     """
     self.clear_silent()
     if const_debug_enabled():
         const_debug_write(__name__, "AVC: emitting view-cleared")
     self.emit("view-cleared")
Exemplo n.º 10
0
    def _method_getter(self,
                       func_name,
                       params,
                       cache=True,
                       cached=False,
                       require_credentials=False,
                       file_params=None,
                       timeout=None):
        """
        Given a function name and request parameters, do all the duties required
        to get a response from the Web Service. This method raises several
        exceptions, that have to be advertised on public methods as well.

        @param func_name: API function name
        @type func_name: string
        @param params: dictionary object that will be converted into a JSON
            request string
        @type params: dict
        @keyword cache: True means use on-disk cache if available?
        @type cache: bool
        @keyword cached: if True, it will only use the on-disk cached call
            result and raise WebService.CacheMiss if not found.
        @type cached: bool
        @keyword require_credentials: True means that credentials will be added
            to the request, if credentials are not available in the local
            authentication storage, WebService.AuthenticationRequired is
            raised
        @type require_credentials: bool
        @param file_params: mapping composed by file names as key and tuple
            composed by (file_name, file object) as values
        @type file_params: dict
        @param timeout: provide specific socket timeout
        @type timeout: float
        @return: the JSON response (dict format)
        @rtype: dict
        @raise WebService.UnsupportedParameters: if input parameters are invalid
        @raise WebService.RequestError: if request cannot be satisfied
        @raise WebService.MethodNotAvailable: if API method is not available
            remotely and an error occurred (error code passed as exception
            argument)
        @raise WebService.AuthenticationRequired: if require_credentials is True
            and credentials are required.
        @raise WebService.AuthenticationFailed: if credentials are not valid
        @raise WebService.MalformedResponse: if JSON response cannot be
            converted back to dict.
        @raise WebService.UnsupportedAPILevel: if client API and Web Service
            API do not match
        @raise WebService.MethodResponseError: if method execution failed
        @raise WebService.CacheMiss: if cached=True and cached object is not
            available
        """
        # a single key identifies this (func_name, params) call on disk
        cache_key = self._get_cache_key(func_name, params)
        if cache or cached:
            # this does call: _setup_generic_params()
            obj = self._method_cached(func_name, params, cache_key=cache_key)
            if (obj is None) and cached:
                # caller asked for cache-only operation and there is none
                if const_debug_enabled():
                    const_debug_write(
                        __name__, "WebService.%s(%s) = cache miss: %s" % (
                            func_name,
                            params,
                            cache_key,
                        ))
                raise WebService.CacheMiss(
                    WebService.WEB_SERVICE_NOT_FOUND_CODE, method=func_name)
            if obj is not None:
                if const_debug_enabled():
                    const_debug_write(
                        __name__, "WebService.%s(%s) = CACHED!" % (
                            func_name,
                            params,
                        ))
                return obj
            if const_debug_enabled():
                const_debug_write(
                    __name__, "WebService.%s(%s) = NOT cached" % (
                        func_name,
                        params,
                    ))
        else:
            self._setup_generic_params(params)

        if require_credentials:
            # this can raise AuthenticationRequired
            self._setup_credentials(params)

        # perform the remote call; a non-None result is written back to
        # the cache inside the finally clause below
        obj = None
        try:
            json_response, response = self._generic_post_handler(
                func_name, params, file_params, timeout)

            # only HTTP 200 counts as a usable response
            http_status = response.status
            if http_status not in (httplib.OK, ):
                raise WebService.MethodNotAvailable(http_status,
                                                    method=func_name)

            # try to convert the JSON response
            try:
                data = json.loads(json_response)
            except (ValueError, TypeError) as err:
                raise WebService.MalformedResponse(err, method=func_name)

            # check API
            if data.get("api_rev") != WebService.SUPPORTED_API_LEVEL:
                raise WebService.UnsupportedAPILevel(
                    data['api_rev'],
                    method=func_name,
                    message=data.get("message"))

            code = data.get("code", -1)
            if code == WebService.WEB_SERVICE_INVALID_CREDENTIALS_CODE:
                # invalid credentials, ask again login data
                raise WebService.AuthenticationFailed(
                    code, method=func_name, message=data.get("message"))
            if code != WebService.WEB_SERVICE_RESPONSE_CODE_OK:
                raise WebService.MethodResponseError(
                    code, method=func_name, message=data.get("message"))

            # the actual payload lives under the "r" key
            if "r" not in data:
                raise WebService.MalformedResponse("r not found",
                                                   method=func_name,
                                                   message=data.get("message"))
            obj = data["r"]

            if const_debug_enabled():
                const_debug_write(
                    __name__, "WebService.%s(%s) = fetched!" % (
                        func_name,
                        params,
                    ))
            return obj

        finally:
            if obj is not None:
                # store cache
                self._set_cached(cache_key, obj)
Exemplo n.º 11
0
 def append_many(self, opaque_list):
     """
     Add every item of opaque_list to the store, then notify
     listeners through a single "view-filled" emission.
     """
     for item in opaque_list:
         self._store.append([item])
     if const_debug_enabled():
         const_debug_write(__name__, "AVC: emitting view-filled")
     self.emit("view-filled")
Exemplo n.º 12
0
 def append(self, opaque):
     """
     Add one opaque object to the backing store, then signal
     listeners via "view-filled".
     """
     row = [opaque]
     self._store.append(row)
     if const_debug_enabled():
         const_debug_write(__name__, "AVC: emitting view-filled")
     self.emit("view-filled")
Exemplo n.º 13
0
 def clear(self):
     """Wipe the view content and broadcast "view-cleared"."""
     self.clear_silent()
     if const_debug_enabled():
         const_debug_write(__name__, "AVC: emitting view-cleared")
     self.emit("view-cleared")
Exemplo n.º 14
0
 def _ui_redraw_callback(*args):
     """Schedule a "redraw-request" emission on the GTK main loop."""
     if const_debug_enabled():
         const_debug_write(__name__, "_ui_redraw_callback()")
     GLib.idle_add(self.emit, "redraw-request", pkg_match)
Exemplo n.º 15
0
    def get_icon(self, app, cached=False):
        """
        Return the icon pixbuf for the given app, memoizing results in
        the class-wide AppListStore._ICON_CACHE and falling back to
        self._missing_icon whenever a real icon cannot be produced.

        @param app: application object exposing get_details() and
            get_icon()
        @keyword cached: if True, only already-available data is used;
            no new icon fetch is triggered
        """

        pkg_match = app.get_details().pkg
        cached_icon = AppListStore._ICON_CACHE.get(pkg_match)
        if cached_icon is not None:
            return cached_icon
        if cached:
            # then return the default icon
            return self._missing_icon

        def _still_visible():
            # lets lower layers check whether the package is still shown
            return self.visible(pkg_match)

        icon, cache_hit = app.get_icon(
            _still_visible_cb=_still_visible,
            cached=cached)
        if const_debug_enabled():
            # NOTE(review): the doubled parentheses are redundant — the
            # argument is still one 4-tuple, matching the 4 placeholders
            const_debug_write(__name__,
                              "get_icon({%s, %s}) = %s, hit: %s" % (
                    (pkg_match, app.name, icon, cache_hit,)))

        if icon is None:
            if cache_hit:
                # this means that there is no icon for package
                # and so we should not keep bugging underlying
                # layers with requests
                AppListStore._ICON_CACHE[pkg_match] = self._missing_icon
            return self._missing_icon

        icon_path = icon.local_document()
        icon_path_exists = False
        if icon_path:
            icon_path_exists = os.path.isfile(icon_path)
        if not icon_path_exists:
            return self._missing_icon

        try:
            img = Gtk.Image.new_from_file(icon_path)
        except GObject.GError:
            return self._missing_icon

        img_buf = img.get_pixbuf()
        if img_buf is None:
            # wth, invalid crap
            return self._missing_icon
        w, h = img_buf.get_width(), img_buf.get_height()
        del img_buf
        del img
        if w < 1:
            # not legit
            return self._missing_icon
        width = AppListStore.ICON_SIZE
        # NOTE(review): on Python 3 true division makes height a float;
        # new_from_file_at_size may require an int here — confirm
        height = width * h / w

        try:
            pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(
                icon_path, width, height)
        except GObject.GError:
            # loading failed: drop the file on disk (best-effort)
            try:
                os.remove(icon_path)
            except OSError:
                pass
            return self._missing_icon

        AppListStore._ICON_CACHE[pkg_match] = pixbuf
        return pixbuf
Exemplo n.º 16
0
    def get_icon(self, app, cached=False):
        """
        Return the icon pixbuf for the given app, memoizing results in
        the class-wide AppListStore._ICON_CACHE and falling back to
        self._missing_icon whenever a real icon cannot be produced.

        @param app: application object exposing get_details() and
            get_icon()
        @keyword cached: if True, only already-available data is used;
            no new icon fetch is triggered
        """

        pkg_match = app.get_details().pkg
        cached_icon = AppListStore._ICON_CACHE.get(pkg_match)
        if cached_icon is not None:
            return cached_icon
        if cached:
            # then return the default icon
            return self._missing_icon

        def _still_visible():
            # lets lower layers check whether the package is still shown
            return self.visible(pkg_match)

        icon, cache_hit = app.get_icon(_still_visible_cb=_still_visible,
                                       cached=cached)
        if const_debug_enabled():
            # NOTE(review): the doubled parentheses are redundant — the
            # argument is still one 4-tuple, matching the 4 placeholders
            const_debug_write(
                __name__, "get_icon({%s, %s}) = %s, hit: %s" % ((
                    pkg_match,
                    app.name,
                    icon,
                    cache_hit,
                )))

        if icon is None:
            if cache_hit:
                # this means that there is no icon for package
                # and so we should not keep bugging underlying
                # layers with requests
                AppListStore._ICON_CACHE[pkg_match] = self._missing_icon
            return self._missing_icon

        icon_path = icon.local_document()
        icon_path_exists = False
        if icon_path:
            icon_path_exists = os.path.isfile(icon_path)
        if not icon_path_exists:
            return self._missing_icon

        try:
            img = Gtk.Image.new_from_file(icon_path)
        except GObject.GError:
            return self._missing_icon

        img_buf = img.get_pixbuf()
        if img_buf is None:
            # wth, invalid crap
            return self._missing_icon
        w, h = img_buf.get_width(), img_buf.get_height()
        del img_buf
        del img
        if w < 1:
            # not legit
            return self._missing_icon
        width = AppListStore.ICON_SIZE
        # NOTE(review): on Python 3 true division makes height a float;
        # new_from_file_at_size may require an int here — confirm
        height = width * h / w

        try:
            pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(
                icon_path, width, height)
        except GObject.GError:
            # loading failed: drop the file on disk (best-effort)
            try:
                os.remove(icon_path)
            except OSError:
                pass
            return self._missing_icon

        AppListStore._ICON_CACHE[pkg_match] = pixbuf
        return pixbuf
Exemplo n.º 17
0
 def _ui_redraw_callback(*args):
     """Schedule a redraw-request emission for pkg_match on the main loop."""
     debugging = const_debug_enabled()
     if debugging:
         const_debug_write(__name__, "_ui_redraw_callback()")
     GLib.idle_add(self.emit, "redraw-request", pkg_match)
Exemplo n.º 18
0
    def _method_getter(self, func_name, params, cache = True,
        cached = False, require_credentials = False, file_params = None,
        timeout = None):
        """
        Given a function name and request parameters, do all the duties required
        to get a response from the Web Service. This method raises several
        exceptions, that have to be advertised on public methods as well.

        @param func_name: API function name
        @type func_name: string
        @param params: dictionary object that will be converted into a JSON
            request string
        @type params: dict
        @keyword cache: True means use on-disk cache if available?
        @type cache: bool
        @keyword cached: if True, it will only use the on-disk cached call
            result and raise WebService.CacheMiss if not found.
        @type cached: bool
        @keyword require_credentials: True means that credentials will be added
            to the request, if credentials are not available in the local
            authentication storage, WebService.AuthenticationRequired is
            raised
        @type require_credentials: bool
        @keyword file_params: mapping composed by file names as key and tuple
            composed by (file_name, file object) as values
        @type file_params: dict
        @keyword timeout: provide specific socket timeout
        @type timeout: float
        @return: the JSON response (dict format)
        @rtype: dict
        @raise WebService.UnsupportedParameters: if input parameters are invalid
        @raise WebService.RequestError: if request cannot be satisfied
        @raise WebService.MethodNotAvailable: if API method is not available
            remotely and an error occured (error code passed as exception
            argument)
        @raise WebService.AuthenticationRequired: if require_credentials is True
            and credentials are required.
        @raise WebService.AuthenticationFailed: if credentials are not valid
        @raise WebService.MalformedResponse: if JSON response cannot be
            converted back to dict.
        @raise WebService.UnsupportedAPILevel: if client API and Web Service
            API do not match
        @raise WebService.MethodResponseError: if method execution failed
        @raise WebService.CacheMiss: if cached=True and cached object is not
            available
        """
        cache_key = self._get_cache_key(func_name, params)
        if cache or cached:
            # this does call: _setup_generic_params()
            obj = self._method_cached(func_name, params, cache_key = cache_key)
            if (obj is None) and cached:
                # cached-only mode and nothing on disk: bail out
                if const_debug_enabled():
                    const_debug_write(__name__,
                        "WebService.%s(%s) = cache miss: %s" % (
                            func_name, params, cache_key,))
                raise WebService.CacheMiss(
                    WebService.WEB_SERVICE_NOT_FOUND_CODE, method = func_name)
            if obj is not None:
                if const_debug_enabled():
                    const_debug_write(__name__,
                        "WebService.%s(%s) = CACHED!" % (
                            func_name, params,))
                return obj
            if const_debug_enabled():
                const_debug_write(__name__, "WebService.%s(%s) = NOT cached" % (
                    func_name, params,))
        else:
            self._setup_generic_params(params)

        if require_credentials:
            # this can raise AuthenticationRequired
            self._setup_credentials(params)

        obj = None
        try:
            json_response, response = self._generic_post_handler(func_name,
                params, file_params, timeout)

            http_status = response.status
            if http_status not in (httplib.OK,):
                raise WebService.MethodNotAvailable(http_status,
                    method = func_name)

            # try to convert the JSON response
            try:
                data = json.loads(json_response)
            except (ValueError, TypeError) as err:
                raise WebService.MalformedResponse(err,
                    method = func_name)

            # check API
            if data.get("api_rev") != WebService.SUPPORTED_API_LEVEL:
                raise WebService.UnsupportedAPILevel(data['api_rev'],
                    method = func_name, message = data.get("message"))

            code = data.get("code", -1)
            if code == WebService.WEB_SERVICE_INVALID_CREDENTIALS_CODE:
                # invalid credentials, ask again login data
                raise WebService.AuthenticationFailed(code,
                    method = func_name, message = data.get("message"))
            if code != WebService.WEB_SERVICE_RESPONSE_CODE_OK:
                raise WebService.MethodResponseError(code,
                    method = func_name, message = data.get("message"))

            # "r" carries the actual method result payload
            if "r" not in data:
                raise WebService.MalformedResponse("r not found",
                    method = func_name, message = data.get("message"))
            obj = data["r"]

            if const_debug_enabled():
                const_debug_write(__name__, "WebService.%s(%s) = fetched!" % (
                    func_name, params,))
            return obj

        finally:
            # NOTE(review): the result is written to the on-disk cache even
            # when cache=False (writes are unconditional, only reads are
            # gated by "cache") -- looks intentional, but worth confirming
            if obj is not None:
                # store cache
                self._set_cached(cache_key, obj)
Exemplo n.º 19
0
 def append(self, opaque):
     """Append *opaque* as a new single-column row, then notify listeners."""
     row = [opaque]
     self._store.append(row)
     if const_debug_enabled():
         const_debug_write(__name__, "AVC: emitting view-filled")
     self.emit("view-filled")
Exemplo n.º 20
0
    def __cacher(self, run_until_empty=False, sync=False, _loop=False):
        """
        This is where the actual asynchronous copy takes
        place. __cacher runs on a different threads and
        all the operations done by this are atomic and
        thread-safe. It just loops over and over until
        __alive becomes False.

        @keyword run_until_empty: keep draining the cache buffer even
            when __alive is False (presumably used to flush pending
            objects at shutdown -- confirm with callers)
        @type run_until_empty: bool
        @keyword sync: if True, each commit task is joined before
            continuing, making the write synchronous
        @type sync: bool
        @keyword _loop: True when running as the recurring writer loop;
            enables blocking on the worker semaphore while idle
        @type _loop: bool
        """
        try:
            if self.__inside_with_stmt != 0:
                return
        except AttributeError:
            # interpreter shutdown
            pass

        # make sure our set delay is respected
        try:
            self.__cache_writer.set_delay(EntropyCacher.WRITEBACK_TIMEOUT)
        except AttributeError:
            # can be None
            pass

        # sleep if there's nothing to do
        if _loop:
            try:
                # CANBLOCK
                self.__worker_sem.acquire()
                # we just consumed one acquire()
                # that was dedicated to actual data,
                # put it back
                self.__worker_sem.release()
            except AttributeError:
                pass

        def _commit_data(_massive_data):
            # persist each queued ((key, cache_dir), data) pair to disk
            for (key, cache_dir), data in _massive_data:
                d_o = entropy.dump.dumpobj
                if d_o is not None:
                    d_o(key, data, dump_dir=cache_dir)

        while self.__alive or run_until_empty:

            if const_debug_enabled():
                const_debug_write(
                    __name__,
                    "EntropyCacher.__cacher: loop: %s, alive: %s, empty: %s" % (_loop, self.__alive, run_until_empty),
                )

            # hold the context lock so with-statement users cannot race
            # with this writer while we drain the buffer
            with self.__enter_context_lock:
                massive_data = []
                try:
                    massive_data_count = EntropyCacher._OBJS_WRITTEN_AT_ONCE
                except AttributeError:  # interpreter shutdown
                    break
                while massive_data_count > 0:

                    if _loop:
                        # extracted an item from worker_sem
                        # call down() on the semaphore without caring
                        # can't sleep here because we're in a critical region
                        # holding __enter_context_lock
                        self.__worker_sem.acquire(False)

                    massive_data_count -= 1
                    try:
                        data = self.__cache_buffer.pop()
                    except (ValueError, TypeError):
                        # TypeError is when objects are being destroyed
                        break  # stack empty
                    massive_data.append(data)

                if not massive_data:
                    break

                task = ParallelTask(_commit_data, massive_data)
                task.name = "EntropyCacherCommitter"
                # daemonize the writer when async so it cannot block
                # interpreter shutdown; sync callers need the join below
                task.daemon = not sync
                task.start()
                if sync:
                    task.join()

                if const_debug_enabled():
                    const_debug_write(
                        __name__, "EntropyCacher.__cacher [%s], writing %s objs" % (task, len(massive_data))
                    )

                if EntropyCacher.STASHING_CACHE:
                    # committed objects no longer need their stash entry
                    for (key, cache_dir), data in massive_data:
                        try:
                            del self.__stashing_cache[(key, cache_dir)]
                        except (AttributeError, KeyError):
                            continue
                del massive_data[:]
                del massive_data
Exemplo n.º 21
0
    def __cacher(self, run_until_empty=False, sync=False, _loop=False):
        """
        This is where the actual asynchronous copy takes
        place. __cacher runs on a different threads and
        all the operations done by this are atomic and
        thread-safe. It just loops over and over until
        __alive becomes False.

        @keyword run_until_empty: keep draining the cache buffer even
            when __alive is False (presumably used to flush pending
            objects at shutdown -- confirm with callers)
        @type run_until_empty: bool
        @keyword sync: if True, each commit task is joined before
            continuing, making the write synchronous
        @type sync: bool
        @keyword _loop: True when running as the recurring writer loop;
            enables blocking on the worker semaphore while idle
        @type _loop: bool
        """
        try:
            if self.__inside_with_stmt != 0:
                return
        except AttributeError:
            # interpreter shutdown
            pass

        # make sure our set delay is respected
        try:
            self.__cache_writer.set_delay(EntropyCacher.WRITEBACK_TIMEOUT)
        except AttributeError:
            # can be None
            pass

        # sleep if there's nothing to do
        if _loop:
            try:
                # CANBLOCK
                self.__worker_sem.acquire()
                # we just consumed one acquire()
                # that was dedicated to actual data,
                # put it back
                self.__worker_sem.release()
            except AttributeError:
                pass

        def _commit_data(_massive_data):
            # persist each queued ((key, cache_dir), data) pair to disk
            for (key, cache_dir), data in _massive_data:
                d_o = entropy.dump.dumpobj
                if d_o is not None:
                    d_o(key, data, dump_dir=cache_dir)

        while self.__alive or run_until_empty:

            if const_debug_enabled():
                const_debug_write(
                    __name__,
                    "EntropyCacher.__cacher: loop: %s, alive: %s, empty: %s" %
                    (
                        _loop,
                        self.__alive,
                        run_until_empty,
                    ))

            # hold the context lock so with-statement users cannot race
            # with this writer while we drain the buffer
            with self.__enter_context_lock:
                massive_data = []
                try:
                    massive_data_count = EntropyCacher._OBJS_WRITTEN_AT_ONCE
                except AttributeError:  # interpreter shutdown
                    break
                while massive_data_count > 0:

                    if _loop:
                        # extracted an item from worker_sem
                        # call down() on the semaphore without caring
                        # can't sleep here because we're in a critical region
                        # holding __enter_context_lock
                        self.__worker_sem.acquire(False)

                    massive_data_count -= 1
                    try:
                        data = self.__cache_buffer.pop()
                    except (
                            ValueError,
                            TypeError,
                    ):
                        # TypeError is when objects are being destroyed
                        break  # stack empty
                    massive_data.append(data)

                if not massive_data:
                    break

                task = ParallelTask(_commit_data, massive_data)
                task.name = "EntropyCacherCommitter"
                # daemonize the writer when async so it cannot block
                # interpreter shutdown; sync callers need the join below
                task.daemon = not sync
                task.start()
                if sync:
                    task.join()

                if const_debug_enabled():
                    const_debug_write(
                        __name__,
                        "EntropyCacher.__cacher [%s], writing %s objs" % (
                            task,
                            len(massive_data),
                        ))

                if EntropyCacher.STASHING_CACHE:
                    # committed objects no longer need their stash entry
                    for (key, cache_dir), data in massive_data:
                        try:
                            del self.__stashing_cache[(key, cache_dir)]
                        except (
                                AttributeError,
                                KeyError,
                        ):
                            continue
                del massive_data[:]
                del massive_data
Exemplo n.º 22
0
    def _download_file(self, url, download_path, digest = None,
                       resume = True, package_id = None,
                       repository_id = None):
        """
        Internal method. Try to download the package file.

        @param url: URL to fetch
        @param download_path: destination file path on disk
        @keyword digest: expected md5 checksum of the file, if any
        @keyword resume: whether to try resuming a partial download
        @keyword package_id: package identifier, used together with
            repository_id to set up differential download
        @keyword repository_id: repository identifier
        @return: tuple (status, data_transfer, resumed); status is 0 on
            success, -1 on generic/setup error, -2 on checksum mismatch,
            -3 (not found) / -4 (timeout) when no usable local copy
            exists, -100 on keyboard interrupt
        """

        def do_stfu_rm(xpath):
            # best-effort removal, ignore filesystem errors
            try:
                os.remove(xpath)
            except OSError:
                pass

        def do_get_md5sum(path):
            # md5 of path, or None if the file cannot be read
            try:
                return entropy.tools.md5sum(path)
            except (IOError, OSError):
                return None

        download_path_dir = os.path.dirname(download_path)
        try:
            os.makedirs(download_path_dir, 0o755)
        except OSError as err:
            # an already existing directory is fine
            if err.errno != errno.EEXIST:
                const_debug_write(
                    __name__,
                    "_download_file.makedirs, %s, error: %s" % (
                        download_path_dir, err))
                return -1, 0, False

        fetch_abort_function = self._meta.get('fetch_abort_function')
        # os.path.isfile() already implies existence
        existed_before = os.path.isfile(download_path)

        fetch_intf = self._entropy._url_fetcher(
            url, download_path, resume = resume,
            abort_check_func = fetch_abort_function)

        if (package_id is not None) and (repository_id is not None):
            self._setup_differential_download(
                self._entropy._url_fetcher, url,
                resume, download_path, repository_id, package_id)

        data_transfer = 0
        resumed = False
        try:
            # make sure that we don't need to abort already
            # doing the check here avoids timeouts
            if fetch_abort_function is not None:
                fetch_abort_function()

            fetch_checksum = fetch_intf.download()
            data_transfer = fetch_intf.get_transfer_rate()
            resumed = fetch_intf.is_resumed()
        except KeyboardInterrupt:
            return -100, data_transfer, resumed

        except NameError:
            # programming error, let it propagate
            raise

        except Exception:
            # any other fetcher failure maps to the generic error status;
            # narrowed from a bare "except:" so SystemExit/GeneratorExit
            # are no longer swallowed as download errors
            if const_debug_enabled():
                self._entropy.output(
                    "fetch_file:",
                    importance = 1,
                    level = "warning",
                    header = red("   ## ")
                )
                entropy.tools.print_traceback()
            if (not existed_before) or (not resume):
                # drop the incomplete file unless we may resume a
                # pre-existing download later
                do_stfu_rm(download_path)
            return -1, data_transfer, resumed

        if fetch_checksum == UrlFetcher.GENERIC_FETCH_ERROR:
            # !! not found
            # maybe we already have it?
            # this handles the case where network is unavailable
            # but file is already downloaded
            fetch_checksum = do_get_md5sum(download_path)
            if (fetch_checksum != digest) or fetch_checksum is None:
                return -3, data_transfer, resumed

        elif fetch_checksum == UrlFetcher.TIMEOUT_FETCH_ERROR:
            # maybe we already have it?
            # this handles the case where network is unavailable
            # but file is already downloaded
            fetch_checksum = do_get_md5sum(download_path)
            if (fetch_checksum != digest) or fetch_checksum is None:
                return -4, data_transfer, resumed

        if digest and (fetch_checksum != digest):
            # not properly downloaded
            if (not existed_before) or (not resume):
                do_stfu_rm(download_path)
            return -2, data_transfer, resumed

        return 0, data_transfer, resumed