예제 #1
0
def goglib_get_banner(banner_path, *args):
    """Download a game banner, scale it to 240 px height and save it.

    :param banner_path: destination ``.jpg`` path; the game name is derived
        from its basename.
    :param args: if any extra positional argument is given, the banner is
        converted to greyscale before saving.

    HTTP/URL errors are printed; on any other failure a placeholder banner
    is generated locally instead.
    """
    banner_height = 240
    game_name = os.path.basename(banner_path).split('.jpg')[0]

    try:
        banner_url = find_image(game_name)

        # find_image may return a protocol-relative URL ('//host/...').
        if not banner_url.startswith('http'):
            banner_url = 'https:' + banner_url
        banner_req = urllib_request(banner_url)

        banner_data = urllib_urlopen(banner_req).read()
        # 'with' guarantees the file is closed even if the write fails.
        with open(banner_path, 'wb') as banner_file:
            banner_file.write(banner_data)

        # Scale to a fixed height, preserving the aspect ratio.
        pic_src = Image.open(banner_path)
        scale_lvl = banner_height / float(pic_src.size[1])
        scaled_width = int(float(pic_src.size[0]) * scale_lvl)
        pic = pic_src.resize((scaled_width, banner_height),
                             PIL.Image.ANTIALIAS)
        if args:
            pic = pic.convert('L')  # greyscale variant
        pic.save(banner_path)

    except urllib_urlerror as e:
        print(e.reason)
    except urllib_httperror as e:
        print(e.code)
        print(e.read())
    except Exception:
        # Anything else (no match, bad data, PIL error): fall back to a
        # locally generated placeholder.  Was a bare 'except:', which also
        # swallowed KeyboardInterrupt/SystemExit.
        goglib_recreate_banner.goglib_recreate_banner(game_name, banner_path)
예제 #2
0
def get_banner(game_name, url, banner_path, lib):
    """Download a banner from *url*, resize it to 518x240 and save it.

    :param game_name: base name for the saved ``<game_name>.jpg`` file.
    :param url: direct URL of the banner image.
    :param banner_path: destination directory (created if missing).
    :param lib: when ``'goglib'``, an additional greyscale copy is written
        to ``<banner_path>/unavailable/``.

    HTTP/URL errors are printed and swallowed.
    """
    banner_req = urllib_request(url)
    # Build the target path once instead of re-concatenating it four times.
    jpg_path = os.path.join(banner_path, game_name + '.jpg')

    try:
        if not os.path.exists(banner_path):
            os.makedirs(banner_path)

        banner_data = urllib_urlopen(banner_req).read()
        # 'with' guarantees the handle is closed even if the write fails.
        with open(jpg_path, 'wb') as banner_file:
            banner_file.write(banner_data)

        pic_src = Image.open(jpg_path)
        pic = pic_src.resize((518, 240), PIL.Image.ANTIALIAS)
        pic.save(jpg_path)

        if lib == 'goglib':
            unavailable_dir = os.path.join(banner_path, 'unavailable')
            if not os.path.exists(unavailable_dir):
                os.makedirs(unavailable_dir)

            # Greyscale copy shown for games that are not installed.
            new_pic = Image.open(jpg_path)
            pic_grey = new_pic.convert('L')
            pic_grey.save(os.path.join(unavailable_dir, game_name + '.jpg'))

    except urllib_urlerror as e:
        print(e.reason)
    except urllib_httperror as e:
        print(e.code)
        print(e.read())
예제 #3
0
def goglib_get_icon(game_name, icon_url, path):
    """Download the icon at *icon_url* and save it as ``<path>/<game_name>.jpg``.

    Network errors propagate to the caller.
    """
    icon_req = urllib_request(icon_url)
    icon_data = urllib_urlopen(icon_req).read()
    # 'with' guarantees the file is closed even if the write fails
    # (the original left the handle open on error).
    with open(os.path.join(path, game_name + '.jpg'), 'wb') as icon_file:
        icon_file.write(icon_data)
def get_weather():
    """Return the current (temperature, icon name) scraped from a weather page.

    The page URL is read from the first line of the ``location`` file next
    to the script.  A SIGALRM-based 10 s timeout guards the network fetch.
    Prints ``"<temperature>,<icon>.svg"`` and returns the pair on success;
    on failure prints an error and returns error information (see the
    except clauses).
    """
    def signal_timeout(signum, frame):
        """Handler for SIGALRM: abort a fetch that takes too long."""
        raise TimeoutException("Time out.")

    with open(sys.path[0] + "/location", "r") as f:
        URL = f.readlines()[0].strip()

    # Python 2 / Python 3 compatible urllib imports.
    try:
        from urllib2 import Request as urllib_request
        from urllib2 import urlopen as urllib_urlopen
        from urllib2 import URLError as urllib_urlerror
        from urllib2 import HTTPError as urllib_httperror
    except ImportError:
        # urllib.request re-exports URLError/HTTPError from urllib.error.
        from urllib.request import Request as urllib_request
        from urllib.request import urlopen as urllib_urlopen
        from urllib.request import URLError as urllib_urlerror
        from urllib.request import HTTPError as urllib_httperror

    req = urllib_request(URL)

    signal.signal(signal.SIGALRM, signal_timeout)
    signal.alarm(10)  # maximum execution time, in seconds

    try:
        page = urllib_urlopen(req)
        page_content = page.read()
        soup = BeautifulSoup(page_content, "lxml")

        temperature = soup.find_all("span",
                                    attrs={"class":
                                           "summary"})[0].get_text().split()[0]
        icon_name = ""
        for tag in soup.find_all("span", attrs={"class": "currently"}):
            icon_name = tag.find_all("img")[0].get("alt").split()[0]

        output = temperature + "," + icon_name + ".svg"
        if sys.version_info.major == 2:
            output = output.encode("utf-8")
        print(output)
        return temperature, icon_name

    except TimeoutException as e:
        print(e)
        return 1
    except urllib_urlerror as e:
        print("Error")
        return e.reason
    except urllib_httperror as e:
        print("Error")
        return e.code, e.read()
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit.
        print("Error")
        return 1
    finally:
        # Bug fix: cancel the pending alarm so SIGALRM cannot fire later
        # in the process after a fast (or failed) fetch.
        signal.alarm(0)
예제 #5
0
    def get_scripts(self):
        """Download the scripts archive for ``self.lib`` and unpack it.

        Returns whatever ``self.unpack`` returns (whether new scripts were
        installed).  On a network error shows a modal GTK error dialog and
        exits the program.

        Raises ValueError for an unknown ``self.lib`` (previously this
        crashed later with NameError because ``url`` was never assigned).
        """

        def show_error_and_exit(secondary_text):
            # Shared error path: modal dialog, then terminate.
            message_dialog = Gtk.MessageDialog(
                None,
                0,
                Gtk.MessageType.ERROR,
                Gtk.ButtonsType.OK,
                _("Error"),
            )
            message_dialog.format_secondary_text(secondary_text)
            message_dialog.run()
            message_dialog.destroy()
            sys.exit()

        if self.lib == 'goglib':
            url = 'https://github.com/yancharkin/games_nebula_goglib_scripts/archive/master.zip'
        elif self.lib == 'mylib':
            url = 'https://github.com/yancharkin/games_nebula_mylib_scripts/archive/master.zip'
        else:
            raise ValueError("Unknown library: %s" % self.lib)

        req = urllib_request(url)

        try:
            archive_data = urllib_urlopen(req).read()
            archive_path = tmp + '/' + self.lib + '_scripts.zip'
            # 'with' guarantees the archive file is closed on any error.
            with open(archive_path, 'wb') as archive_file:
                archive_file.write(archive_data)

            return self.unpack(archive_path)

        except urllib_urlerror as e:
            show_error_and_exit(str(e.reason))

        except urllib_httperror as e:
            show_error_and_exit(str(e.code) + ' ' + str(e.read()))
예제 #6
0
def goglib_get_banner(banner_path, *args):
    """Scrape a game's GOG page for its banner, scale it and save it.

    :param banner_path: destination ``.jpg`` path; the game name (GOG slug)
        is derived from its basename.
    :param args: if any extra positional argument is given, the banner is
        converted to greyscale before saving.

    HTTP/URL errors are printed and swallowed.
    """
    banner_height = 240
    game_name = os.path.basename(banner_path).split('.jpg')[0]

    req = urllib_request('https://www.gog.com/game/' + game_name)

    try:
        game_page = urllib_urlopen(req)
        game_page_content = game_page.read()
        soup = BeautifulSoup(game_page_content, 'lxml')
        # The banner URL is published in the page's og:image meta tag.
        raw_data = soup.findAll(attrs={'name': 'og:image'})
        banner_url = raw_data[0]['content']

        # og:image may be protocol-relative ('//images.gog.com/...').
        if not banner_url.startswith('http'):
            banner_url = 'https:' + banner_url
        banner_req = urllib_request(banner_url)

        banner_data = urllib_urlopen(banner_req).read()
        # 'with' guarantees the file is closed even if the write fails.
        with open(banner_path, 'wb') as banner_file:
            banner_file.write(banner_data)

        # Scale to a fixed height, preserving the aspect ratio.
        pic_src = Image.open(banner_path)
        scale_lvl = banner_height / float(pic_src.size[1])
        scaled_width = int(float(pic_src.size[0]) * scale_lvl)
        pic = pic_src.resize((scaled_width, banner_height), PIL.Image.ANTIALIAS)
        if args:
            pic = pic.convert('L')  # greyscale variant
        pic.save(banner_path)

    except urllib_urlerror as e:
        print(e.reason)
    except urllib_httperror as e:
        print(e.code)
        print(e.read())
예제 #7
0
 def send(self, request, stream=False, timeout=None, verify=False, cert=None, proxies=None):
     """Serve *request* over FTP via urllib and fake a requests.Response.

     Only GET is supported; any other method raises
     HTTPMethodUnavailableError.  A urllib URLError is translated to
     FTPError.  The extra keyword parameters mirror the requests
     transport-adapter interface; only ``timeout`` is used here.
     """
     if request.method != "GET":
         raise HTTPMethodUnavailableError("FTP requests do not support method '%s'" % request.method, request.method)
     try:
         real_resp = urllib_urlopen(request.url, timeout=timeout)
         # we're going to fake a requests.Response with this
         resp = requests.Response()
         resp.status_code = 200  # urllib would have raised on failure
         resp.url = request.url
         resp.headers = {}
         resp._content = real_resp.read()
         resp.raw = _dummy  # NOTE(review): _dummy defined elsewhere — stand-in raw stream
         return resp
     except urllib_URLError:
         raise FTPError(request.url)
예제 #8
0
 def send(self, request, stream=False, timeout=None, verify=False, cert=None, proxies=None):
     """Fetch *request* over FTP with urllib and wrap the payload in a
     faked requests.Response.  Only GET is supported; a urllib URLError
     becomes FTPError."""
     if request.method != 'GET':
         raise HTTPMethodUnavailableError("FTP requests do not support method '%s'" %
                                          request.method, request.method)
     try:
         source = urllib_urlopen(request.url, timeout=timeout)
         payload = source.read()
     except urllib_URLError:
         raise FTPError(request.url)
     # Build a minimal stand-in for a real requests.Response.
     faked = requests.Response()
     faked.url = request.url
     faked.status_code = 200
     faked.headers = {}
     faked.raw = _dummy
     faked._content = payload
     return faked
예제 #9
0
def find_image(query):
    """Look up *query* in the GOG search API and return its image URL.

    :param query: game slug; underscores are treated as spaces for the
        search request.
    :returns: ``'https:<image>.jpg'`` for the matched product, or None when
        nothing matches.  With the module-level ``use_fuzzy`` flag set, the
        product whose slug best matches *query* wins; otherwise only an
        exact slug match counts.

    Bug fix: a fuzzy search over an empty product list previously crashed
    with TypeError (``best_match`` stayed None); it now returns None.
    """
    product_json = json.loads(
        urllib_urlopen(SEARCH_API +
                       query.replace('_', ' ')).read().decode('utf-8'))
    if use_fuzzy:
        best_match = None
        closest_ratio = 0
        for product in product_json['products']:
            ratio = fuzz.token_set_ratio(query, product['slug'])
            if ratio > closest_ratio:
                closest_ratio = ratio
                best_match = product
        if best_match is None:
            return None
        return 'https:' + best_match['image'] + '.jpg'
    for product in product_json['products']:
        if product['slug'] == query:
            return 'https:' + product['image'] + '.jpg'
    # Explicit (previously implicit) "not found" result.
    return None
예제 #10
0
 def request(self, method, url, **kwargs):
     """Route ftp:// URLs through urllib; delegate everything else to the
     parent session's request().

     Only GET is allowed for FTP; other methods raise
     HTTPMethodUnavailableError.  A urllib URLError becomes FTPError.
     Returns a faked requests.Response for successful FTP fetches.
     """
     if url.startswith('ftp://'):
         if method.lower() != 'get':
             raise HTTPMethodUnavailableError(
                 "non-HTTP(S) requests do not support method '%s'" %
                 method, method)
         try:
             real_resp = urllib_urlopen(url, timeout=self.timeout)
             # we're going to fake a requests.Response with this
             resp = requests.Response()
             resp.status_code = 200  # urllib would have raised on failure
             resp.url = url
             resp.headers = {}
             resp._content = real_resp.read()
             return resp
         except urllib_URLError:
             raise FTPError(url)
     else:
         # Normal HTTP(S) traffic: use the regular requests machinery.
         return super(FTPSession, self).request(method, url, **kwargs)
예제 #11
0
def goglib_get_banner(banner_path, unavailable_path, game_id, *args):
    """Fetch a game banner via the GOG Galaxy API, scale it and save it.

    :param banner_path: destination ``.jpg`` path for the normal banner;
        the game name is derived from its basename.
    :param unavailable_path: path for the greyscale "unavailable" copy.
    :param game_id: GOG product id used for the Galaxy API lookup.
    :param args: unused; kept for interface compatibility with callers.

    HTTP/URL errors are printed; on any other failure a placeholder banner
    is generated locally instead.
    """
    banner_height = 240
    game_name = os.path.basename(banner_path).split('.jpg')[0]
    print("Getting picture for: '" + game_name + "'")

    try:
        # Reuse (and refresh if needed) the lgogdownloader Galaxy token.
        token_path = os.getenv('HOME') + '/.config/lgogdownloader/galaxy_tokens.json'
        token = Token.from_file(token_path)
        if token.expired():
            token.refresh()
            token.save(token_path)
        api = GogApi(token)

        prod = api.product(game_id)
        prod.update_galaxy(expand=True)
        # Strip the '_glx_logo' suffix to get the plain banner variant.
        banner_url = 'https:' + ''.join(prod.image_logo.split('_glx_logo'))

        banner_req = urllib_request(banner_url)
        banner_data = urllib_urlopen(banner_req).read()
        # 'with' guarantees the file is closed even if the write fails.
        with open(banner_path, 'wb') as banner_file:
            banner_file.write(banner_data)

        # Scale to a fixed height, preserving the aspect ratio.
        pic_src = Image.open(banner_path)
        scale_lvl = banner_height/float(pic_src.size[1])
        scaled_width = int(float(pic_src.size[0])*scale_lvl)
        pic = pic_src.resize((scaled_width, banner_height), PIL.Image.ANTIALIAS)
        pic.save(banner_path)
        # Greyscale copy shown while the game is unavailable.
        pic = pic.convert('L')
        pic.save(unavailable_path)

    except urllib_urlerror as e:
        print(e.reason)
    except urllib_httperror as e:
        print(e.code)
        print(e.read())
    except Exception:
        # Anything else (token/API/PIL error): fall back to a locally
        # generated placeholder.  Was a bare 'except:', which also
        # swallowed KeyboardInterrupt/SystemExit.
        goglib_recreate_banner.goglib_recreate_banner(game_name, banner_path)
예제 #12
0
파일: __init__.py 프로젝트: wcq062821/Sigil
def getEncodingInfo(response=None, text='', log=None, url=None):
    """Find all encoding related information in given `text`.

    Information in headers of supplied HTTPResponse, possible XML
    declaration and X/HTML ``<meta>`` elements are used.

    :param response:
        HTTP response object, e.g. via ``urllib.urlopen('url')``
    :param text:
        a byte string to guess encoding for. XML prolog with
        encoding pseudo attribute or HTML meta element will be used to detect
        the encoding
    :param url:
        When given fetches document at `url` and all needed information.
        No `reponse` or `text` parameters are needed in this case.
    :param log:
        an optional logging logger to which messages may go, if
        no log given all log messages are available from resulting
        ``EncodingInfo``

    :returns:
        instance of :class:`EncodingInfo`.

    How the resulting encoding is retrieved:

    XML
        RFC 3023 states if media type given in the Content-Type HTTP header is
        application/xml, application/xml-dtd,
        application/xml-external-parsed-entity, or any one of the subtypes of
        application/xml such as application/atom+xml or application/rss+xml
        etc then the character encoding is determined in this order:

        1. the encoding given in the charset parameter of the Content-Type HTTP
        header, or
        2. the encoding given in the encoding attribute of the XML declaration
        within the document, or
        3. utf-8.

        Mismatch possibilities:
            - HTTP + XMLdecla
            - HTTP + HTMLmeta

            application/xhtml+xml ?
                XMLdecla + HTMLmeta


        If the media type given in the Content-Type HTTP header is text/xml,
        text/xml-external-parsed-entity, or a subtype like text/Anything+xml,
        the encoding attribute of the XML declaration is ignored completely
        and the character encoding is determined in the order:
        1. the encoding given in the charset parameter of the Content-Type HTTP
        header, or
        2. ascii.

        No mismatch possible.


        If no media type is given the XML encoding pseuso attribute is used
        if present.

        No mismatch possible.

    HTML
        For HTML served as text/html:
            http://www.w3.org/TR/REC-html40/charset.html#h-5.2.2

        1. An HTTP "charset" parameter in a "Content-Type" field.
           (maybe defaults to ISO-8859-1, but should not assume this)
        2. A META declaration with "http-equiv" set to "Content-Type" and a
           value set for "charset".
        3. The charset attribute set on an element that designates an external
           resource. (NOT IMPLEMENTED HERE YET)

        Mismatch possibilities:
            - HTTP + HTMLmeta

    TEXT
        For most text/* types the encoding will be reported as iso-8859-1.
        Exceptions are XML formats send as text/* mime type (see above) and
        text/css which has a default encoding of UTF-8.
    """
    if url:
        # may cause IOError which is raised
        response = urllib_urlopen(url)

    # NOTE(review): the default for `text` is '' (not None), so this branch
    # only runs when a caller explicitly passes text=None — with url only,
    # the response body is never read.  Confirm this is intended.
    if text is None:
        # read text from response only if not explicitly given
        try:
            text = response.read()
        except IOError:
            pass

    if text is None:
        # text must be a string (not None)
        text = ''

    encinfo = EncodingInfo()

    # Collect log output in-memory when the caller supplied no logger, so
    # it can be returned on encinfo.logtext.
    logstream = io_StringIO()
    if not log:
        log = buildlog(stream=logstream, format='%(message)s')

    # HTTP
    if response:
        encinfo.http_media_type, encinfo.http_encoding = getHTTPInfo(
            response, log)
        texttype = _getTextTypeByMediaType(encinfo.http_media_type, log)
    else:
        # check if maybe XML or (TODO:) HTML
        texttype = _getTextType(text, log)

    # XML only served as application/xml ! #(also XHTML served as text/html)
    if texttype == _XML_APPLICATION_TYPE:  # or texttype == _XML_TEXT_TYPE:
        try:
            encinfo.xml_encoding = detectXMLEncoding(text, log)
        except (AttributeError, ValueError):
            encinfo.xml_encoding = None

    # XML (also XHTML served as text/html)
    if texttype == _HTML_TEXT_TYPE:
        try:
            # includeDefault=False: no utf-8 fallback for text/html here.
            encinfo.xml_encoding = detectXMLEncoding(text,
                                                     log,
                                                     includeDefault=False)
        except (AttributeError, ValueError):
            encinfo.xml_encoding = None

    # HTML
    if texttype == _HTML_TEXT_TYPE or texttype == _TEXT_TYPE:
        encinfo.meta_media_type, encinfo.meta_encoding = getMetaInfo(text, log)

    # guess
    # 1. HTTP charset?
    encinfo.encoding = encinfo.http_encoding
    encinfo.mismatch = False

    # 2. media_type?
    #   XML application/...
    if texttype == _XML_APPLICATION_TYPE:
        if not encinfo.encoding:
            encinfo.encoding = encinfo.xml_encoding
            # xml_encoding has default of utf-8

    #   text/html
    elif texttype == _HTML_TEXT_TYPE:
        # Fallback chain: HTTP charset -> <meta> -> media-type default ->
        # brute-force try of candidate encodings.
        if not encinfo.encoding:
            encinfo.encoding = encinfo.meta_encoding
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)
        if not encinfo.encoding:
            encinfo.encoding = tryEncodings(text)

    #   text/... + xml or text/*
    elif texttype == _XML_TEXT_TYPE or texttype == _TEXT_TYPE:
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)

    elif texttype == _TEXT_UTF8:
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)

    # possible mismatches, checks if present at all and then if equal
    # HTTP + XML
    if encinfo.http_encoding and encinfo.xml_encoding and\
       encinfo.http_encoding != encinfo.xml_encoding:
        encinfo.mismatch = True
        log.warning('"%s" (HTTP) != "%s" (XML) encoding mismatch' %
                    (encinfo.http_encoding, encinfo.xml_encoding))
    # HTTP + Meta
    if encinfo.http_encoding and encinfo.meta_encoding and\
            encinfo.http_encoding != encinfo.meta_encoding:
        encinfo.mismatch = True
        log.warning('"%s" (HTTP) != "%s" (HTML <meta>) encoding mismatch' %
                    (encinfo.http_encoding, encinfo.meta_encoding))
    # XML + Meta
    if encinfo.xml_encoding and encinfo.meta_encoding and\
            encinfo.xml_encoding != encinfo.meta_encoding:
        encinfo.mismatch = True
        log.warning('"%s" (XML) != "%s" (HTML <meta>) encoding mismatch' %
                    (encinfo.xml_encoding, encinfo.meta_encoding))

    log.info('Encoding (probably): %s (Mismatch: %s)', encinfo.encoding,
             encinfo.mismatch)

    # Hand the collected in-memory log back to the caller.
    encinfo.logtext = logstream.getvalue()
    return encinfo
예제 #13
0
def getEncodingInfo(response=None, text='', log=None, url=None):
    """Find all encoding related information in given `text`.

    Information in headers of supplied HTTPResponse, possible XML
    declaration and X/HTML ``<meta>`` elements are used.

    :param response:
        HTTP response object, e.g. via ``urllib.urlopen('url')``
    :param text:
        a byte string to guess encoding for. XML prolog with
        encoding pseudo attribute or HTML meta element will be used to detect
        the encoding
    :param url:
        When given fetches document at `url` and all needed information.
        No `reponse` or `text` parameters are needed in this case.
    :param log:
        an optional logging logger to which messages may go, if
        no log given all log messages are available from resulting
        ``EncodingInfo``

    :returns:
        instance of :class:`EncodingInfo`.

    How the resulting encoding is retrieved:

    XML
        RFC 3023 states if media type given in the Content-Type HTTP header is
        application/xml, application/xml-dtd,
        application/xml-external-parsed-entity, or any one of the subtypes of
        application/xml such as application/atom+xml or application/rss+xml
        etc then the character encoding is determined in this order:

        1. the encoding given in the charset parameter of the Content-Type HTTP
        header, or
        2. the encoding given in the encoding attribute of the XML declaration
        within the document, or
        3. utf-8.

        Mismatch possibilities:
            - HTTP + XMLdecla
            - HTTP + HTMLmeta

            application/xhtml+xml ?
                XMLdecla + HTMLmeta


        If the media type given in the Content-Type HTTP header is text/xml,
        text/xml-external-parsed-entity, or a subtype like text/Anything+xml,
        the encoding attribute of the XML declaration is ignored completely
        and the character encoding is determined in the order:
        1. the encoding given in the charset parameter of the Content-Type HTTP
        header, or
        2. ascii.

        No mismatch possible.


        If no media type is given the XML encoding pseuso attribute is used
        if present.

        No mismatch possible.

    HTML
        For HTML served as text/html:
            http://www.w3.org/TR/REC-html40/charset.html#h-5.2.2

        1. An HTTP "charset" parameter in a "Content-Type" field.
           (maybe defaults to ISO-8859-1, but should not assume this)
        2. A META declaration with "http-equiv" set to "Content-Type" and a
           value set for "charset".
        3. The charset attribute set on an element that designates an external
           resource. (NOT IMPLEMENTED HERE YET)

        Mismatch possibilities:
            - HTTP + HTMLmeta

    TEXT
        For most text/* types the encoding will be reported as iso-8859-1.
        Exceptions are XML formats send as text/* mime type (see above) and
        text/css which has a default encoding of UTF-8.
    """
    if url:
        # may cause IOError which is raised
        response = urllib_urlopen(url)

    # NOTE(review): the default for `text` is '' (not None), so this branch
    # only runs when a caller explicitly passes text=None — with url only,
    # the response body is never read.  Confirm this is intended.
    if text is None:
        # read text from response only if not explicitly given
        try:
            text = response.read()
        except IOError:
            pass

    if text is None:
        # text must be a string (not None)
        text = ''

    encinfo = EncodingInfo()

    # Collect log output in-memory when the caller supplied no logger, so
    # it can be returned on encinfo.logtext.
    logstream = io_StringIO()
    if not log:
        log = buildlog(stream=logstream, format='%(message)s')

    # HTTP
    if response:
        encinfo.http_media_type, encinfo.http_encoding = getHTTPInfo(
            response, log)
        texttype = _getTextTypeByMediaType(encinfo.http_media_type, log)
    else:
        # check if maybe XML or (TODO:) HTML
        texttype = _getTextType(text, log)

    # XML only served as application/xml ! #(also XHTML served as text/html)
    if texttype == _XML_APPLICATION_TYPE:  # or texttype == _XML_TEXT_TYPE:
        try:
            encinfo.xml_encoding = detectXMLEncoding(text, log)
        except (AttributeError, ValueError):
            encinfo.xml_encoding = None

    # XML (also XHTML served as text/html)
    if texttype == _HTML_TEXT_TYPE:
        try:
            # includeDefault=False: no utf-8 fallback for text/html here.
            encinfo.xml_encoding = detectXMLEncoding(
                text, log, includeDefault=False)
        except (AttributeError, ValueError):
            encinfo.xml_encoding = None

    # HTML
    if texttype == _HTML_TEXT_TYPE or texttype == _TEXT_TYPE:
        encinfo.meta_media_type, encinfo.meta_encoding = getMetaInfo(text, log)

    # guess
    # 1. HTTP charset?
    encinfo.encoding = encinfo.http_encoding
    encinfo.mismatch = False

    # 2. media_type?
    #   XML application/...
    if texttype == _XML_APPLICATION_TYPE:
        if not encinfo.encoding:
            encinfo.encoding = encinfo.xml_encoding
            # xml_encoding has default of utf-8

    #   text/html
    elif texttype == _HTML_TEXT_TYPE:
        # Fallback chain: HTTP charset -> <meta> -> media-type default ->
        # brute-force try of candidate encodings.
        if not encinfo.encoding:
            encinfo.encoding = encinfo.meta_encoding
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)
        if not encinfo.encoding:
            encinfo.encoding = tryEncodings(text)

    #   text/... + xml or text/*
    elif texttype == _XML_TEXT_TYPE or texttype == _TEXT_TYPE:
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)

    elif texttype == _TEXT_UTF8:
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)

    # possible mismatches, checks if present at all and then if equal
    # HTTP + XML
    if encinfo.http_encoding and encinfo.xml_encoding and\
       encinfo.http_encoding != encinfo.xml_encoding:
        encinfo.mismatch = True
        log.warning('"%s" (HTTP) != "%s" (XML) encoding mismatch' %
                    (encinfo.http_encoding, encinfo.xml_encoding))
    # HTTP + Meta
    if encinfo.http_encoding and encinfo.meta_encoding and\
            encinfo.http_encoding != encinfo.meta_encoding:
        encinfo.mismatch = True
        log.warning('"%s" (HTTP) != "%s" (HTML <meta>) encoding mismatch' %
                    (encinfo.http_encoding, encinfo.meta_encoding))
    # XML + Meta
    if encinfo.xml_encoding and encinfo.meta_encoding and\
            encinfo.xml_encoding != encinfo.meta_encoding:
        encinfo.mismatch = True
        log.warning('"%s" (XML) != "%s" (HTML <meta>) encoding mismatch' %
                    (encinfo.xml_encoding, encinfo.meta_encoding))

    log.info('Encoding (probably): %s (Mismatch: %s)', encinfo.encoding,
             encinfo.mismatch)

    # Hand the collected in-memory log back to the caller.
    encinfo.logtext = logstream.getvalue()
    return encinfo