Example #1
	def __init__(self, locations):
		TRANSLATORS_FILE = os.path.join(locations['share'], 'TRANSLATORS') # remember to encode this file in UTF-8
		IMAGES_DIR = locations['images']
		dialog = gtk.AboutDialog()
		dialog.set_name(version.pname)
		dialog.set_version(version.pversion)
		dialog.set_copyright("Copyright © 2005-2008 Vasco Nunes, Piotr Ożarowski")
		dialog.set_website(version.pwebsite)
		dialog.set_authors([
			_("Main Authors") + ':',
			version.pauthor.replace(', ', '\n') + "\n",
			_("Programmers") + ':',
			'Jessica Katharina Parth <*****@*****.**>',
			'Michael Jahn <*****@*****.**>\n',
			_('Contributors') + ':',
			'Christian Sagmueller <*****@*****.**>\n' \
			'Arjen Schwarz <*****@*****.**>'
		])
		dialog.set_artists([_("Logo, icon and general artwork " + \
			"by Peek <*****@*****.**>." + \
			"\nPlease visit http://www.peekmambo.com/\n"),
			'seen / unseen icons by dragonskulle <*****@*****.**>'
		])
		data = None
		if os.path.isfile(TRANSLATORS_FILE):
			data = open(TRANSLATORS_FILE).read()
		elif os.path.isfile(TRANSLATORS_FILE+'.gz'):
			from gutils import decompress
			data = decompress(open(TRANSLATORS_FILE + '.gz').read())
		elif os.name == 'posix':
			if os.path.isfile('/usr/share/doc/griffith/TRANSLATORS'):
				data = open('/usr/share/doc/griffith/TRANSLATORS').read()
			elif os.path.isfile('/usr/share/doc/griffith/TRANSLATORS.gz'):
				from gutils import decompress
				data = decompress(open('/usr/share/doc/griffith/TRANSLATORS.gz').read())
		translator_credits = ''
		if data:
			for line in data.split('\n'):
				if line.startswith('* '):
					lang = line[2:]
					if _(lang) != lang:
						line = "* %s:" % _(lang)
				translator_credits += "%s\n" % line
		else:
			translator_credits = _("See TRANSLATORS file")
		dialog.set_translator_credits(translator_credits)
		logo_file = os.path.abspath(os.path.join(IMAGES_DIR, 'griffith.png'))
		logo = gtk.gdk.pixbuf_new_from_file(logo_file)
		dialog.set_logo(logo)
		if os.path.isfile('/usr/share/common-licenses/GPL-2'):
			dialog.set_license(open('/usr/share/common-licenses/GPL-2').read())
		else:
			dialog.set_license(_("This program is released under the GNU" + \
				"General Public License.\n" + \
				"Please visit http://www.gnu.org/copyleft/gpl.html for details."))
		dialog.set_comments(version.pdescription)
		dialog.run()
		dialog.destroy()
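
The dialog above looks for a plain TRANSLATORS file first and falls back to a gzip-compressed copy, decoding the result as UTF-8 (as the comment asks). A minimal standalone sketch of that fallback using only the standard library; gzip here stands in for the gutils.decompress helper the example actually calls, and read_translators is just an illustrative name:

import os
import gzip
import io

def read_translators(share_dir):
    # Return the TRANSLATORS text as unicode, trying the .gz variant too.
    path = os.path.join(share_dir, 'TRANSLATORS')
    if os.path.isfile(path):
        with io.open(path, 'rb') as handle:
            return handle.read().decode('utf-8')
    if os.path.isfile(path + '.gz'):
        with gzip.open(path + '.gz', 'rb') as handle:
            return handle.read().decode('utf-8')
    return None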
Example #2
 def open_search(self, parent_window):
     self.titles = [""]
     self.ids = [""]
     if self.url.find('%s') > 0:
         self.url = self.url % self.title
         self.url = string.replace(self.url, ' ', '%20')
     else:
         self.url = string.replace(self.url+self.title, ' ', '%20')
     try:
         url = self.url.encode(self.encode)
     except UnicodeEncodeError:
         url = self.url.encode('utf-8')
     self.progress.set_data(parent_window, _("Searching"), _("Wait a moment"), False)
     retriever = Retriever(url, parent_window, self.progress)
     retriever.start()
     while retriever.isAlive():
         self.progress.pulse()
         if self.progress.status:
             retriever.join()
         while gtk.events_pending():
             gtk.main_iteration()
     try:
         if retriever.html:
             ifile = file(retriever.html[0], 'rb')
             self.page = ifile.read()
             # check for gzip compressed pages before decoding to unicode
             if len(self.page) > 2 and self.page[0:2] == '\037\213':
                 self.page = gutils.decompress(self.page)
             self.page = self.page.decode(self.encode, 'replace')
         else:
             return False
     except IOError:
         pass
     urlcleanup()
     return True
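
open_search above fills the plugin's URL template with the title and only replaces spaces with %20 before encoding. A small sketch of the same step using urllib's quote(), which escapes the remaining reserved characters as well; this is a stricter variant than what the plugins themselves do, and build_search_url is an illustrative name:

try:
    from urllib import quote          # Python 2
except ImportError:
    from urllib.parse import quote    # Python 3

def build_search_url(template, title, encoding='utf-8'):
    # Percent-encode the title; quote() handles spaces and the other
    # reserved characters, not just ' ' -> '%20'.
    quoted = quote(title.encode(encoding, 'replace'), safe='')
    if '%s' in template:
        return template % quoted
    return template + quoted

# e.g. build_search_url('http://example.org/search?q=%s', u'Spirited Away')
#   -> 'http://example.org/search?q=Spirited%20Away'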
Example #3
 def open_page(self, parent_window=None, url=None):
     if url is None:
         url_to_fetch = self.url
     else:
         url_to_fetch = url
     if parent_window is not None:
         self.parent_window = parent_window
     self.progress.set_data(parent_window, _("Fetching data"), _("Wait a moment"), False)
     retriever = Retriever(url_to_fetch, self.parent_window, self.progress)
     retriever.start()
     while retriever.isAlive():
         self.progress.pulse()
         if self.progress.status:
             retriever.join()
         while gtk.events_pending():
             gtk.main_iteration()
     data = None
     try:
         if retriever.html:
             ifile = file(retriever.html[0], "rb")
             data = ifile.read()
             # check for gzip compressed pages before decoding to unicode
             if len(data) > 2 and data[0:2] == "\037\213":
                 data = gutils.decompress(data)
             data = data.decode(self.encode)
     except IOError:
         pass
     if url is None:
         self.page = data
     urlcleanup()
     return data
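
Every example drives a Retriever thread and keeps pumping the GTK main loop while it runs. The Retriever class itself is not shown in these snippets; judging from the attributes the callers read (html[0] as a temporary file name, exception, and the urlcleanup() call afterwards), it plausibly wraps urllib's urlretrieve in a worker thread, roughly like the sketch below. This is a guess at its shape, not the project's actual class:

import threading
try:
    from urllib import urlretrieve, urlcleanup          # Python 2
except ImportError:
    from urllib.request import urlretrieve, urlcleanup  # Python 3

class RetrieverSketch(threading.Thread):
    # Fetch a URL into a temporary file on a worker thread so the caller
    # can keep pulsing a progress bar and processing pending GTK events.
    def __init__(self, url):
        threading.Thread.__init__(self)
        self.url = url
        self.html = None        # (filename, headers) tuple on success
        self.exception = None

    def run(self):
        try:
            self.html = urlretrieve(self.url)
        except IOError as exc:
            self.exception = exc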
Example #4
 def open_search(self, parent_window, destination=None):
     self.titles = [""]
     self.ids = [""]
     if self.url.find('%s') > 0:
         self.url = self.url % self.title
         self.url = string.replace(self.url, ' ', '%20')
     else:
         if not self.usepostrequest:
             self.url = string.replace(self.url + self.title, ' ', '%20')
     try:
         url = self.url.encode(self.encode)
     except UnicodeEncodeError:
         url = self.url.encode('utf-8')
     self.progress.set_data(parent_window, _("Searching"), _("Wait a moment"), True)
     if self.usepostrequest:
         postdata = self.get_postdata()
         retriever = Retriever(url, parent_window, self.progress, destination, useurllib2=self.useurllib2,
                               postdata=postdata)
     else:
         retriever = Retriever(url, parent_window, self.progress, destination, useurllib2=self.useurllib2)
     retriever.start()
     while retriever.isAlive():
         self.progress.pulse()
         if self.progress.status:
             retriever.join()
         while gtk.events_pending():
             gtk.main_iteration()
     try:
         if retriever.exception is None:
             if destination:
                 # caller gave an explicit destination file
                 # don't care about the content
                 return True
             if retriever.html:
                 ifile = file(retriever.html[0], 'rb')
                 try:
                     self.page = ifile.read()
                 finally:
                     ifile.close()
                 # check for gzip compressed pages before decoding to unicode
                 if len(self.page) > 2 and self.page[0:2] == '\037\213':
                     self.page = gutils.decompress(self.page)
                 self.page = self.page.decode(self.encode, 'replace')
             else:
                 return False
         else:
             self.progress.hide()
             gutils.urllib_error(_("Connection error"), parent_window)
             return False
     except IOError:
         log.exception('')
     finally:
         urlcleanup()
     return True
Example #5
	def search(self, parent_window):
		self.open_search(parent_window)
		self.page = decompress(self.page)

		tmp = string.find(self.page, '<h1>Anime List - Search for: ')
		if tmp == -1:		# already a movie page
			self.page = ''
		else:			# multiple matches
			self.page = gutils.trim(self.page, 'class="anime_list"', '</table>')
			self.page = gutils.after(self.page, '</tr>')

		return self.page
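
search() above relies on gutils.trim and gutils.after to cut the interesting part out of the fetched HTML. Those helpers are not shown in these snippets; from the way they are called, they appear to behave roughly like the following sketch (the real gutils may handle edge cases differently):

def after(text, marker):
    # Everything after the first occurrence of marker, or '' if it is absent.
    pos = text.find(marker)
    if pos < 0:
        return ''
    return text[pos + len(marker):]

def trim(text, start, end):
    # The text between the first start marker and the next end marker.
    return after(text, start).split(end, 1)[0]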
Example #6
 def open_search(self, parent_window, destination=None):
     self.titles = [""]
     self.ids = [""]
     if self.url.find('%s') > 0:
         self.url = self.url % self.title
         self.url = string.replace(self.url, ' ', '%20')
     else:
         self.url = string.replace(self.url + self.title, ' ', '%20')
     try:
         url = self.url.encode(self.encode)
     except UnicodeEncodeError:
         url = self.url.encode('utf-8')
     self.progress.set_data(parent_window, _("Searching"),
                            _("Wait a moment"), True)
     retriever = Retriever(url,
                           parent_window,
                           self.progress,
                           destination,
                           useurllib2=self.useurllib2)
     retriever.start()
     while retriever.isAlive():
         self.progress.pulse()
         if self.progress.status:
             retriever.join()
         while gtk.events_pending():
             gtk.main_iteration()
     try:
         if retriever.exception is None:
             if destination:
                 # caller gave an explicit destination file
                 # don't care about the content
                 return True
             if retriever.html:
                 ifile = file(retriever.html[0], 'rb')
                 try:
                     self.page = ifile.read()
                 finally:
                     ifile.close()
                 # check for gzip compressed pages before decoding to unicode
                 if len(self.page) > 2 and self.page[0:2] == '\037\213':
                     self.page = gutils.decompress(self.page)
                 self.page = self.page.decode(self.encode, 'replace')
             else:
                 return False
         else:
             self.progress.hide()
             gutils.urllib_error(_("Connection error"), parent_window)
             return False
     except IOError:
         log.exception('')
     finally:
         urlcleanup()
     return True
Example #7
	def initialize(self):
		self.page = decompress(self.page)
		if self.movie_id == 'anidb':
			aid = aid_pattern.search(self.page)
			if aid:
				self.movie_id = aid.groups()[0]
				self.url = "http://anidb.net/perl-bin/animedb.pl?show=anime&aid=%s" % self.movie_id
			else:
				return False
		self.page = gutils.after(self.page, 'id="layout-content"')
		pos = string.find(self.page, 'class="g_section anime_episodes">')
		if pos > 0:
			self.page = self.page[:pos]
Example #8
 def open_page(self, parent_window=None, url=None):
     if url is None:
         url_to_fetch = self.url
     else:
         url_to_fetch = url
     if parent_window is not None:
         self.parent_window = parent_window
     self.progress.set_data(parent_window, _("Fetching data"),
                            _("Wait a moment"), False)
     retriever = Retriever(url_to_fetch,
                           self.parent_window,
                           self.progress,
                           useurllib2=self.useurllib2)
     retriever.start()
     while retriever.isAlive():
         self.progress.pulse()
         if self.progress.status:
             retriever.join()
         while gtk.events_pending():
             gtk.main_iteration()
     data = None
     try:
         if retriever.exception is None:
             if retriever.html:
                 ifile = file(retriever.html[0], "rb")
                 try:
                     data = ifile.read()
                 finally:
                     ifile.close()
                 # check for gzip compressed pages before decoding to unicode
                 if len(data) > 2 and data[0:2] == '\037\213':
                     data = gutils.decompress(data)
                 try:
                     # try to decode it strictly
                     if self.encode:
                         data = data.decode(self.encode)
                 except UnicodeDecodeError, exc:
                     # something is wrong, perhaps a wrong character set
                     # or some pages are not as strict as they should be
                     # (like OFDb, mixes utf8 with iso8859-1)
                     # I want to log the error here so that I can find it
                     # but the program should not terminate
                     log.error(exc)
                     data = data.decode(self.encode, 'ignore')
         else:
             self.progress.hide()
             gutils.urllib_error(_("Connection error"), self.parent_window)
     except IOError:
         pass
     if url is None:
         self.page = data
     urlcleanup()
     return data
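
open_page above decodes the fetched bytes strictly and, when that fails, logs the error and falls back to the 'ignore' error handler so a misdeclared character set cannot abort the fetch. Factored into a standalone helper, the pattern looks roughly like this (decode_page is an illustrative name, not a helper from the project):

import logging

log = logging.getLogger(__name__)

def decode_page(raw, encoding):
    # Try a strict decode first so bad charset declarations get logged,
    # then fall back to dropping the offending bytes.
    try:
        return raw.decode(encoding)
    except UnicodeDecodeError as exc:
        log.error(exc)
        return raw.decode(encoding, 'ignore')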
Example #9
 def open_page(self, parent_window=None, url=None):
     if url is None:
         url_to_fetch = self.url
     else:
         url_to_fetch = url
     if parent_window is not None:
         self.parent_window = parent_window
     self.progress.set_data(parent_window, _("Fetching data"), _("Wait a moment"), False)
     retriever = Retriever(url_to_fetch, self.parent_window, self.progress)
     retriever.start()
     while retriever.isAlive():
         self.progress.pulse()
         if self.progress.status:
             retriever.join()
         while gtk.events_pending():
             gtk.main_iteration()
     data = None
     try:
         if retriever.html:
             ifile = file(retriever.html[0], "rb")
             try:
                 data = ifile.read()
             finally:
                 ifile.close()
             # check for gzip compressed pages before decoding to unicode
             if len(data) > 2 and data[0:2] == '\037\213':
                 data = gutils.decompress(data)
             try:
                 # try to decode it strictly
                 data = data.decode(self.encode)
             except UnicodeDecodeError, exc:
                 # something is wrong, perhaps a wrong character set
                 # or some pages are not as strict as they should be
                 # (like OFDb, mixes utf8 with iso8859-1)
                 # I want to log the error here so that I can find it
                 # but the program should not terminate
                 log.error(exc)
                 data = data.decode(self.encode, 'ignore')
     except IOError:
         pass
     if url is None:
         self.page = data
     urlcleanup()
     return data
Example #10
    def load_titles(self, fpath, parent_window):
        # animetitles.xml.gz is updated once a day
        remote = None
        download = True
        if isfile(fpath):
            cache_last_modified = datetime.fromtimestamp(getmtime(fpath))
            if cache_last_modified > datetime.now() - timedelta(days=1):
                download = False
            else:
                remote = urllib2.urlopen(ANIME_TITLES_URL)
                last_modified = datetime(*remote.info().getdate('Last-Modified')[:7])
                if cache_last_modified >= last_modified:
                    download = False
                remote.close()

        if download:
            log.info('downloading title list from %s' % ANIME_TITLES_URL)
            self.url = ''
            self.title = ANIME_TITLES_URL
            self.open_search(parent_window, fpath)

        return etree.fromstring(decompress(open(fpath, 'rb').read()))
Example #11
    def load_titles(self, fpath, parent_window):
        # animetitles.xml.gz is updated once a day
        remote = None
        download = True
        if isfile(fpath):
            cache_last_modified = datetime.fromtimestamp(getmtime(fpath))
            if cache_last_modified > datetime.now() - timedelta(days=1):
                download = False
            else:
                remote = urllib2.urlopen(ANIME_TITLES_URL)
                last_modified = datetime(*remote.info().getdate('Last-Modified')[:7])
                if cache_last_modified >= last_modified:
                    download = False
                remote.close()

        if download:
            log.info('downloading title list from %s' % ANIME_TITLES_URL)
            self.url = ''
            self.title = ANIME_TITLES_URL
            self.open_search(parent_window, fpath)

        return etree.fromstring(decompress(open(fpath, 'rb').read()))
Example #12
    def _get(self, url, parent_window, decode=True):
        # initialize the progress dialog once for the following search process
        if not self.progress:
            self.progress = Progress(parent_window)

        data = None
        url = url.encode('utf-8', 'replace')
        log.debug('Using url <%s>', url)
        self.progress.set_data(parent_window, _('Searching'),
                               _('Wait a moment'), True)
        try:
            retriever = Retriever(url, parent_window, self.progress)
            retriever.start()
            while retriever.isAlive():
                self.progress.pulse()
                if self.progress.status:
                    retriever.join()
                while gtk.events_pending():
                    gtk.main_iteration()
            try:
                if retriever.html:
                    ifile = file(retriever.html[0], 'rb')
                    try:
                        data = ifile.read()
                    finally:
                        ifile.close()
                    # check for gzip compressed pages before decoding to unicode
                    if len(data) > 2 and data[0:2] == '\037\213':
                        data = gutils.decompress(data)
                    if decode:
                        data = data.decode('utf-8', 'replace')
                    os.remove(retriever.html[0])
            except IOError:
                log.exception('')
        finally:
            self.progress.hide()
        urlcleanup()
        return data
Example #13
    def _get(self, url, parent_window, decode=True):
        # initialize the progress dialog once for the following search process
        if not self.progress:
            self.progress = Progress(parent_window)

        data = None
        url = url.encode('utf-8', 'replace')
        log.debug('Using url <%s>', url)
        self.progress.set_data(parent_window, _('Searching'), _('Wait a moment'), True)
        try:
            retriever = Retriever(url, parent_window, self.progress)
            retriever.start()
            while retriever.isAlive():
                self.progress.pulse()
                if self.progress.status:
                    retriever.join()
                while gtk.events_pending():
                    gtk.main_iteration()
            try:
                if retriever.html:
                    ifile = file(retriever.html[0], 'rb')
                    try:
                        data = ifile.read()
                    finally:
                        ifile.close()
                    # check for gzip compressed pages before decoding to unicode
                    if len(data) > 2 and data[0:2] == '\037\213':
                        data = gutils.decompress(data)
                    if decode:
                        data = data.decode('utf-8', 'replace')
                    os.remove(retriever.html[0])
            except IOError:
                log.exception('')
        finally:
            self.progress.hide()
        urlcleanup()
        return data
Example #14
	def initialize(self):
		self.page = decompress(self.page)
		if self.movie_id == 'anidb':
			self.movie_id = gutils.trim(self.page, 'animedb.pl?show=addgenren&amp;aid=', '&')
			self.url = "http://anidb.info/perl-bin/animedb.pl?show=anime&aid=%s" % self.movie_id
		self.page = gutils.trim(self.page, 'id="layout-content">', 'class="g_section anime_episodes">')
Example #15
    def __init__(self, locations):
        TRANSLATORS_FILE = os.path.join(locations['share'], 'TRANSLATORS') # remember to encode this file in UTF-8
        IMAGES_DIR = locations['images']

        def _open_url(dialog, link):
            import gutils
            gutils.run_browser(link)
        gtk.about_dialog_set_url_hook(_open_url)

        dialog = gtk.AboutDialog()
        dialog.set_name(version.pname)
        dialog.set_version(version.pversion)
        dialog.set_copyright("Copyright © 2005-2011 Vasco Nunes, Piotr Ożarowski")
        dialog.set_website(version.pwebsite)
        dialog.set_authors([
            _("Main Authors") + ':',
            version.pauthor.replace(', ', '\n') + "\n",
            _("Programmers") + ':',
            'Jessica Katharina Parth <*****@*****.**>',
            'Michael Jahn <*****@*****.**>',
            'Ivo Nunes <*****@*****.**>\n',
            _('Contributors') + ':',
            'Christian Sagmueller <*****@*****.**>\n' \
            'Arjen Schwarz <*****@*****.**>'
        ])
        dialog.set_artists([_("Logo, icon and general artwork " + \
            "by Peek <*****@*****.**>." + \
            "\nPlease visit http://www.peekmambo.com/\n"),
            'seen / unseen icons by dragonskulle <*****@*****.**>'
        ])
        data = None
        if os.path.isfile(TRANSLATORS_FILE):
            data = open(TRANSLATORS_FILE).read()
        elif os.path.isfile(TRANSLATORS_FILE+'.gz'):
            from gutils import decompress
            data = decompress(open(TRANSLATORS_FILE + '.gz').read())
        elif os.name == 'posix':
            if os.path.isfile('/usr/share/doc/griffith/TRANSLATORS'):
                data = open('/usr/share/doc/griffith/TRANSLATORS').read()
            elif os.path.isfile('/usr/share/doc/griffith/TRANSLATORS.gz'):
                from gutils import decompress
                data = decompress(open('/usr/share/doc/griffith/TRANSLATORS.gz').read())
        translator_credits = ''
        if data:
            for line in data.split('\n'):
                if line.startswith('* '):
                    lang = line[2:]
                    if _(lang) != lang:
                        line = "* %s:" % _(lang)
                translator_credits += "%s\n" % line
        else:
            translator_credits = _("See TRANSLATORS file")
        dialog.set_translator_credits(translator_credits)
        logo_file = os.path.abspath(os.path.join(IMAGES_DIR, 'griffith.png'))
        logo = gtk.gdk.pixbuf_new_from_file(logo_file)
        dialog.set_logo(logo)
        if os.path.isfile('/usr/share/common-licenses/GPL-2'):
            dialog.set_license(open('/usr/share/common-licenses/GPL-2').read())
        else:
            dialog.set_license(_("This program is released under the GNU" + \
                "General Public License.\n" + \
                "Please visit http://www.gnu.org/copyleft/gpl.html for details."))
        dialog.set_comments(version.pdescription)
        dialog.connect("response", lambda d, r: d.destroy())
        dialog.show()
Example #16
 def try_decompress(self, data):
     # check for gzip compressed pages
     if data[0:2] == '\037\213':
         return gutils.decompress(data)
     return data
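
The two bytes compared in try_decompress are the gzip magic number, 0x1f 0x8b (octal \037\213). A small round-trip check with the standard gzip module (standing in here for gutils.decompress) shows the test in action:

import gzip
import io

payload = u'<html>page body</html>'.encode('utf-8')
buf = io.BytesIO()
gz = gzip.GzipFile(fileobj=buf, mode='wb')
gz.write(payload)
gz.close()
compressed = buf.getvalue()

print(compressed[:2] == b'\x1f\x8b')   # True: gzip magic number present
restored = gzip.GzipFile(fileobj=io.BytesIO(compressed)).read()
print(restored == payload)             # True: round-trips back to the original bytes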