Example #1
	def create_search_page(mediawiki, basedir):
		meta = {
			"clear_screen": True,
			"links": {
				"0": "0"
			},
			"inputs": {
				"fields": [
					{
						"name": "search",
						"line": 18,
						"column": 9,
						"height": 1,
						"width": 31,
						"bgcolor": 0,
						"fgcolor": 15,
						"validate": "call:MediaWiki_UI.callback_validate_search:" + str(mediawiki.id)
					}
				],
				"confirm": False,
				"target": "call:MediaWiki_UI.callback_search:" + str(mediawiki.id)
			},
			"publisher_color": 0
		}

		data_cept = bytearray()
		data_cept.extend(Cept.parallel_mode())
		data_cept.extend(Cept.set_screen_bg_color(7))
		data_cept.extend(Cept.set_cursor(2, 1))
		data_cept.extend(Cept.set_line_bg_color(0))
		data_cept.extend(b'\n')
		data_cept.extend(Cept.set_line_bg_color(0))
		data_cept.extend(Cept.double_height())
		data_cept.extend(Cept.set_fg_color(7))
		data_cept.extend(Cept.from_str(mediawiki.title))
		data_cept.extend(b'\r\n')
		data_cept.extend(Cept.normal_size())
		data_cept.extend(b'\n')
		data_cept.extend(Cept.set_cursor(18, 1))
		data_cept.extend(Cept.set_fg_color(0))
		data_cept.extend(Cept.from_str(mediawiki.search_string))
		# trick: show cursor now so that user knows they can enter text, even though more
		# data is loading
		data_cept.extend(Cept.show_cursor())
		image = Image_UI(basedir + mediawiki.image, colors = 4)

		data_cept.extend(Cept.define_palette(image.palette))
		data_cept.extend(image.drcs)

		data_cept.extend(Cept.hide_cursor())

		# draw the DRCS image row by row, centered on the 40-column screen
		for y, row in enumerate(image.chars, start=6):
			data_cept.extend(Cept.set_cursor(y, (41 - len(image.chars[0])) // 2))
			data_cept.extend(Cept.load_g0_drcs())
			data_cept.extend(row)

		return (meta, data_cept)
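
The function returns the page's metadata and its raw CEPT byte stream as separate values. A minimal usage sketch, assuming create_search_page is a static method of MediaWiki_UI (as the self-call in Example #2 suggests) and that a configured mediawiki object and the PATH_DATA constant from Example #3 exist:

# Sketch: render the search page and dump the CEPT stream to a file.
# `mediawiki` and PATH_DATA stand in for the caller's real configuration.
meta, data_cept = MediaWiki_UI.create_search_page(mediawiki, PATH_DATA + "55/")
with open("search.cept", "wb") as f:
    f.write(data_cept)
# meta describes the search input field plus the callbacks that validate
# and dispatch whatever the user types into it.
print(meta["inputs"]["fields"][0]["name"])  # -> "search"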
Example #2
    def create_article_page(mediawiki, wikiid, sheet_number):
        is_first_page = sheet_number == 0

        # get HTML from server
        (title, html) = mediawiki.html_for_wikiid(wikiid)

        soup = BeautifulSoup(html, 'html.parser')

        # handle redirects
        for tag in soup.contents[0].findAll('div'):
            if tag.get("class") == ["redirectMsg"]:
                sys.stderr.write("tag: " + pprint.pformat(tag) + "\n")
                for a_tag in tag.findAll('a'):
                    link = a_tag.get("href")
                    title = link[6:]  # strip the leading "/wiki/" prefix
                    sys.stderr.write("a: " + pprint.pformat(title) + "\n")
                    wikiid = mediawiki.wikiid_for_title(title)
                    sys.stderr.write("wikiid: " + pprint.pformat(wikiid) + "\n")
                    return MediaWiki_UI.create_article_page(mediawiki, wikiid, sheet_number)

        # extract URL of first image
        image_url = None
        for tag in soup.contents[0].findAll('img'):
            if tag.get("class") == ["thumbimage"]:
                image_url = tag.get("src")
                if image_url.startswith("//"):  # protocol-relative: add the scheme
                    image_url = mediawiki.base_scheme() + image_url[2:]
                elif image_url.startswith("/"):  # host-relative: add scheme + host
                    image_url = mediawiki.base_url() + image_url
                break

        soup = MediaWiki_UI.simplify_html(soup)

        # try conversion without image to estimate an upper bound
        # on the number of DRCS characters needed on the first page
        page = Cept_page_from_HTML()
        page.article_prefix = mediawiki.article_prefix
        # XXX why is this necessary???
        page.lines_cept = []
        page.soup = soup
        page.link_index = 10
        page.pageid_base = mediawiki.pageid_prefix + str(wikiid)
        page.insert_html_tags(soup.contents[0].children)
        # and create the image with the remaining characters
        image = Image_UI(image_url, drcs_start=page.drcs_start_for_first_sheet)

        #
        # conversion
        #
        page = Cept_page_from_HTML()
        page.title = title
        page.article_prefix = mediawiki.article_prefix

        # tell page renderer to leave room for the image in the top right of the first sheet
        if image is not None and image.chars is not None:
            page.title_image_width = len(image.chars[0])
            # the image extends 2 character rows into the title area
            page.title_image_height = len(image.chars) - 2

        # XXX why is this necessary???
        page.lines_cept = []

        page.soup = soup
        page.link_index = 10
        page.pageid_base = mediawiki.pageid_prefix + str(wikiid)
        page.insert_html_tags(soup.contents[0].children)

        # create one page

        if sheet_number > page.number_of_sheets() - 1:
            return None

        meta = {"publisher_color": 0}

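        # collect the link targets for this sheet; link "0" always leads
        # back to the service's base page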
        if len(page.links_for_page) < sheet_number + 1:
            meta["links"] = {}
        else:
            meta["links"] = page.links_for_page[sheet_number]

        meta["links"]["0"] = mediawiki.pageid_prefix

        if len(page.wiki_link_targets) < sheet_number + 1:
            links_for_this_page = {}
        else:
            links_for_this_page = page.wiki_link_targets[sheet_number]

        for l, target in links_for_this_page.items():
            meta["links"][str(l)] = ("call:MediaWiki_UI.callback_pageid_for_title:"
                                     + str(mediawiki.id) + "|" + str(target))

        meta["clear_screen"] = is_first_page

        data_cept = page.complete_cept_for_sheet(sheet_number, image)

        return (meta, data_cept)
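
Because create_article_page returns None once sheet_number runs past the last sheet, a caller can paginate with a plain loop. A sketch, assuming mediawiki and wikiid come from the surrounding application:

# Sketch: collect every sheet of an article; create_article_page()
# signals the end of the article by returning None.
sheets = []
sheet_number = 0
while True:
    ret = MediaWiki_UI.create_article_page(mediawiki, wikiid, sheet_number)
    if ret is None:
        break
    sheets.append(ret)  # each entry is a (meta, data_cept) pair
    sheet_number += 1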
Example #3
def create_page(pageid):
	ret = None
	# generated pages
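	# each handler may return None for an id it does not know; later,
	# less specific prefix matches below then get a chance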
	if pageid.startswith("00000") or pageid == "9a":
		# login
		ret = Login_UI.create_page(User.user(), pageid)
		basedir = PATH_DATA + "00000/"
	if not ret and (pageid.startswith("71") or pageid.startswith("78")):
		# historic page overview
		ret = Historic_UI.create_page(User.user(), pageid)
		basedir = PATH_DATA + "8/"
	if not ret and pageid.startswith("7"):
		# user management
		ret = User_UI.create_page(User.user(), pageid)
		basedir = PATH_DATA + "7/"
	if not ret and pageid.startswith("8"):
		# messaging
		ret = Messaging_UI.create_page(User.user(), pageid)
		basedir = PATH_DATA + "8/"
	if not ret and pageid.startswith("55"):
		# wikipedia
		basedir = PATH_DATA + "55/"
		ret = MediaWiki_UI.create_page(pageid, basedir)
	if not ret and pageid.startswith("35"):
		# Congress Wiki
		basedir = PATH_DATA + "55/"
		ret = MediaWiki_UI.create_page(pageid, basedir)
	if not ret and pageid.startswith("45"):
		# c64 wiki
		basedir = PATH_DATA + "45/"
		ret = MediaWiki_UI.create_page(pageid, basedir)
	if not ret and pageid.startswith("666"):
		# images
		ret = Image_UI.create_page(pageid)
		basedir = PATH_DATA + "55/"
	if not ret and pageid.startswith("6502"):
		# RSS
		basedir = PATH_DATA + "55/"
		ret = RSS_UI.create_page(pageid, basedir)

	if ret:
		(meta, data_cept) = ret
	else:
		basedir = None
		data_cept = None
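		# find the longest prefix of the page id that names a directory;
		# the remainder of the page id is the filename within it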
		for subdir in ["", "hist/10/", "hist/11/"]:
			for i in reversed(range(len(pageid))):
				testdir = PATH_DATA + subdir + pageid[:i+1]
				if os.path.isdir(testdir):
					filename = pageid[i+1:]
					sys.stderr.write("filename: '" + filename + "'\n")
					basedir = testdir + "/"
					break
	
			if basedir:
				filename_meta = basedir + filename + ".meta"
				filename_cept = basedir + filename + ".cept"
				filename_cm = basedir + filename + ".cm"

				if os.path.isfile(filename_meta):
					with open(filename_meta) as f:
						meta = json.load(f)
					if os.path.isfile(filename_cept):
						with open(filename_cept, mode='rb') as f:
							data_cept = f.read()
					elif os.path.isfile(filename_cm):
						data_cept = CM.read(filename_cm)
					break
	
		if data_cept is None:
			return None

	# merge in directory-wide defaults from a.glob, if present;
	# entries in a.glob override the page's own meta
	try:
		with open(basedir + "a.glob") as f:
			glob_meta = json.load(f)
		meta.update(glob_meta)
	except (OSError, json.JSONDecodeError):
		pass

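	# the output is split in two: cept_1 prepares the screen (cursor off,
	# optional clear, preamble), cept_2 carries the visible page itself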
	cept_1 = bytearray()

	cept_1.extend(Cept.hide_cursor())

	if meta.get("clear_screen", False):
		cept_1.extend(Cept.serial_limited_mode())
		cept_1.extend(Cept.clear_screen())
		last_filename_include = ""
		sys.stderr.write("Clear Screen\n")

	cept_1.extend(create_preamble(basedir, meta))

	cept_2 = bytearray()

	if meta.get("cls2", False):
		cept_2.extend(Cept.serial_limited_mode())
		cept_2.extend(Cept.clear_screen())
		last_filename_include = ""

	# draw the header/footer line if publisher data is present, skip it otherwise
	hf = None
	try:
		hf = headerfooter(pageid, meta["publisher_name"], meta["publisher_color"])
		cept_2.extend(hf)
	except KeyError:
		pass

	if meta.get("parallel_mode", False):
		cept_2.extend(Cept.parallel_mode())

	# payload
	cept_2.extend(data_cept)

	cept_2.extend(Cept.serial_limited_mode())

	# repeat the header/footer line at the bottom, if it was drawn
	if hf is not None:
		cept_2.extend(hf)

	cept_2.extend(Cept.sequence_end_of_page())

	inputs = meta.get("inputs")
	return (cept_1, cept_2, meta["links"], inputs, meta.get("autoplay", False))
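
A sketch of how the five-part return value might be streamed to a terminal; the conn connection object and the page id "55a" are assumptions standing in for the real transport and a real page:

# Sketch: resolve a page id and send both CEPT buffers to the client.
ret = create_page("55a")
if ret is not None:
    cept_1, cept_2, links, inputs, autoplay = ret
    conn.write(cept_1)  # screen setup: cursor off, optional clear, preamble
    conn.write(cept_2)  # header, payload, footer, end-of-page sequence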