def create_search_page(mediawiki, basedir):
    """Build the search page for a MediaWiki source.

    Returns a ``(meta, cept_bytes)`` tuple: ``meta`` describes the page
    (links, the single "search" input field and its callbacks), and the
    byte stream draws the title, the search prompt and the wiki's logo
    image via DRCS characters.
    """
    wiki_id = str(mediawiki.id)
    meta = {
        "clear_screen": True,
        "links": {"0": "0"},
        "inputs": {
            "fields": [
                {
                    "name": "search",
                    "line": 18,
                    "column": 9,
                    "height": 1,
                    "width": 31,
                    "bgcolor": 0,
                    "fgcolor": 15,
                    "validate": "call:MediaWiki_UI.callback_validate_search:" + wiki_id,
                }
            ],
            "confirm": False,
            "target": "call:MediaWiki_UI.callback_search:" + wiki_id,
        },
        "publisher_color": 0,
    }

    stream = bytearray()
    stream += Cept.parallel_mode()
    stream += Cept.set_screen_bg_color(7)

    # Title: double-height white text on a black band at the top.
    stream += Cept.set_cursor(2, 1)
    stream += Cept.set_line_bg_color(0)
    stream += b'\n'
    stream += Cept.set_line_bg_color(0)
    stream += Cept.double_height()
    stream += Cept.set_fg_color(7)
    stream += Cept.from_str(mediawiki.title)
    stream += b'\r\n'
    stream += Cept.normal_size()
    stream += b'\n'

    # Search prompt on the input line.
    stream += Cept.set_cursor(18, 1)
    stream += Cept.set_fg_color(0)
    stream += Cept.from_str(mediawiki.search_string)

    # trick: show cursor now so that user knows they can enter text, even
    # though more data is loading
    stream += Cept.show_cursor()

    # Logo image: define its palette and DRCS glyphs, then place the
    # character rows centered horizontally starting at row 6.
    image = Image_UI(basedir + mediawiki.image, colors=4)
    stream += Cept.define_palette(image.palette)
    stream += image.drcs
    stream += Cept.hide_cursor()

    row = 6
    for glyph_row in image.chars:
        stream += Cept.set_cursor(row, int((41 - len(image.chars[0])) / 2))
        stream += Cept.load_g0_drcs()
        stream += glyph_row
        row += 1

    return (meta, stream)
def draw(self):
    """Redraw this input field's current contents on the terminal.

    Positions the cursor at the field's origin, emits every line of the
    buffered field data (masking characters for password fields), and
    blanks the rest of each line, then writes the whole CEPT sequence to
    stdout in one go.
    """
    cept_data = bytearray(Cept.parallel_limited_mode())
    cept_data.extend(Cept.hide_cursor())
    cept_data.extend(Cept.set_cursor(self.line, self.column))
    # Full-width (40 col) fields can be blanked with the cheap
    # "clear line" control; narrower fields must be padded with spaces.
    fill_with_clear_line = self.clear_line and self.width == 40  # and self.height == 1
    fill_with_spaces = self.clear_line and not fill_with_clear_line
    for i in range(0, self.height):
        l = self.__data[i].rstrip()
        if self.type == "password":
            # Never echo password characters.
            l = "*" * len(l)
        else:
            # A leading INI control character is displayed as "*".
            if l.startswith(chr(Cept.ini())):
                l = "*" + l[1:]
        if l:
            cept_data.extend(self.set_color())
        if fill_with_clear_line:
            cept_data.extend(Cept.clear_line())
            if self.bgcolor:
                cept_data.extend(Cept.set_line_bg_color(self.bgcolor))
        cept_data.extend(Cept.from_str(l))
        # BUG FIX: pad only when the text is SHORTER than the field;
        # the previous "len(l) > self.width" test could never pad and
        # would have asked for a negative number of repeats.
        if fill_with_spaces and len(l) < self.width:
            cept_data.extend(Cept.repeat(" ", self.width - len(l)))
        if i != self.height - 1:
            if self.column == 1:
                # A 40-column line without clear-line wraps by itself;
                # otherwise move to the next row explicitly.
                if self.width != 40 or fill_with_clear_line:
                    cept_data.extend(b'\n')
            else:
                cept_data.extend(
                    Cept.set_cursor(self.line + i + 1, self.column))
    sys.stdout.buffer.write(cept_data)
    sys.stdout.flush()
def create_page(pageid):
    """Resolve *pageid* to a renderable page.

    First tries the dynamic page generators (login, history, user
    management, messaging, the wikis, images, RSS); if none claims the
    page, falls back to static page files on disk, splitting *pageid*
    into the longest matching directory prefix plus a filename.

    Returns ``(cept_1, cept_2, links, inputs, autoplay)`` or ``None``
    when no page could be found.
    """
    ret = None
    # generated pages
    if pageid.startswith("00000") or pageid == "9a":
        # login
        ret = Login_UI.create_page(User.user(), pageid)
        basedir = PATH_DATA + "00000/"
    if not ret and (pageid.startswith("71") or pageid.startswith("78")):
        # historic page overview
        ret = Historic_UI.create_page(User.user(), pageid)
        basedir = PATH_DATA + "8/"
    if not ret and pageid.startswith("7"):
        # user management
        ret = User_UI.create_page(User.user(), pageid)
        basedir = PATH_DATA + "7/"
    if not ret and pageid.startswith("8"):
        # messaging
        ret = Messaging_UI.create_page(User.user(), pageid)
        basedir = PATH_DATA + "8/"
    if not ret and pageid.startswith("55"):
        # wikipedia
        basedir = PATH_DATA + "55/"
        ret = MediaWiki_UI.create_page(pageid, basedir)
    if not ret and pageid.startswith("35"):
        # Congress Wiki
        basedir = PATH_DATA + "55/"
        ret = MediaWiki_UI.create_page(pageid, basedir)
    if not ret and pageid.startswith("45"):
        # c64 wiki
        basedir = PATH_DATA + "45/"
        ret = MediaWiki_UI.create_page(pageid, basedir)
    if not ret and pageid.startswith("666"):
        # images
        ret = Image_UI.create_page(pageid)
        basedir = PATH_DATA + "55/"
    if not ret and pageid.startswith("6502"):
        # RSS
        basedir = PATH_DATA + "55/"
        ret = RSS_UI.create_page(pageid, basedir)

    if ret:
        (meta, data_cept) = ret
    else:
        # Static fallback: look for the longest directory prefix of
        # pageid under PATH_DATA (and the "historic" trees), the rest
        # of pageid is the filename inside that directory.
        basedir = None
        data_cept = None
        for subdir in ["", "hist/10/", "hist/11/"]:  # renamed from `dir` (shadowed builtin)
            for i in reversed(range(0, len(pageid))):
                testdir = PATH_DATA + subdir + pageid[:i + 1]
                if os.path.isdir(testdir):
                    filename = pageid[i + 1:]
                    sys.stderr.write("filename: '" + filename + "'\n")
                    basedir = testdir + "/"
                    break
            if basedir:
                filename_meta = basedir + filename + ".meta"
                filename_cept = basedir + filename + ".cept"
                filename_cm = basedir + filename + ".cm"
                if os.path.isfile(filename_meta):
                    with open(filename_meta) as f:
                        meta = json.load(f)
                    # raw CEPT payload preferred, CM file as fallback
                    if os.path.isfile(filename_cept):
                        with open(filename_cept, mode='rb') as f:
                            data_cept = f.read()
                    elif os.path.isfile(filename_cm):
                        data_cept = CM.read(filename_cm)
                    break
        if data_cept is None:
            return None

    # Optional directory-wide globals; a missing or malformed a.glob is
    # simply ignored (narrowed from a bare `except:`).
    try:
        with open(basedir + "a.glob") as f:
            glob = json.load(f)
            meta.update(glob)  # combine dicts, glob overrides meta
    except (OSError, ValueError):
        pass

    cept_1 = bytearray()
    cept_1.extend(Cept.hide_cursor())
    if meta.get("clear_screen", False):
        cept_1.extend(Cept.serial_limited_mode())
        cept_1.extend(Cept.clear_screen())
        sys.stderr.write("Clear Screen\n")
    cept_1.extend(create_preamble(basedir, meta))

    cept_2 = bytearray()
    if meta.get("cls2", False):
        cept_2.extend(Cept.serial_limited_mode())
        cept_2.extend(Cept.clear_screen())
    # header if the data is actually there, if not ignore this
    # (meta may lack publisher_name/publisher_color; narrowed from a
    # bare `except:` so Ctrl-C etc. still propagate)
    try:
        hf = headerfooter(pageid, meta["publisher_name"],
                          meta["publisher_color"])
        cept_2.extend(hf)
    except Exception:
        pass
    if meta.get("parallel_mode", False):
        cept_2.extend(Cept.parallel_mode())
    # payload
    cept_2.extend(data_cept)
    cept_2.extend(Cept.serial_limited_mode())
    # footer if it exists (hf is unbound when the header failed)
    try:
        cept_2.extend(hf)
    except Exception:
        pass
    cept_2.extend(Cept.sequence_end_of_page())

    inputs = meta.get("inputs")
    return (cept_1, cept_2, meta["links"], inputs, meta.get("autoplay", False))