def success_auth(self, request, username, next_location):
    if username not in self.hi.db_view.user_db:
        msg = 'Could not find user %r' % username
        raise Exception(msg)
    logger.info('successfully authenticated user %s' % username)
    headers = remember(request, username)
    raise HTTPFound(location=next_location, headers=headers)

def make(self, context):
    soup = context.soup
    id_ = '%s:section' % self.id_
    try:
        e = soup_find_absolutely(soup, id_)
    except KeyError:
        msg = 'Cannot find ID %r in document.' % id_
        d = Tag(name='div')
        t = Tag(name='code')
        t.append(self.id_)
        d.append(t)
        note_error2(t, 'ref error', msg)
        return [d]
    logger.info('Adding section %r' % e.attrs['id'])
    # logger.info('e: ' + get_summary_of_section(e))
    e_copy = e.__copy__()
    for eid in self.exceptions:
        logger.info('Removing sections by id "%s"' % eid)
        look_for = eid + ':section'
        s = e_copy.find(id=look_for)
        if s is None:
            msg = ('Could not remove "%s" because could not find element with ID "%s"'
                   % (eid, look_for))
            raise Exception(msg)
        s.extract()
    # logger.info('e_copy: ' + get_summary_of_section(e_copy))
    return [e_copy]

def redirect_to_page(self, e, page):
    if self.options.url_base_public:
        url = self.options.url_base_public + page
    else:
        url = e.root + page
    logger.info('redirecting to page %s\nurl: %s' % (page, url))
    raise HTTPFound(location=url)

def get_link_to_image_file(filename, max_width):
    basename, ext = os.path.splitext(os.path.basename(filename).lower())
    if ext in ['.jpg', '.jpeg']:
        # Open in binary mode: PIL needs bytes, not text.
        with open(filename, 'rb') as f:
            im = Image.open(f)
            if im.size[0] > max_width:
                b = basename + '-' + get_md5(filename)[:4] + '.jpg'
                dest = os.path.join(get_mcdp_tmp_dir(), 'images', b)
                height = int(im.size[1] * max_width / im.size[0])
                new_size = (max_width, height)
                msg = 'Resizing image %s from %s to %s' % (filename, im.size, new_size)
                logger.info(msg)
                if not os.path.exists(dest):
                    make_sure_dir_exists(dest)
                    resized = im.resize(new_size)
                    resized.save(dest)
                return dest
            return filename
    else:
        return filename

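# Hypothetical usage sketch (not part of the original source; the path and width
# below are made up). A JPEG wider than max_width is resized into the mcdp tmp
# directory and the path of the resized copy is returned; anything else is
# returned unchanged.
if os.path.exists('figures/example-photo.jpg'):
    link = get_link_to_image_file('figures/example-photo.jpg', max_width=1024)
    logger.info('Linking image at %s' % link)
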
def find_links_from_master(master_soup, version_soup, raise_errors):
    logger.info('find_links_from_master')
    from mcdp_docs.tocs import sub_link
    # Find all IDs in both documents.
    master_ids = get_ids_from_soup(master_soup)
    version_ids = get_ids_from_soup(version_soup)
    missing = []
    seen = []
    found = []
    for a, eid in a_linking_to_fragments(version_soup):
        seen.append(eid)
        if eid not in version_ids:
            missing.append(eid)
            if eid in master_ids:
                found.append(eid)
                linked_element = master_ids[eid]
                if is_empty_link(a):
                    if not get_classes(a):
                        add_class(a, MCDPManualConstants.CLASS_ONLY_NAME)
                    sub_link(a, eid, linked_element, raise_errors)
                href = 'http://purl.org/dth/%s' % remove_prefix(eid)
                a.attrs['href'] = href
                add_class(a, 'link-to-master')
            else:
                logger.info('Not found %r in master.' % eid)

def logout(self, request):
    logger.info('logging out')
    headers = forget(request)
    logger.debug('headers: %s' % headers)
    came_from = request.referrer
    if came_from is None:
        came_from = self.get_root_relative_to_here(request)
    raise HTTPFound(location=came_from, headers=headers)

def run_prince(html_filename):
    pdf = os.path.splitext(html_filename)[0] + '.pdf'
    cwd = '.'
    cmd = ['prince', '-o', pdf, html_filename]
    system_cmd_show(cwd, cmd)
    cwd = os.getcwd()
    rel = os.path.relpath(pdf, cwd)
    logger.info('Written %s' % rel)

def get_expected_exceptions(markdown_data):
    expected = []
    first_line = markdown_data.split('\n')[0]
    expect_syntax_error = 'syntax_error' in first_line
    if expect_syntax_error:
        expected.append(DPSyntaxError)
        logger.info('Expecting DPSyntaxError')
    expected = tuple(expected)
    return expected

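# Hypothetical usage sketch (the markdown snippets below are made up): a test
# document whose first line mentions 'syntax_error' is expected to raise
# DPSyntaxError; any other document expects no exception.
assert get_expected_exceptions('<!-- syntax_error -->\nmcdp { }') == (DPSyntaxError,)
assert get_expected_exceptions('# a regular document\n') == ()
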
def process_one(filename, page, target_pdf, target_svg, tmpdir, name):
    extracted = os.path.join(tmpdir, name + '_extracted.pdf')
    cropped = os.path.join(tmpdir, name + '_extracted_no_label.pdf')
    extract_page(filename, page, extracted)
    pdfcrop_margins(extracted, cropped, "0mm 0mm 0mm 5cm")
    pdfcrop(cropped, target_pdf)
    pdf2svg(target_pdf, target_svg)
    logger.info('Wrote %s' % friendly_path(target_pdf))
    logger.info('Wrote %s' % friendly_path(target_svg))

def notification(aug, jobs_aug, output_dir):
    res = AugmentedResult()
    res.merge(aug)
    for job_aug in jobs_aug:
        res.merge(job_aug)
        # res.set_result(job_aug.get_result())
    main = os.path.join(output_dir, 'index.html')
    msg = '\n \n The HTML version is ready at %s ' % main
    msg += '\n \n \nPlease wait a few more seconds for the PDF version.'
    logger.info(msg)
    return res

def get_pages(node, prefix):
    logger.info('get_pages(%s, %s)' % (node, prefix))
    for child in node:
        yield "/".join(prefix + (child,))
        c = node[child]
        if c is None:
            msg = 'Found invalid child %r of %r' % (child, prefix)
            raise ValueError(msg)
        for _ in get_pages(c, prefix + (child,)):
            yield _

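# Hypothetical usage sketch (the tree below is made up): get_pages() yields one
# slash-separated path per node, descending depth first.
example_tree = {'intro': {}, 'part1': {'ch1': {}, 'ch2': {}}}
for page in get_pages(example_tree, prefix=()):
    print(page)  # intro, part1, part1/ch1, part1/ch2
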
def render(library, docname, data, realpath, out_dir, generate_pdf,
           stylesheet, symbols, raise_errors, use_mathjax, do_slides):
    res = AugmentedResult()
    if MCDPConstants.pdf_to_png_dpi < 300:
        msg = ('Note that pdf_to_png_dpi is set to %d, which is not suitable for printing'
               % MCDPConstants.pdf_to_png_dpi)
        mcdp_dev_warning(msg)
    from mcdp_docs.pipeline import render_complete
    out = os.path.join(out_dir, docname + '.html')
    html_contents = render_complete(library=library,
                                    s=data,
                                    raise_errors=raise_errors,
                                    realpath=realpath,
                                    generate_pdf=generate_pdf,
                                    symbols=symbols,
                                    use_mathjax=use_mathjax)
    title = docname
    doc = get_minimal_document(html_contents, title=title,
                               stylesheet=stylesheet,
                               add_markdown_css=True,
                               add_manual_css=True)
    soup = bs_entire_document(doc)
    document_final_pass_before_toc(soup, remove=None, remove_selectors=[], res=res)
    generate_and_add_toc(soup, res=res)
    document_final_pass_after_toc(soup, res=res)
    if use_mathjax and symbols:
        add_mathjax_preamble(soup, symbols)
    if do_slides:
        create_reveal(soup, res)
    doc = to_html_entire_document(soup)
    d = os.path.dirname(out)
    if not os.path.exists(d):
        os.makedirs(d)
    with open(out, 'w') as f:
        f.write(doc)
    logger.info('Written %s' % out)
    return out

def get_test_librarian():
    package = dir_from_package_name('mcdp_data')
    folder = os.path.join(package, 'bundled.mcdp_repo')
    if not os.path.exists(folder):
        raise_desc(ValueError, 'Test folders not found.', folder=folder)
    librarian = Librarian()
    librarian.find_libraries(folder)
    libraries = librarian.libraries
    n = len(libraries)
    if n <= 1:
        msg = 'Expected more libraries.'
        raise_desc(ValueError, msg, folder, libraries=libraries)
    orig = list(libraries)
    vname = MCDPConstants.ENV_TEST_LIBRARIES
    if vname in os.environ:
        use = os.environ[vname].split(",")
        logger.debug('environment variable %s = %s' % (vname, use))
        logger.info('Because %s is set, I will use only %s instead of %s.'
                    % (vname, use, orig))
        for _ in orig:
            if _ not in use:
                del libraries[_]
    else:
        pass
        # logger.debug('environment variable %s is unset' % vname)
    vname2 = MCDPConstants.ENV_TEST_LIBRARIES_EXCLUDE
    if vname2 in os.environ:
        exclude = os.environ[vname2].split(',')
        logger.debug('environment variable %s = %s' % (vname2, exclude))
    else:
        exclude = []
        # logger.debug('environment variable %s is unset' % vname2)
    if exclude:
        for a in exclude:
            if a not in libraries:
                msg = '%s = %s but %r is not a library.' % (vname2, exclude, a)
                logger.error(msg)
            else:
                logger.info('Excluding %s' % a)
                del libraries[a]
    return librarian

def eval_assert_empty(r, context):
    from .eval_constant_imp import eval_constant
    assert isinstance(r, CDP.AssertEmpty)
    v = eval_constant(r.value, context)
    seq = get_sequence(v)
    check = len(seq) == 0
    if check:
        logger.info(v.__repr__())
        return passed_value()
    msg = 'Assertion assert_empty() failed.'
    raise_desc(DPUserAssertion, msg, v=v)

def write_results(res, model_name, outdir):
    for r in res:
        assert isinstance(r, tuple), r
        mime, name, x = r
        assert isinstance(x, str), x
        ext = mime
        base = model_name + '-%s.%s' % (name, ext)
        out = os.path.join(outdir, base)
        logger.info('Writing to %s' % out)
        with open(out, 'w') as f:
            f.write(x)

def junit_xml(compmake_db):
    jobs = list(all_jobs(compmake_db))
    logger.info('Loaded %d jobs' % len(jobs))
    if len(jobs) < 10:
        logger.error('too few jobs')
        sys.exit(128)
    test_cases = []
    for job_id in jobs:
        tc = junit_test_case_from_compmake(compmake_db, job_id)
        test_cases.append(tc)
    ts = TestSuite("comptests_test_suite", test_cases)
    return TestSuite.to_xml_string([ts])

def define_jobs_context(self, context):
    options = self.get_options()
    if options.mcdp_settings:
        MCDPManualConstants.activate_tilde_as_nbsp = False
        MCDPConstants.softy_mode = False
    logger.setLevel(logging.DEBUG)
    src = options.src
    src_dirs = [_.strip() for _ in src.split(":") if _ and _.strip()]
    self.info("Src dirs: \n" + "\n -".join(src_dirs))
    raise_errors = options.raise_errors
    out_dir = options.output
    generate_pdf = options.pdf
    output_file = options.output_file
    remove = options.remove
    stylesheet = options.stylesheet
    symbols = options.symbols
    do_last_modified = options.last_modified
    use_mathjax = True if options.mathjax else False
    logger.info('use mathjax: %s' % use_mathjax)
    logger.info('use symbols: %s' % symbols)
    if symbols is not None:
        symbols = open(symbols).read()
    if out_dir is None:
        out_dir = os.path.join('out', 'mcdp_render_manual')
    for s in src_dirs:
        check_bad_input_file_presence(s)
    resolve_references = not options.no_resolve_references
    manual_jobs(context,
                src_dirs=src_dirs,
                output_file=output_file,
                generate_pdf=generate_pdf,
                stylesheet=stylesheet,
                remove=remove,
                use_mathjax=use_mathjax,
                raise_errors=raise_errors,
                symbols=symbols,
                resolve_references=resolve_references,
                do_last_modified=do_last_modified)

def view_authomatic_(self, config, e):
    response = Response()
    provider_name = e.context.name
    logger.info('using provider %r' % provider_name)
    if provider_name not in config:
        msg = ('I got to the URL for provider %r even though it is not in the config.'
               % provider_name)
        raise ValueError(msg)
    authomatic = Authomatic(config=config, secret='some random secret string')
    url_base_public = self.options.url_base_public
    url_base_internal = self.options.url_base_internal
    if not ((url_base_public is None) == (url_base_internal is None)):
        msg = 'Only one of url_base_public and url_base_internal is specified.'
        raise Exception(msg)
    result = authomatic.login(
        MyWebObAdapter(e.request, response, url_base_internal, url_base_public),
        provider_name)
    if not result:
        return response
    # If there is a result, the login procedure is over and we can write to the response.
    response.write('<a href="..">Home</a>')
    if result.error:
        # Login procedure finished with an error.
        msg = result.error.message
        return self.show_error(e, msg, status=500)
    elif result.user:
        # OAuth 2.0 and OAuth 1.0a provide only limited user data on login,
        # so we need to update the user to get more info.
        # if not (result.user.name and result.user.id):
        result.user.update()
        s = "user info: \n"
        for k, v in result.user.__dict__.items():
            s += '\n %s : %s' % (k, v)
        logger.debug(s)
        next_location = config.get('next_location', e.root)
        handle_auth_success(self, e, provider_name, result, next_location)
        # response.write('<pre>' + s + '</pre>')
        # # Welcome the user.
        # response.write(u'<h1>Hi {0}</h1>'.format(result.user.name))
        # response.write(u'<h2>Your id is: {0}</h2>'.format(result.user.id))
        # response.write(u'<h2>Your email is: {0}</h2>'.format(result.user.email))
    # Just a regular login.
    return response

def embed_css_files(soup):
    """ Looks for <link> elements pointing to CSS and embeds them if they are local files. """
    # <link href="..." rel="stylesheet" type="text/css"/>
    for link in list(soup.findAll('link', attrs={'rel': 'stylesheet', 'href': True})):
        href = link.attrs['href']
        if href.startswith('/'):  # not on Windows?
            logger.info('Embedding %r' % href)
            data = open(href).read()
            style = Tag(name='style')
            style.attrs['type'] = 'text/css'
            style.string = data
            link.replace_with(style)

def add_github_links_if_edit_url(soup):
    """ If an element has the attribute 'github-edit-url', add a little edit icon. """
    attname = 'github-edit-url'
    nfound = 0
    for h in soup.findAll(['h1', 'h2', 'h3', 'h4'], attrs={attname: True}):
        nfound += 1
        a = Tag(name='a')
        a.attrs['href'] = h.attrs[attname]
        a.attrs['class'] = 'github-edit-link'
        a.string = ' ✎'
        h.append(a)
    logger.info('Found %d elements with attribute %r' % (nfound, attname))

def go(self):
    options = self.get_options()
    if options.config is not None:
        logger.info('Reading configuration from %s' % options.config)
        logger.warn('Other options from the command line will be ignored.')
        parser = RawConfigParser()
        parser.read(options.config)
        sections = parser.sections()
        logger.info('sections: %s' % sections)
        s = 'app:main'
        if s not in sections:
            msg = 'Could not find section "%s": available are %s.' % (s, format_list(sections))
            msg += '\n file %s' % options.config
            raise Exception(msg)  # XXX
        settings = dict((k, parser.get(s, k)) for k in parser.options(s))
        prefix = 'mcdp_web.'
        mcdp_web_settings = get_only_prefixed(settings, prefix, delete=True)
        options = parse_mcdpweb_params_from_dict(mcdp_web_settings)
        logger.debug('Using these options: %s' % options)
    else:
        logger.info('No configuration .ini specified (use --config).')
        settings = {}
    wa = WebApp(options, settings=settings)
    # Write warning messages now.
    wa.get_authomatic_config()
    msg = """Welcome to PyMCDP!

To access the interface, open your browser at the address

    http://localhost:%s/

Use Chrome, Firefox, or Opera - Internet Explorer is not supported.
""" % options.port
    logger.info(msg)
    if options.delete_cache:
        pass  # XXX: warning deprecated
        # logger.info('Deleting cache...')
        # wa._refresh_library(None)
    wa.serve(port=options.port)

def process_bibtex2html_output(bibtex2html_output, d):
    """ From the bibtex2html output, get a clean version. """
    frag = BeautifulSoup(bibtex2html_output, 'html.parser')
    with open(os.path.join(d, 'fixed_interpreted.html'), 'w') as f:
        f.write(str(frag))
    res = Tag(name='div')
    ids = []
    for dt in list(frag.select('dt')):
        assert dt.name == 'dt'
        name = dt.a.attrs['name']
        name = 'bib:' + name
        ids.append(name)
        dd = dt.findNext('dd')
        assert dd.name == 'dd'
        entry = dd.__copy__()
        entry.name = 'cite'
        entry.attrs['id'] = name
        try_to_replace_stuff = True
        if try_to_replace_stuff:
            for x in list(entry.descendants):
                if isinstance(x, NavigableString):
                    s = x.string.encode('utf-8')
                    s = s.replace('\n', ' ')
                    s = s.replace('[', '')
                    s = s.replace('|', '')
                    s = s.replace(']', '')
                    y = NavigableString(unicode(s, 'utf-8'))
                    x.replace_with(y)
                    # print('string %r' % x.string)
                if isinstance(x, Tag) and x.name == 'a' and x.string == 'bib':
                    x.extract()
        res.append(NavigableString('\n'))
        res.append(entry)
        res.append(NavigableString('\n'))
    res.attrs['id'] = 'bibliography_entries'
    logger.info('Found %d bib entries.' % len(ids))
    return str(res)

def go(path):
    db = StorageFilesystem(path, compress=True)
    args = ['failed']
    cq = CacheQueryDB(db)
    context = Context(db)
    if not list(db.keys()):
        msg = 'Compmake DB is empty'
        logger.error(msg)
    else:
        job_list = parse_job_list(args, context=context, cq=cq)
        s = ""
        if job_list:
            job_list = job_list[:2]
            s += 'Running on host: %s' % hostname
            s += "\nJob failed in path %s" % path
            for job_id in job_list:
                if job_cache_exists(job_id, db):
                    cache = get_job_cache(job_id, db)
                    status = Cache.state2desc[cache.state]
                    s += "\nFailure of job %s" % job_id
                    if cache.state in [Cache.FAILED, Cache.BLOCKED]:
                        why = str(cache.exception).strip()
                    else:
                        why = 'No why for job done.'
                    s += '\n' + "```\n" + why + "\n```"
                    s += '\n\n'
                else:
                    logger.warning('no cache for %s' % job_id)
            s += '\n@censi'
            s += '\n@jacopo'
            s += '\n@paull'
            s += '\n@walter'
            s += '\n@daniele'
            print(s)
            slack.chat.post_message(channel, s, link_names=1)
        else:
            s = 'Everything is fine'
            # slack.chat.post_message(channel, s)
            logger.info('No jobs found')

def go():
    fn = sys.argv[1]
    outdir = sys.argv[2]
    warning = os.path.join(outdir, 'DO_NOT_PLACE_OTHER_FILES_HERE.txt')
    write_data_to_file('It is autogenerated', warning)
    f = open(fn, 'rb')
    pdfReader = PyPDF2.PdfFileReader(f)
    n = pdfReader.numPages
    results = []
    for i in range(n):
        pageObj = pdfReader.getPage(i)
        try:
            name = get_figure_name(pageObj)
            results.append((i, name))
            logger.info('page %d name = %r' % (i, name))
        except NoFigureName as _e:
            logger.error(str(_e))
    allt = ""
    tmpdir = create_tmpdir('clipart')
    for i, name in results:
        target_pdf = os.path.join(outdir, name + '.pdf')
        target_svg = os.path.join(outdir, name + '.svg')
        page = i + 1
        process_one(fn, page, target_pdf, target_svg, tmpdir, name)
        t = """
<div figure-id="fig:NAME">
    <figcaption>Add caption here</figcaption>
    <img src="NAME.svg" style="width: 80%; height: auto"/>
</div>
"""
        t = t.replace('NAME', name)
        allt += t
    instructions = os.path.join(outdir, 'how_to_use.md.example')
    write_data_to_file(allt, instructions)
    print(allt)

def erase_job_if_files_updated(compmake_context, promise, filenames):
    """ Invalidates the job if any of the files is newer than the job's last result. """
    check_isinstance(promise, Promise)
    check_isinstance(filenames, (list, tuple))

    def friendly_age(ts):
        age = time.time() - ts
        return '%.3fs ago' % age

    filenames = list(filenames)
    for _ in filenames:
        if not os.path.exists(_):
            msg = 'File does not exist: %s' % _
            raise ValueError(msg)
    last_update = max(os.path.getmtime(_) for _ in filenames)
    db = compmake_context.get_compmake_db()
    job_id = promise.job_id
    cache = get_job_cache(job_id, db)
    if cache.state == cache.DONE:
        done_at = cache.timestamp
        if done_at < last_update:
            show_filenames = filenames if len(filenames) < 3 else '(too long to show)'
            logger.info('Cleaning job %r because files updated %s' % (job_id, show_filenames))
            logger.info(' files last updated: %s' % friendly_age(last_update))
            logger.info('       job last done: %s' % friendly_age(done_at))
            mark_to_remake(job_id, db)

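# Hypothetical usage sketch (the job and file names are made up, and a compmake
# context is assumed to exist): after scheduling a job, invalidate it whenever
# its source files are newer than its stored result.
# promise = context.comp(render_document, 'intro.md')
# erase_job_if_files_updated(context, promise, ['intro.md', 'stylesheet.css'])
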
def do_plots(logger, model_name, plots, outdir, extra_params,
             maindir, extra_dirs, use_cache):
    if '.mcdp' in model_name:
        model_name2 = model_name.replace('.mcdp', '')
        msg = 'Arguments should be model names, not file names.'
        msg += ' Interpreting %r as %r.' % (model_name, model_name2)
        logger.warn(msg)
        model_name = model_name2
    if use_cache:
        cache_dir = os.path.join(outdir, '_cached/mcdp_plot_cache')
        logger.info('using cache %s' % cache_dir)
    else:
        cache_dir = None
    librarian = Librarian()
    for e in extra_dirs:
        librarian.find_libraries(e)
    library = librarian.get_library_by_dir(maindir)
    if cache_dir is not None:
        library.use_cache_dir(cache_dir)
    assert library.library_name is not None
    is_ndp = library.file_exists(model_name + '.mcdp')
    is_poset = library.file_exists(model_name + '.mcdp_poset')
    if is_ndp:
        results = do_plots_ndp(model_name, library, plots, extra_params)
    elif is_poset:
        results = do_plots_poset(model_name, library, plots)
    else:
        msg = 'Cannot find anything corresponding to %r.' % model_name
        raise_desc(ValueError, msg)
        return
    write_results(results, model_name, outdir)

def look_for_files(srcdirs, pattern):
    """ Looks for files matching the pattern; excludes files with "exclude" in the name. """
    results = []
    results_absolute = set()
    for d0 in srcdirs:
        d = expand_all(d0)
        if not os.path.exists(d):
            msg = 'Could not find directory %r' % d
            msg += '\nSearching from directory %r' % os.getcwd()
            raise Exception(msg)
        filenames = locate_files(d, pattern,
                                 followlinks=True,
                                 include_directories=False,
                                 include_files=True,
                                 normalize=False)
        ok = []
        for fn in filenames:
            fn0 = os.path.realpath(fn)
            if 'exclude' in fn0:
                logger.info('Excluding file %r because of string "exclude" in it' % fn)
            else:
                if fn0 in results_absolute:
                    logger.debug('Reached the file %s twice' % fn0)
                else:
                    results_absolute.add(fn0)
                    ok.append(fn)
        results.extend(natsorted(ok))
    logger.info('Found %d files with pattern %s in %s' % (len(results), pattern, srcdirs))
    return results

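# Hypothetical usage sketch (directories and pattern are made up): collect all
# markdown sources under two roots, skipping any path containing "exclude".
# sources = look_for_files(['docs', 'docs-extra'], '*.md')
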
def split_file(html, directory):
    soup = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    body = soup.html.body
    assert body is not None, soup
    # Extract the main toc if it is there.
    main_toc = body.find(id='main_toc')
    # if main_toc:
    #     main_toc.extract()
    filename2contents = split_in_files(body)
    add_prev_next_links(filename2contents)
    for filename, contents in list(filename2contents.items()):
        html = Tag(name='html')
        head = soup.html.head.__copy__()
        html.append(head)
        body = Tag(name='body')
        if main_toc:
            tocdiv = Tag(name='div')
            tocdiv.attrs['id'] = 'tocdiv'
            tocdiv.append(main_toc.__copy__())
            body.append(tocdiv)
        body.append(contents)
        html.append(body)
        PAGE_IDENTIFIER = filename.replace('.html', '')
        PAGE_URL = 'https://duckietown.github.io/duckuments/master/' + filename
        s = disqus
        s = s.replace('PAGE_IDENTIFIER', PAGE_IDENTIFIER)
        s = s.replace('PAGE_URL', PAGE_URL)
        disqus_section = bs(s)
        from mcdp import logger
        logger.info(str(s))
        body.append(disqus_section)
        filename2contents[filename] = html
    update_refs(filename2contents)
    write_split_files(filename2contents, directory)

def memo_disk_cache2(cache_file, data, f):
    """ Simple on-disk memoization: reuse the pickled result if `data` matches. """
    from mcdp import logger
    dirname = os.path.dirname(cache_file)
    cachedir = os.path.join(dirname)
    if not os.path.exists(cachedir):
        try:
            os.makedirs(cachedir)
        except:
            # Another process might have created it in the meantime.
            if os.path.exists(cachedir):
                pass
            else:
                raise
    if os.path.exists(cache_file):
        # logger.info('Reading from cache %r.' % cache_file)
        try:
            res = safe_pickle_load(cache_file)
            if data != res['data']:
                logger.info('Stale cache, recomputing.')
            else:
                return res['result']
        except Exception as e:
            logger.error(e)
    result = f()
    if MCDPConstants.log_cache_writes:
        logger.info('Writing to cache %s.' % cache_file)
    res = dict(data=data, result=result)
    safe_pickle_dump(res, cache_file)
    return result

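# Hypothetical usage sketch (the cache file and computation are made up): the
# expensive function is recomputed only when `data` differs from the cached value.
def _expensive_computation():
    return sum(i * i for i in range(10 ** 6))

value = memo_disk_cache2('out/cache/squares.pickle',
                         data={'n': 10 ** 6},
                         f=_expensive_computation)
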
def find_pickling_error(obj, protocol=pickle.HIGHEST_PROTOCOL):
    sio = StringIO()
    try:
        pickle.dumps(obj)
    except Exception:
        # Expected path: the object cannot be pickled; keep the traceback for the report.
        s1 = traceback.format_exc()
    else:
        s1 = ''
        msg = ('Strange! I could not reproduce the pickling error '
               'for the object of class %s' % describe_type(obj))
        logger.info(msg)
    pickler = MyPickler(sio, protocol)
    try:
        pickler.dump(obj)
    except Exception:
        msg = pickler.get_stack_description()
        msg += '\n --- Current exception----\n%s' % traceback.format_exc()
        msg += '\n --- Old exception----\n%s' % s1
        return msg
    else:
        msg = 'I could not find the exact pickling error.'
        raise Exception(msg)