import grip


# The class wrapper is illustrative; the original snippet only defines
# __call__, whose (target, source, env) signature is an SCons action.
class GripExportAction(object):

    def __call__(self, target, source, env):
        # Render each markdown source into its corresponding HTML target.
        # (The original used Python 2's itertools.izip and print statement.)
        for s, t in zip(source, target):
            in_file = str(s)
            out_file = str(t)
            try:
                grip.export(path=in_file, render_wide=True, out_filename=out_file)
            except Exception as error:
                print("cuppa: error: grip.export( path={}, render_wide=True,"
                      " out_filename={}) failed with error [{}]".format(
                          in_file, out_file, error))
        return None
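# For context: a (target, source, env) callable is a valid SCons builder
# action (cuppa is SCons-based). A minimal wiring sketch, assuming the
# illustrative GripExportAction name above and stock SCons APIs:
from SCons.Script import Builder, Environment

env = Environment()
env.Append(BUILDERS={'GripDoc': Builder(action=GripExportAction())})
env.GripDoc('README.html', 'README.md')  # one .md source -> one .html target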
import os

from grip import export


def main():
    script_dir = os.path.dirname(__file__)
    toybox_dir = os.path.split(script_dir)[0]
    site_dir = os.path.join(toybox_dir, 'puppet', 'modules', 'site')
    www_files = os.path.join(site_dir, 'files', 'www')
    readme_f = os.path.join(toybox_dir, 'README.md')
    index_f = os.path.join(www_files, 'index.html')
    assert os.path.exists(readme_f)
    export(readme_f, out_filename=index_f)
    print('done.')
def write_figs_overview_html(figs_directory, show_image_info=True):
    sorted_filenames_dict = get_filenames_sorted_by_the_last_element_of_basename(figs_directory)
    n_rows_per_column = [len(val) for val in sorted_filenames_dict.values()]
    print('n_rows_per_column', n_rows_per_column, 'in', figs_directory.rstrip('/') + '.html')
    n_rows = max(n_rows_per_column)
    with open(figs_directory.rstrip('/') + '.md', 'w') as f:
        write_col_names(f, sorted_filenames_dict.keys())
        for irow in range(n_rows):
            if show_image_info:
                write_col_img_info(f, irow, sorted_filenames_dict)
            write_col_img_inludes(f, irow, sorted_filenames_dict)
    grip.export(figs_directory.rstrip('/') + '.md')
    os.remove(figs_directory.rstrip('/') + '.md')
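# The grouping helper above is assumed by the snippet. A plausible reading
# of its name, purely illustrative (the real implementation is not shown):
import os
from collections import defaultdict

def get_filenames_sorted_by_the_last_element_of_basename(figs_directory):
    # Group figure filenames by the last '_'-separated token of their
    # basename; each group becomes one column of the overview table.
    groups = defaultdict(list)
    for name in sorted(os.listdir(figs_directory)):
        stem = os.path.splitext(name)[0]
        groups[stem.rsplit('_', 1)[-1]].append(name)
    return dict(groups)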
import os
import shutil


def main():
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog -v 1.0')
    parser.add_option('-v', '--version', dest='version',
                      help="Version number for the SDK")
    opts, args = parser.parse_args()
    if not opts.version:
        parser.error('please specify version with -v')
    version = opts.version  # parser.error() exits, so version is always set here

    pubDir = '/publish'
    binDir = '/publish/bin'
    demoDir = '/demo'
    sdkDir = '/sdk'
    archiveName = '/CocosPlay_SDK'

    print('==> Clean up git repo')
    from subprocess import call
    call("git clean -dxf", shell=True)
    print()

    print('==> creating publish folder')
    currDir = os.getcwd()
    publishPath = currDir + pubDir
    binPath = currDir + binDir
    docPath = currDir + '/docs/guide.md'
    outDocPath = binPath + '/Readme.html'
    print(publishPath)
    if os.path.exists(publishPath):
        shutil.rmtree(publishPath)
    os.makedirs(publishPath)
    os.makedirs(binPath)
    print()

    print('==> generating documentation from markdown')
    from grip import export
    export(path=docPath, out_filename=outDocPath)
    print()

    print('==> copy resources to publish folder')
    copyanything(currDir + sdkDir, binPath + sdkDir)
    copyanything(currDir + demoDir, binPath + demoDir)
    print()

    print('==> create archive')
    zip(binPath, publishPath + archiveName + version)  # project-local zip helper, shadows the builtin
    print()
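# Both SDK publish scripts lean on a copyanything() helper that is not shown.
# The name matches a well-known Stack Overflow recipe, so a plausible
# implementation (an assumption, not the project's verified code) is:
import errno
import shutil

def copyanything(src, dst):
    # Copy a directory tree, falling back to a plain file copy when the
    # source is not a directory.
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno in (errno.ENOTDIR, errno.EINVAL):
            shutil.copy(src, dst)
        else:
            raise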
def markdown_to_html(markdown, html_file, title):
    # Smuggle angle brackets past the renderer as :lt:/:gt: sentinels, then
    # restore them in the exported HTML so raw tags in the markdown survive.
    markdown = markdown.replace('<', ':lt:').replace('>', ':gt:')
    with NamedTemporaryFile(mode='w') as md, \
            NamedTemporaryFile(mode='r') as html, \
            open(html_file, 'w') as f:
        md.write(markdown)
        md.flush()
        grip.export(title=title, path=md.name, out_filename=html.name,
                    render_wide=True, quiet=True)
        for l in html:
            f.write(l.replace(':lt:', '<').replace(':gt:', '>'))
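# Usage sketch (values illustrative): literal angle brackets in the source
# survive the round trip instead of being escaped or stripped by the renderer.
markdown_to_html('Prefer vector<int> over raw arrays.', 'notes.html', title='Notes')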
import os
import shutil


# A near-identical publish script variant (AnyStore_SDK, no git clean step).
def main():
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog -v 1.0')
    parser.add_option('-v', '--version', dest='version',
                      help="Version number for the SDK")
    opts, args = parser.parse_args()
    if not opts.version:
        parser.error('please specify version with -v')
    version = opts.version  # parser.error() exits, so version is always set here

    pubDir = '/publish'
    binDir = '/publish/bin'
    demoDir = '/Demo'
    sdkDir = '/SDK'
    archiveName = '/AnyStore_SDK'

    print('==> creating publish folder')
    currDir = os.getcwd()
    publishPath = currDir + pubDir
    binPath = currDir + binDir
    docPath = currDir + '/docs/guide.md'
    outDocPath = binPath + '/Readme.html'
    print(publishPath)
    if os.path.exists(publishPath):
        shutil.rmtree(publishPath)
    os.makedirs(publishPath)
    os.makedirs(binPath)
    print()

    print('==> generating documentation from markdown')
    from grip import export
    export(path=docPath, out_filename=outDocPath)
    print()

    print('==> copy resources to publish folder')
    copyanything(currDir + sdkDir, binPath + sdkDir)
    copyanything(currDir + demoDir, binPath + demoDir)
    print()

    print('==> create archive')
    zip(binPath, publishPath + archiveName + version)  # project-local zip helper, shadows the builtin
    print()
def to_html(self, *, render_inline=True, title=None):
    if self.path is not None:
        return grip.export(self.path, title=title, render_inline=render_inline)
    else:
        logging.warning("save_file is None")
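# Caveat, hedged: as far as I know, grip.export() writes the HTML to a file
# (deriving '<path>.html' when out_filename is omitted) and returns None, so
# the return above yields no markup. If the caller needs the HTML string,
# grip.render_page may be a better fit (a sketch, assuming a recent grip):
import grip

def to_html_string(path, title=None, render_inline=True):
    return grip.render_page(path, title=title, render_inline=render_inline)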
def grippem(path, username=None, password=None):
    """Recurse through folders, find .md files, run grip on them.

    Requires [grip](https://github.com/joeyespo/grip).
    """
    at_least_one = False
    print("grippem walking through:", path)
    walker = os.walk(path)
    for cwd in walker:
        for f in cwd[2]:
            f = os.path.realpath(os.path.join(path, cwd[0], f))
            if fnmatch(f, "*.md"):
                at_least_one = True
                print("markdown file found:", f)
                # gfm is the user-content flag of older grip releases
                # (newer ones spell it user_content).
                grip.export(f, gfm=True, username=username, password=password)
    if not at_least_one:
        print("No markdown files found.")
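# Usage sketch (path and credentials illustrative): authenticating raises
# the GitHub API rate limit when rendering many files in one walk.
grippem("docs", username="octocat", password="personal-access-token")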
def CompileREADME():
    # https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
    def get_hash(filename):
        import hashlib

        def hash_bytestr_iter(bytesiter, hasher, ashexstr=False):
            for block in bytesiter:
                hasher.update(block)
            return (hasher.hexdigest() if ashexstr else hasher.digest())

        def file_as_blockiter(afile, blocksize=65536):
            with afile:
                block = afile.read(blocksize)
                while len(block) > 0:
                    yield block
                    block = afile.read(blocksize)

        return hash_bytestr_iter(file_as_blockiter(open(filename, 'rb')),
                                 hashlib.sha256(), True)

    # tor.1.txt production
    try:
        from xtor import TorTxt
        tt = TorTxt(force=False)
        if tt.run() is True:
            tt.copy(os.path.join('theonionbox', 'tor'))
    except:
        pass

    old_md_hash = ''
    old_html_hash = ''
    old_rst_hash = ''

    current_md_hash = 'doit'
    current_html_hash = 'doit'
    current_rst_hash = 'doit'

    try:
        with open('readme/README.hash', 'r') as f:
            lines = f.readlines()
            if len(lines) == 3:
                old_md_hash = lines[0].strip()
                old_html_hash = lines[1].strip()
                old_rst_hash = lines[2].strip()
    except Exception as e:
        # raise e
        pass

    try:
        current_md_hash = get_hash('README.md')
        current_html_hash = get_hash('readme/README.html')
        current_rst_hash = get_hash('readme/README.rst')
    except Exception as e:
        # raise e
        pass

    hash_changed = False

    if (old_md_hash != current_md_hash) or (old_html_hash != current_html_hash):
        from grip import export
        export(path='README.md', out_filename='readme/README.html',
               title='The Onion Box v{}'.format(__version__))
        hash_changed = True
    else:
        print('Skipping generation of README.html; files unchanged!')

    do_rst = False
    if do_rst is True:
        if (old_md_hash != current_md_hash) or (old_rst_hash != current_rst_hash):
            # path defined by: brew install pandoc
            # os.environ.setdefault('PYPANDOC_PANDOC', '/usr/local/Cellar/pandoc/2.1')
            from pypandoc import convert_file
            print('Generating README.rst')
            convert_file('README.md', 'rst', outputfile="readme/README.rst")
            hash_changed = True
        else:
            print('Skipping generation of README.rst; files unchanged!')
    else:
        print('Generation of README.rst intentionally deactivated!')

    if hash_changed is True:
        with open('readme/README.hash', 'w') as f:
            f.write(current_md_hash + '\n' + current_html_hash + '\n' + current_rst_hash)
def report(**kwargs):
    """Fill the markdown report template with the resource-diff data and
    export it to HTML with grip."""
    resources_diff = kwargs['resources_diff']
    measure = kwargs.get('measure', 1024)
    report_filename = kwargs.get('report_filename', 'report')
    # FIXME
    title = kwargs.get('title', u'android资源分析结果')  # "Android resource analysis results"
    path = kwargs.get('path', '.')

    # FIXME
    with open("report_tpl.md") as f:
        report_md_tpl = f.read()

    var = []
    for key in ('add', 'remove', 'increase', 'decrease'):
        var.append(resources_diff[key]['count'])
        var.append(resources_diff[key]['size'])

    # FIXME
    # '增加' means "increased", '减少' means "decreased"
    var.append('增加' if resources_diff['total']['size_diff'] > 0 else '减少')
    var.append(resources_diff['total']['size_diff'])
    var.append(resources_diff['total']['size_diff_rate'])

    tpl = '''
    <tr>
        <td>%s</td>
        <td>%s</td>
        <td></td>
    </tr>
    '''

    # large resources
    large_resources = kwargs['large_resources']
    var.append(''.join(tpl % (res['file'], res['size'] / measure)
                       for res in large_resources))

    # large dynamic libraries
    large_so_libs = kwargs['large_so_libs']
    var.append(''.join(tpl % (so['file'], so['size'] / measure)
                       for so in large_so_libs))

    # similar jar packages
    similar_jars = kwargs['similar_jars']
    var.append(''.join(tpl % (jar['file'], jar['size'] / measure)
                       for jar in similar_jars['json']['list']))

    report_md = report_md_tpl % tuple(var)
    report_md_path = path + '/' + report_filename + ".md"
    with open(report_md_path, 'w') as f:
        f.write(report_md)

    report_html_path = path + '/' + report_filename + '.html'
    export(path=report_md_path, out_filename=report_html_path, title=title)
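# The template is filled positionally, which is brittle. An illustrative
# guard (not in the original) that could sit just before the
# report_md_tpl % tuple(var) line:
assert report_md_tpl.count('%s') == len(var), \
    'report_tpl.md placeholder count must match the collected values'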
try:
    os.makedirs(save_dir_md)
    if args.generate_html:
        os.makedirs(save_dir_html)
except OSError as exception:
    if exception.errno != errno.EEXIST:
        raise

with open(save_full_md, 'w') as the_file:
    the_file.write(markdown)

# use grip to render the .md to .html
if args.generate_html:
    # https://github.com/joeyespo/grip
    # Transform GitHub-flavored Markdown to HTML
    from grip import export
    export(path=save_full_md, gfm=True, out_filename=save_full_html,
           username=args.github_user, password=args.github_password)

# write the endpoints toc
# TODO: sort endpoints by URI
toc_name = args.dest + os.sep + 'endpoints/markdown/toc.md'
try:
    os.remove(toc_name)
except:
    pass
with open(toc_name, 'w') as the_file:
    the_file.write(endpoints_result['markdown'].encode('utf8'))

# write the schemas
# for schema in sorted(schemas):
return "{}\n".format(text) @staticmethod def link(text, link): return "[{}]({})\n".format(text, link) # Log into reddit reddit = praw.Reddit(client_id=REDDIT_OUATH_CLIENT_ID, client_secret=REDDIT_OUATH_CLIENT_SECRET, user_agent='my user agent') subreddits = ["rust", "elm", "ProgrammerHumor", "python"] NUM_OF_POSTS = 5 with open('digest.md', 'w') as file: file.write(Markdown.h1("Reddit Digest")) for subreddit in subreddits: file.write(Markdown.h2(subreddit.capitalize())) for post in reddit.subreddit(subreddit).top('week', limit=NUM_OF_POSTS): file.write(Markdown.h3(post.title)) if (len(post.selftext) > 400): file.write(Markdown.text(post.selftext[:400] + ' ...')) else: file.write(Markdown.text(post.selftext)) file.write("\n" + Markdown.link("link",post.url)) grip.export(path="digest.md", quiet=True)
def _generate_html(self):
    # TODO :: Add File Error Handling
    export(path=self._build_filename('md'),
           out_filename=self._build_filename('html'))
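# One plausible shape for the _build_filename helper assumed above; the
# _base_name attribute is hypothetical (the real implementation is not shown).
def _build_filename(self, extension):
    return '{}.{}'.format(self._base_name, extension)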
def main():
    parser = optparse.OptionParser()
    parser.add_option('-A', '--algorithm', help='Path to algorithm config', type=str)
    parser.add_option('-R', '--runner', help='Path to core', type=str)
    parser.add_option('-T', '--tasks', help='Path to folder with tasks', type=str)
    parser.add_option('-N', '--number_of_runs', help='Number of runs per tasks', type=int)
    parser.add_option('-O', '--output', help='Output folders', type=str)
    parser.add_option('-P', '--port', help='Running port', type=int, default=5017)
    parser.add_option('--parallel', help='Number of parallel threads', type=int, default=1)
    options, _ = parser.parse_args()

    output_folder = options.output
    tasks_folder = options.tasks
    number_of_runs = options.number_of_runs
    port = options.port
    result_folder = os.path.join(output_folder, 'results')

    tasks = sorted(
        list(filter(lambda f: f.endswith('json'), os.listdir(tasks_folder))))
    process_base = get_process_template(options.runner, options.algorithm)

    print('>>> Preparing folder')
    if os.path.exists(result_folder):
        shutil.rmtree(result_folder)
    os.makedirs(result_folder)

    print('>>> Preparing processes')
    processes = []
    counter = 0
    for task_id, task in enumerate(tasks):
        task_name = task[:-5]
        for i in range(number_of_runs):
            p = process_base.copy()
            p += ['--task', tasks_folder + '/' + task]
            p += ['--port', str(port + counter % options.parallel)]
            p += [
                '--result',
                os.path.join(result_folder, '{0}_{1}'.format(task_name, i + 1)),
                '--output', 'json'
            ]
            processes.append(p)
            counter += 1

    print('>>> Running optimization tasks')

    def call_process(p):
        subprocess.call(p)
        return

    i = 0
    while i < len(processes):
        threads = []
        for t_id in range(options.parallel):
            if i >= len(processes):  # guard the last, possibly partial batch
                break
            threads.append(Thread(target=call_process, args=[processes[i]]))
            threads[-1].start()
            print('>>> >>> Started {0}/{1} process'.format(i + 1, len(processes)))
            i += 1
        for t in threads:
            t.join()

    print('>>> Gathering statistics')
    results = {}
    for task_id, task in enumerate(tasks):
        task_name = task[:-5]
        task_json = json.load(open(os.path.join(tasks_folder, task), 'r'))
        x_best = {}
        for kvp in task_json['solution']:
            x_best[kvp['name']] = kvp['value']
        core = UnconstrainedOptimization.from_dict(task_json)

        result_files = list(
            filter(lambda f: task_name in f, os.listdir(result_folder)))
        filtered_results = sorted(
            list(filter(lambda f: 'real' in f, result_files)))
        if len(filtered_results) > 0:
            result_files = filtered_results

        results[task_name] = {
            'values': np.zeros(shape=(len(result_files),)),
            'points': [],
            'x*': x_best,
            'f*': core.f(x_best)
        }
        for i, rf in enumerate(result_files):
            x = parse_result(os.path.join(result_folder, rf))
            results[task_name]['points'].append(x)
            results[task_name]['values'][i] = core.f(x)

        results[task_name]['min'] = results[task_name]['values'].min()
        results[task_name]['mean'] = results[task_name]['values'].mean()
        results[task_name]['max'] = results[task_name]['values'].max()
        results[task_name]['std'] = results[task_name]['values'].std()
        results[task_name]['values'] = list(results[task_name]['values'])

    print('>>> Dumping result')
    shutil.copyfile(options.algorithm, os.path.join(output_folder, 'config.json'))
    json.dump(results,
              open(os.path.join(output_folder, 'statistics.json'), 'w'),
              indent=2)

    md = create_markdown(results)
    with open(os.path.join(output_folder, 'statistics.md'), 'w') as md_file:
        md_file.write(md)
    export(path=os.path.join(output_folder, 'statistics.md'))
    print('>>> Done!\n')
# Fragment: these cropping statements are the tail of a per-cell loop
# (presumably inside split_letters(), which is invoked at the end).
top = HEIGHT * j
left = WIDTH * i
box = (left, top, left + WIDTH, top + HEIGHT)
cropped_img = img.crop(box)
cropped_img.save(cell_folder / f'{letter_ix}.png')

names = {
    '00': 'manitoba',
    '01': 'wii',
    '10': 'michel_vaillant',
    '11': 'hongrie',
}

if STATIC_SITE.exists():
    shutil.rmtree(STATIC_SITE)
STATIC_SITE.mkdir(parents=True)
shutil.copy(TEMPLATES / 'main.js', STATIC_SITE)
grip.export(path=TEMPLATES / 'index.md', out_filename=STATIC_SITE / 'index.html')

for i in range(WX):
    for j in range(HX):
        subfolder = f"{i}{j}"
        index_template_string = (Path('templates') / 'part.html.j2').read_text('utf8')
        rendered = jinja2.Template(index_template_string).render(
            num_letters=len(TEXT), subfolder=subfolder)
        (STATIC_SITE / f"{names[subfolder]}.html").write_text(rendered)

split_letters()