def html_generator(entry_list):
    """Write entry_list out as a minimal HTML5 page (output.html).

    Each entry dict is rendered as a linked title (``title``/``file_source``),
    its ``identifier``, a comma-joined ``author`` list and its ``summary``,
    using the module-level markup helpers ``title``, ``head``, ``a`` and ``p``.
    """
    print('generating html')
    # ``with`` guarantees the file is closed even if a helper raises.
    with open('output.html', 'w') as f:
        # html5
        f.write('<!DOCTYPE html>')
        f.write('<html>')
        # head
        #meta_string = '<meta charset="utf-8" name="viewport" content="width=device-width, initial-scale=1.0">'
        title_string = title('arXiv:PDF')
        #head_string = head(''.join([meta_string, title_string]))
        head_string = head(title_string)
        f.write(head_string)
        # body
        f.write('<body>')
        for entry in entry_list:
            file_link = a(entry['title'], href=entry['file_source'])
            entry_title_string = p(file_link)
            id_string = p(entry['identifier'])
            author_string = p(', '.join(entry['author']))
            summary_string = p(entry['summary'])
            entry_string = ''.join(
                [entry_title_string, id_string, author_string, summary_string])
            f.write(entry_string)
        f.write('</body>')
        # BUG FIX: was f.write('<html>') — the document must be closed
        # with the matching end tag.
        f.write('</html>')
def getHtmlPageFromCsvFile(filePath, css, title):
    """Build an HTML page embedding the CSV file rendered as a table.

    filePath -- path to the CSV file; also used as the page <title>
    css      -- href of the stylesheet to link in the page head
    title    -- text for the page's <h1> heading
    """
    stylesheet = link(' ', rel='stylesheet', type='text/css', href=css)
    page = html(head(stylesheet, title=filePath))
    # Append the visible content: heading plus the CSV rendered as a table.
    content = body(h1(title), getHtmlTableFromCsvFile(filePath))
    page = page.append(content)
    return page
def html_generator(entry_list):
    """Write entry_list out as a minimal HTML5 page (output.html).

    Each entry dict is rendered as a linked title (``title``/``file_source``),
    its ``identifier``, a comma-joined ``author`` list and its ``summary``,
    using the module-level markup helpers ``title``, ``head``, ``a`` and ``p``.
    """
    print('generating html')
    # ``with`` guarantees the file is closed even if a helper raises.
    with open('output.html', 'w') as f:
        # html5
        f.write('<!DOCTYPE html>')
        f.write('<html>')
        # head
        #meta_string = '<meta charset="utf-8" name="viewport" content="width=device-width, initial-scale=1.0">'
        title_string = title('arXiv:PDF')
        #head_string = head(''.join([meta_string, title_string]))
        head_string = head(title_string)
        f.write(head_string)
        # body
        f.write('<body>')
        for entry in entry_list:
            file_link = a(entry['title'], href=entry['file_source'])
            entry_title_string = p(file_link)
            id_string = p(entry['identifier'])
            author_string = p(', '.join(entry['author']))
            summary_string = p(entry['summary'])
            entry_string = ''.join([
                entry_title_string, id_string, author_string, summary_string
            ])
            f.write(entry_string)
        f.write('</body>')
        # BUG FIX: was f.write('<html>') — the document must be closed
        # with the matching end tag.
        f.write('</html>')
def to_html(results):
    """Render collected libStorageMgmt test results as a prettified HTML page.

    results -- list of per-system dicts with 'SYSTEM' (ID), 'META'
               (exit code 'ec', 'error_file') and per-method results
               consumed via get_result().
    Returns the prettified HTML document string (BeautifulSoup).
    """
    PREAMBLE_FILE = os.getenv('LSM_PREAMBLE_FILE', "")
    preamble = ""

    # Report row order: one row per libStorageMgmt API method.
    methods = [
        'capabilities', 'systems', 'plugin_info', 'pools', 'job_status',
        'job_free', 'iscsi_chap_auth', 'volumes', 'volume_create',
        'volume_delete', 'volume_resize', 'volume_replicate',
        'volume_replicate_range_block_size', 'volume_replicate_range',
        'volume_enable', 'volume_disable', 'disks', 'target_ports',
        'volume_mask', 'volume_unmask', 'volume_child_dependency',
        'volume_child_dependency_rm', 'access_groups',
        'access_groups_granted_to_volume', 'access_group_create',
        'access_group_delete', 'volumes_accessible_by_access_group',
        'access_groups_granted_to_volume', 'access_group_initiator_add',
        'access_group_initiator_delete', 'fs', 'fs_create', 'fs_delete',
        'fs_resize', 'fs_clone', 'fs_file_clone', 'fs_snapshots',
        'fs_snapshot_create', 'fs_snapshot_delete', 'fs_snapshot_restore',
        'fs_child_dependency', 'fs_child_dependency_rm', 'export_auth',
        'exports', 'export_fs', 'export_remove'
    ]

    ch = []
    row_data = []

    if os.path.isfile(PREAMBLE_FILE):
        with open(PREAMBLE_FILE, 'r') as pm:
            preamble = pm.read()

    #Build column header (one column per system under test)
    for r in results:
        ch.append(r['SYSTEM']['ID'])

    # Add overall pass/fail for unit tests
    pass_fail = ['Overall Pass/Fail result']
    for r in results:
        if r['META']['ec'] == '0':
            pass_fail.append('P')
        else:
            pass_fail.append('F')
    row_data.append(pass_fail)

    # Append on link for error log
    error_log = ['Error log (click +)']
    for r in results:
        error_log.append('<a href="%s">+</a>' %
                         ('./' + os.path.basename(r['META']['error_file'])))
    row_data.append(error_log)

    for m in methods:
        row = [m]
        for r in results:
            row.append(get_result(r, m))
        row_data.append(row)

    # Build HTML
    text = '<!DOCTYPE html>'
    text += str(
        html(
            head(
                link(rel="stylesheet", type="text/css", href="../../test.css"),
                title("libStorageMgmt test results"),
            ),
            body(
                # BUG FIX: the '%' operator must format the string *before*
                # it is passed to h1(); previously it was (mis)applied to the
                # h1() result object.
                HTML(h1("%s Results generated @ %s" %
                        (preamble, time.strftime("%c")))),
                div(table(_table_header(ch), _table_body(row_data)),
                    _class="angled_table"),
                div(
                    pre(" Legend\n"
                        " P = Pass (Method called and returned without error)\n"
                        " F = Fail (Method call returned an error)\n"
                        " U = Unsupported or unable to test due to other errors\n"
                        " * = Unable to connect to array or provider totally unsupported\n"
                        " + = hyper link to error log")))))
    return bs(text).prettify()
def __call__(self, environ, start_response):
    """WSGI entry point: serve the echo-test page, static JS assets, or
    hand the request off to gevent-socketio.

    Routes:
      ''            -> inline demo HTML page
      'static/*.js' -> file read off disk (only .js is served)
      'socket.io/*' -> socketio_manage dispatch to EchoNamespace
      anything else -> 404 via not_found()
    """
    path = environ['PATH_INFO'].strip('/')

    # Root: render the demo page.
    if not path:
        start_response('200 OK', [('Content-Type', 'text/html')])
        return [
            '%s%s' % (
                '<!doctype html>',
                str(
                    h.html(
                        h.head(h.meta(charset='UTF-8'),
                               h.title('socketio echo test')),
                        h.script(src="/static/jquery-1.6.1.min.js",
                                 type="text/javascript"),
                        h.script(src="/static/socket.io.js",
                                 type="text/javascript"),
                        h.script(src="/static/echo.js",
                                 type="text/javascript"),
                        h.body(
                            h.p('''
                            This is an echo test for socketio. When you enter values
                            for 'bar' and 'baz' and push 'emit', they'll be sent over
                            a socket, received by the server ('foo()'), and echoed
                            back via emitting 'echo' with 'rab' and 'zab'. (no HTTP
                            GET or POST is executed!)
                            '''),
                            h.hr(),
                            h.form('bar: ', h.input(type='text', id='bar'), ' ',
                                   'baz: ', h.input(type='text', id='baz'),
                                   h.button('Emit', type='submit'),
                                   id='foo'),
                            h.hr(),
                            h.p(
                                # These will get values subbed in via javascript when rab and zab are echoed from server:
                                'rab: ', h.span(id='rab'), ' ',
                                'zab: ', h.span(id='zab'))))))
        ]

    # Static assets: only .js files are known in this simple example.
    if path.startswith('static/'):
        try:
            # BUG FIX: close the file handle (was a bare open(path).read()).
            with open(path) as fh:
                data = fh.read()
        except Exception:
            return not_found(start_response)
        if path.endswith(".js"):
            content_type = "text/javascript"
        # we don't have any other types in this simple example, so...
        else:
            return not_found(start_response)
        start_response('200 OK', [('Content-Type', content_type)])
        return [data]

    if path.startswith("socket.io"):
        socketio_manage(environ, {'/echo': EchoNamespace}, {})
        # BUG FIX: a WSGI callable must return an iterable (PEP 3333);
        # this branch previously fell through and returned None.
        return []
    else:
        return not_found(start_response)
def to_html(results):
    """Render collected libStorageMgmt test results as a prettified HTML page.

    results -- list of per-system dicts with 'SYSTEM' (ID), 'META'
               (exit code 'ec', 'error_file') and per-method results
               consumed via get_result().
    Returns the prettified HTML document string (BeautifulSoup).
    """
    PREAMBLE_FILE = os.getenv('LSM_PREAMBLE_FILE', "")
    preamble = ""

    # Report row order: one row per libStorageMgmt API method.
    methods = ['capabilities', 'systems', 'plugin_info', 'pools',
               'job_status', 'job_free', 'iscsi_chap_auth', 'volumes',
               'volume_create', 'volume_delete', 'volume_resize',
               'volume_replicate', 'volume_replicate_range_block_size',
               'volume_replicate_range', 'volume_enable', 'volume_disable',
               'disks', 'target_ports', 'volume_mask', 'volume_unmask',
               'volume_child_dependency', 'volume_child_dependency_rm',
               'access_groups', 'access_groups_granted_to_volume',
               'access_group_create', 'access_group_delete',
               'volumes_accessible_by_access_group',
               'access_groups_granted_to_volume',
               'access_group_initiator_add', 'access_group_initiator_delete',
               'fs', 'fs_create', 'fs_delete', 'fs_resize', 'fs_clone',
               'fs_file_clone', 'fs_snapshots', 'fs_snapshot_create',
               'fs_snapshot_delete', 'fs_snapshot_restore',
               'fs_child_dependency', 'fs_child_dependency_rm',
               'export_auth', 'exports', 'export_fs', 'export_remove'
               ]

    ch = []
    row_data = []

    if os.path.isfile(PREAMBLE_FILE):
        with open(PREAMBLE_FILE, 'r') as pm:
            preamble = pm.read()

    #Build column header (one column per system under test)
    for r in results:
        ch.append(r['SYSTEM']['ID'])

    # Add overall pass/fail for unit tests
    pass_fail = ['Overall Pass/Fail result']
    for r in results:
        if r['META']['ec'] == '0':
            pass_fail.append('P')
        else:
            pass_fail.append('F')
    row_data.append(pass_fail)

    # Append on link for error log
    error_log = ['Error log (click +)']
    for r in results:
        error_log.append('<a href="%s">+</a>' %
                         ('./' + os.path.basename(r['META']['error_file'])))
    row_data.append(error_log)

    for m in methods:
        row = [m]
        for r in results:
            row.append(get_result(r, m))
        row_data.append(row)

    # Build HTML
    text = '<!DOCTYPE html>'
    text += str(html(
        head(link(rel="stylesheet", type="text/css", href="../../test.css"),
             title("libStorageMgmt test results"),
             ),
        body(
            # BUG FIX: the '%' operator must format the string *before* it is
            # passed to h1(); previously it was (mis)applied to the h1()
            # result object.
            HTML(h1("%s Results generated @ %s" %
                    (preamble, time.strftime("%c")))),
            div(table(_table_header(ch), _table_body(row_data)),
                _class="angled_table"),
            div(pre(
                " Legend\n"
                " P = Pass (Method called and returned without error)\n"
                " F = Fail (Method call returned an error)\n"
                " U = Unsupported or unable to test due to other errors\n"
                " * = Unable to connect to array or provider totally unsupported\n"
                " + = hyper link to error log\n\n\n",
                HTML(' Source code for plug-in for this test run <a href=./smis.py.html>is here. </a>'))))
    ))
    return bs(text).prettify()
def vis():
    """Visualise beam-search report generation on the MIMIC-CXR test split.

    Runs the (module-level) model over the test loader, saves a per-word
    attention heat-map PNG for every generated word, and writes an
    index.html (via json2html + Bootstrap tooltips) that shows each
    generated sentence/beam next to the ground-truth report.

    NOTE(review): depends on many module-level globals (FLAGS, model,
    test_dataset, test_loader, logger, mode, Mode, mimic_cxr, plot, ...);
    `_label` is read in the Mode.use_label_ce branch but never assigned in
    this function — verify it exists at module level before enabling that
    mode.
    """
    def to_str(x):
        # Format a 0-d tensor as a fixed 2-decimal string.
        return '{:.2f}'.format(x.item())

    assert FLAGS.dataset == 'mimic-cxr'

    phase = Phase.test
    dataset = test_dataset
    data_loader = test_loader

    # Optionally restore model weights from a checkpoint.
    if FLAGS.ckpt_path:
        logger.info(f'Loading model from {FLAGS.ckpt_path}')
        load_state_dict(model, torch.load(FLAGS.ckpt_path))

    log = Log()
    converter = SentIndex2Report(index_to_word=Dataset.index_to_word)
    chexpert = CheXpert()  # NOTE(review): constructed but never used below

    # Time-stamped output directory; attention PNGs go under imgs/.
    working_dir = os.path.join(FLAGS.working_dir, 'vis', datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S-%f'))
    image_dir = os.path.join(working_dir, 'imgs')
    os.makedirs(image_dir)

    reports = []
    prog = tqdm.tqdm(enumerate(data_loader), total=len(data_loader))
    for (num_batch, batch) in prog:
        # Move every tensor in the batch onto the configured device.
        for (key, value) in batch.items():
            batch[key] = value.to(FLAGS.device)

        losses = {}
        metrics = {}

        # Inference only: decode with beam search, no gradients.
        with torch.no_grad():
            batch = model(batch, phase=Phase.test, beam_size=FLAGS.beam_size)

        _log = {**losses, **metrics}
        log.update(_log)
        prog.set_description(', '.join(
            [f'[{phase:5s}]'] +
            [f'{key:s}: {log[key]:8.2e}' for key in sorted(losses.keys())]
        ))

        item_index = to_numpy(batch['item_index'])

        # Unpack the packed per-sentence outputs; shapes per original notes:
        _text_length = batch['_text_length']  # (num_reports,)
        (
            _stop,  # (num_reports, max_num_sentences, 1)
            _temp,  # (num_reports, max_num_sentences, 1)
            _attention,  # (num_reports, max_num_sentences, beam_size, max_num_words, 65)
            _score,  # (num_reports, max_num_sentences, beam_size)
            _sum_log_probability,  # (num_reports, max_num_sentences, beam_size)
            _text,  # (num_reports, max_num_sentences, beam_size, max_num_words)
            _sent_length,  # (num_reports, max_num_sentences, beam_size)
        ) = [
            pad_packed_sequence(batch[key], length=_text_length)
            for key in ['_stop', '_temp', '_attention', '_score', '_sum_log_probability', '_text', '_sent_length']
        ]

        (fig, ax) = plot.subplots(1, figsize=(6, 6), gridspec_kw={'left': 0, 'right': 1, 'bottom': 0, 'top': 1})
        for num_report in range(len(_text_length)):
            # Global item number (used to name the attention images).
            num = num_batch * FLAGS.batch_size + num_report
            item = dataset.df.iloc[int(item_index[num_report])]
            image_path = mimic_cxr.image_path(dicom_id=item.dicom_id)

            sentences = []
            for num_sentence in range(_text_length[num_report]):
                beams = []
                for num_beam in range(FLAGS.beam_size):
                    num_words = _sent_length[num_report, num_sentence, num_beam]
                    _sentence = _text[num_report, num_sentence, num_beam]
                    _words = Dataset.index_to_word[to_numpy(_sentence[:num_words])]

                    texts = []
                    for num_word in range(num_words):
                        _attention_path = os.path.join(image_dir, f'{num}-{num_sentence}-{num_beam}-{num_word}.png')
                        print(_attention_path)

                        # texts.append(span(_words[num_word]))

                        # Min-max normalise this word's attention over the
                        # image regions, reshape to a 2-D map and upsample
                        # by 32x with bilinear interpolation.
                        _a = _attention[num_report, num_sentence, num_beam, num_word, :FLAGS.image_size * FLAGS.image_size]
                        _a_sum = _a.sum()
                        _a = _a - _a.min()
                        _a = _a / _a.max()
                        _a = _a.reshape(FLAGS.image_size, FLAGS.image_size)
                        _a = F.interpolate(_a[None, None, :], scale_factor=(32, 32), mode='bilinear', align_corners=False)[0, 0]

                        '''
                        ax.contourf(to_numpy(_a), cmap='gray')
                        ax.set_axis_off()
                        fig.savefig(_attention_path, bbox_inches=0)
                        ax.cla()
                        '''

                        # Save the heat-map as an 8-bit grayscale PNG.
                        _a = 255 * to_numpy(_a)
                        PIL.Image.fromarray(_a.astype(np.uint8)).save(_attention_path)

                        # Word span with a Bootstrap tooltip holding the total
                        # attention and the heat-map image; double quotes are
                        # swapped for singles so the HTML attribute stays valid.
                        texts.append(span(_words[num_word], **{
                            'data-toggle': 'tooltip',
                            'title': (
                                p('Total attention: ' + to_str(_a_sum)) +
                                img(src=f'http://monday.csail.mit.edu/xiuming{_attention_path}', width='128', height='128')
                            ).replace('"', '\''),
                        }))

                    beams.append({
                        'score': to_str(_score[num_report, num_sentence, num_beam]),
                        'log_prob': to_str(_sum_log_probability[num_report, num_sentence, num_beam]),
                        'text': str('\n'.join(texts)),
                    })

                sentence = {}
                if mode & Mode.use_label_ce:
                    # NOTE(review): `_label` is not defined in this function —
                    # confirm it is provided at module level before using
                    # this mode.
                    sentence.update({
                        'stop': to_str(_stop[num_report, num_sentence]),
                        'temp': to_str(_temp[num_report, num_sentence]),
                        'labels': [{
                            'label': label_col,
                            'prob': to_str(_l),
                        } for (label_col, _l) in zip(Dataset.label_columns, _label[num_report, num_sentence])],
                    })
                if mode & Mode.gen_text:
                    sentence.update({
                        'beams': beams,
                    })
                sentences.append(sentence)

            reports.append({
                'rad_id': str(dataset.df.rad_id.iloc[item_index[num_report]]),
                'dicom_id': str(dataset.df.dicom_id.iloc[item_index[num_report]]),
                'image': str(img(src=f'http://monday.csail.mit.edu/xiuming{image_path}', width='256')),
                'generated text': sentences,
                'ground truth text': converter(batch['text'][num_report], batch['sent_length'][num_report], batch['text_length'][num_report], is_eos=False)[0],
            })

        # Only the first batch is visualised.
        if num_batch == 0:
            break

    # Render the collected reports as a Bootstrap-styled HTML table with
    # jQuery tooltips enabled for the attention pop-ups.
    s = json2html.convert(json=reports, table_attributes='class="table table-striped"', escape=False)
    s = (
        head([
            link(rel='stylesheet', href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css'),
            script(src='https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js'),
            script(src='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js'),
            script('$(function () { $(\'[data-toggle="tooltip"]\').tooltip({placement: "bottom", html: true}); })', type="text/javascript"),
        ]) +
        body(div(div(div(HTML(s), _class='panel-body'), _class='panel panel-default'), _class='container-fluid'))
    )
    with open(os.path.join(working_dir, 'index.html'), 'w') as f:
        f.write(s)