def _table_body(rd):
    """Build the HTML <tbody> for the results table.

    Each element of *rd* is one row: the first entry is the operation name
    (rendered with CSS class ``operation``), the remaining entries are the
    per-system results, striped with alternating class ``odd``/``""``.

    :param rd: iterable of row sequences (the row_data built by to_html)
    :return: a ``tbody(...)`` element wrapping all rendered rows
    """
    rows = ""
    for r in rd:
        # First column: the operation/method name.
        row = _body_entry("operation", r[0])
        # Remaining columns alternate "odd"/"" starting with "odd"
        # (enumerate replaces the original hand-rolled counter; the
        # original also had a dead `row = ""` initializer, removed here).
        for count, i in enumerate(r[1:], start=1):
            cl = "odd" if count % 2 else ""
            row += _body_entry(cl, HTML(i))
        rows += tr(HTML(row))
    return tbody(HTML(rows))
def _table_header(rd):
    """Render the <thead> row: one empty corner cell (above the operation
    column) followed by one skewed <th> per entry of *rd*, with the style
    flag alternating 1/0 starting at 1."""
    cells = th()  # leading empty corner cell
    for position, label in enumerate(rd, start=1):
        cells += th(_header_entry(position % 2, label), _class="skew")
    return thead(tr(HTML(str(cells))))
def getHtmlTableFromCsvFile(filePath):
    """ Prepare a html table from a csv file

    The first table row holds the CSV field names as <th> cells; every
    subsequent CSV record becomes a row of <td> cells, each tagged with
    its column name as CSS class.
    """
    body_rows = tbody()
    with open(filePath, 'r') as csv_file:
        reader = csv.DictReader(csv_file, dialect=ExcelFR())
        # Header row built from the CSV field names.
        header_row = tr()
        for name in reader.fieldnames:
            header_row = header_row.append(th(name))
        body_rows = body_rows.append(header_row)
        # One table row per CSV record, cells classed by column name.
        for record in reader:
            data_row = tr()
            for name in reader.fieldnames:
                data_row = data_row.append(td(HTML(record[name]), _class=name))
            body_rows = body_rows.append(data_row)
    return table(body_rows)
def to_html(results):
    """Render unit-test *results* as a standalone HTML page.

    Builds one column per tested system and one row per method, preceded by
    an overall pass/fail row and a row linking each system's error log.
    An optional preamble is read from the file named by the
    ``LSM_PREAMBLE_FILE`` environment variable.

    :param results: sequence of result dicts, each with keys
                    ``SYSTEM`` (``{'ID': ...}``) and ``META``
                    (``{'ec': ..., 'error_file': ...}``)
    :return: prettified HTML text
    """
    PREAMBLE_FILE = os.getenv('LSM_PREAMBLE_FILE', "")
    preamble = ""

    # NOTE(review): 'access_groups_granted_to_volume' appears twice in this
    # list, producing a duplicated table row — confirm whether intentional.
    methods = [
        'capabilities', 'systems', 'plugin_info', 'pools', 'job_status',
        'job_free', 'iscsi_chap_auth', 'volumes', 'volume_create',
        'volume_delete', 'volume_resize', 'volume_replicate',
        'volume_replicate_range_block_size', 'volume_replicate_range',
        'volume_enable', 'volume_disable', 'disks', 'target_ports',
        'volume_mask', 'volume_unmask', 'volume_child_dependency',
        'volume_child_dependency_rm', 'access_groups',
        'access_groups_granted_to_volume', 'access_group_create',
        'access_group_delete', 'volumes_accessible_by_access_group',
        'access_groups_granted_to_volume', 'access_group_initiator_add',
        'access_group_initiator_delete', 'fs', 'fs_create', 'fs_delete',
        'fs_resize', 'fs_clone', 'fs_file_clone', 'fs_snapshots',
        'fs_snapshot_create', 'fs_snapshot_delete', 'fs_snapshot_restore',
        'fs_child_dependency', 'fs_child_dependency_rm', 'export_auth',
        'exports', 'export_fs', 'export_remove'
    ]

    ch = []
    row_data = []

    if os.path.isfile(PREAMBLE_FILE):
        with open(PREAMBLE_FILE, 'r') as pm:
            preamble = pm.read()

    #Build column header
    for r in results:
        ch.append(r['SYSTEM']['ID'])

    # Add overall pass/fail for unit tests
    pass_fail = ['Overall Pass/Fail result']
    for r in results:
        if r['META']['ec'] == '0':
            pass_fail.append('P')
        else:
            pass_fail.append('F')
    row_data.append(pass_fail)

    # Append on link for error log
    error_log = ['Error log (click +)']
    for r in results:
        error_log.append('<a href="%s">+</a>' %
                         ('./' + os.path.basename(r['META']['error_file'])))
    row_data.append(error_log)

    # One row per method, one result cell per system.
    for m in methods:
        row = [m]
        for r in results:
            row.append(get_result(r, m))
        row_data.append(row)

    # Build HTML
    text = '<!DOCTYPE html>'
    text += str(
        html(
            head(
                link(rel="stylesheet", type="text/css", href="../../test.css"),
                title("libStorageMgmt test results"),
            ),
            body(
                HTML(
                    # BUG FIX: '%' was previously applied to the h1(...)
                    # element object instead of the format string, so the
                    # preamble and timestamp were never interpolated.
                    h1("%s Results generated @ %s"
                       % (preamble, time.strftime("%c")))),
                div(table(_table_header(ch), _table_body(row_data)),
                    _class="angled_table"),
                div(
                    pre(" Legend\n"
                        " P = Pass (Method called and returned without error)\n"
                        " F = Fail (Method call returned an error)\n"
                        " U = Unsupported or unable to test due to other errors\n"
                        " * = Unable to connect to array or provider totally unsupported\n"
                        " + = hyper link to error log")))))

    return bs(text).prettify()
def vis():
    """Run beam-search inference on the MIMIC-CXR test set and write an HTML
    visualization (per-word attention heat-map tooltips, beam scores, and
    ground-truth text) to a timestamped directory under FLAGS.working_dir.

    Relies on module-level state: FLAGS, model, test_dataset, test_loader,
    mimic_cxr, mode, and the html helpers (span/p/img/head/body/div/...).
    NOTE(review): `_label` (used in the use_label_ce branch below) is not
    defined in this function — presumably a module global or a missing
    unpack; verify before enabling that mode.
    """
    def to_str(x):
        # Format a 0-d tensor/array as a fixed 2-decimal string.
        return '{:.2f}'.format(x.item())

    # Only the MIMIC-CXR dataset is supported by this visualization.
    assert FLAGS.dataset == 'mimic-cxr'

    phase = Phase.test
    dataset = test_dataset
    data_loader = test_loader

    # Optionally restore model weights from a checkpoint.
    if FLAGS.ckpt_path:
        logger.info(f'Loading model from {FLAGS.ckpt_path}')
        load_state_dict(model, torch.load(FLAGS.ckpt_path))

    log = Log()
    converter = SentIndex2Report(index_to_word=Dataset.index_to_word)
    chexpert = CheXpert()  # NOTE(review): appears unused in this function

    # Fresh timestamped output directory; attention images go in imgs/.
    working_dir = os.path.join(FLAGS.working_dir, 'vis', datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S-%f'))
    image_dir = os.path.join(working_dir, 'imgs')
    os.makedirs(image_dir)

    reports = []
    prog = tqdm.tqdm(enumerate(data_loader), total=len(data_loader))
    for (num_batch, batch) in prog:
        # Move every batch tensor to the configured device.
        for (key, value) in batch.items():
            batch[key] = value.to(FLAGS.device)

        losses = {}
        metrics = {}

        # Inference only — no gradients; model decodes with beam search.
        with torch.no_grad():
            batch = model(batch, phase=Phase.test, beam_size=FLAGS.beam_size)

        _log = {**losses, **metrics}
        log.update(_log)
        prog.set_description(', '.join(
            [f'[{phase:5s}]'] +
            [f'{key:s}: {log[key]:8.2e}' for key in sorted(losses.keys())]
        ))

        item_index = to_numpy(batch['item_index'])

        _text_length = batch['_text_length']  # (num_reports,)
        # Unpack the packed per-sentence outputs to padded tensors.
        (
            _stop,                 # (num_reports, max_num_sentences, 1)
            _temp,                 # (num_reports, max_num_sentences, 1)
            _attention,            # (num_repoers, max_num_sentences, beam_size, max_num_words, 65)
            _score,                # (num_reports, max_num_sentences, beam_size)
            _sum_log_probability,  # (num_reports, max_num_sentences, beam_size)
            _text,                 # (num_reports, max_num_sentences, beam_size, max_num_words)
            _sent_length,          # (num_reports, max_num_sentences, beam_size)
        ) = [
            pad_packed_sequence(batch[key], length=_text_length)
            for key in ['_stop', '_temp', '_attention', '_score', '_sum_log_probability', '_text', '_sent_length']
        ]

        (fig, ax) = plot.subplots(1, figsize=(6, 6), gridspec_kw={'left': 0, 'right': 1, 'bottom': 0, 'top': 1})

        for num_report in range(len(_text_length)):
            # Global item number across batches (used in image filenames).
            num = num_batch * FLAGS.batch_size + num_report
            item = dataset.df.iloc[int(item_index[num_report])]
            image_path = mimic_cxr.image_path(dicom_id=item.dicom_id)

            sentences = []
            for num_sentence in range(_text_length[num_report]):
                beams = []
                for num_beam in range(FLAGS.beam_size):
                    num_words = _sent_length[num_report, num_sentence, num_beam]
                    _sentence = _text[num_report, num_sentence, num_beam]
                    _words = Dataset.index_to_word[to_numpy(_sentence[:num_words])]

                    texts = []
                    for num_word in range(num_words):
                        _attention_path = os.path.join(image_dir, f'{num}-{num_sentence}-{num_beam}-{num_word}.png')
                        print(_attention_path)

                        # texts.append(span(_words[num_word]))

                        # Per-word attention over image regions; min-max
                        # normalize, reshape to the image grid, and upsample
                        # 32x to full resolution for display.
                        _a = _attention[num_report, num_sentence, num_beam, num_word, :FLAGS.image_size * FLAGS.image_size]
                        _a_sum = _a.sum()
                        _a = _a - _a.min()
                        _a = _a / _a.max()
                        _a = _a.reshape(FLAGS.image_size, FLAGS.image_size)
                        _a = F.interpolate(_a[None, None, :], scale_factor=(32, 32), mode='bilinear', align_corners=False)[0, 0]

                        '''
                        ax.contourf(to_numpy(_a), cmap='gray')
                        ax.set_axis_off()
                        fig.savefig(_attention_path, bbox_inches=0)
                        ax.cla()
                        '''

                        # Save the heat-map as an 8-bit grayscale PNG.
                        _a = 255 * to_numpy(_a)
                        PIL.Image.fromarray(_a.astype(np.uint8)).save(_attention_path)

                        # Each word becomes a span with a Bootstrap tooltip
                        # embedding the attention image (double quotes must
                        # be swapped for singles inside the title attribute).
                        texts.append(span(_words[num_word], **{
                            'data-toggle': 'tooltip',
                            'title': (
                                p('Total attention: ' + to_str(_a_sum)) +
                                img(src=f'http://monday.csail.mit.edu/xiuming{_attention_path}', width='128', height='128')
                            ).replace('"', '\''),
                        }))

                    beams.append({
                        'score': to_str(_score[num_report, num_sentence, num_beam]),
                        'log_prob': to_str(_sum_log_probability[num_report, num_sentence, num_beam]),
                        'text': str('\n'.join(texts)),
                    })

                sentence = {}
                if mode & Mode.use_label_ce:
                    # NOTE(review): `_label` is not bound in this function —
                    # this branch would raise NameError as written; confirm.
                    sentence.update({
                        'stop': to_str(_stop[num_report, num_sentence]),
                        'temp': to_str(_temp[num_report, num_sentence]),
                        'labels': [{
                            'label': label_col,
                            'prob': to_str(_l),
                        } for (label_col, _l) in zip(Dataset.label_columns, _label[num_report, num_sentence])],
                    })
                if mode & Mode.gen_text:
                    sentence.update({
                        'beams': beams,
                    })
                sentences.append(sentence)

            reports.append({
                'rad_id': str(dataset.df.rad_id.iloc[item_index[num_report]]),
                'dicom_id': str(dataset.df.dicom_id.iloc[item_index[num_report]]),
                'image': str(img(src=f'http://monday.csail.mit.edu/xiuming{image_path}', width='256')),
                'generated text': sentences,
                'ground truth text': converter(batch['text'][num_report], batch['sent_length'][num_report], batch['text_length'][num_report], is_eos=False)[0],
            })

        # Only the first batch is visualized.
        if num_batch == 0:
            break

    # Render the report dicts as a Bootstrap-styled HTML table with
    # tooltip activation script, and write the page to index.html.
    s = json2html.convert(json=reports, table_attributes='class="table table-striped"', escape=False)
    s = (
        head([
            link(rel='stylesheet', href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css'),
            script(src='https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js'),
            script(src='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js'),
            script('$(function () { $(\'[data-toggle="tooltip"]\').tooltip({placement: "bottom", html: true}); })', type="text/javascript"),
        ]) +
        body(div(div(div(HTML(s), _class='panel-body'), _class='panel panel-default'), _class='container-fluid'))
    )
    with open(os.path.join(working_dir, 'index.html'), 'w') as f:
        f.write(s)