def captionFastqc(test, status):
    """Build the HTML caption widget for a FastQC test result.

    Looks up the caption text for `test` in the module-level
    ``opts["qcCaptions"]`` table, using index 1 for a PASS status and
    index 0 otherwise. Returns "" when no caption is configured.

    The original duplicated the entire htmltag construction for the
    PASS and non-PASS cases even though only the jQuery-UI state class
    differed; the two branches are collapsed into a single code path.
    """
    passfail = 1 if status == "PASS" else 0
    caption = opts["qcCaptions"][test][passfail]
    if not caption:
        return ""
    # Only the jQuery-UI state class differs between the two outcomes.
    state_class = "ui-state-highlight" if status == "PASS" else "ui-state-error"
    icon = htmltag.span(
        _class="ui-icon ui-icon-info",
        style="float: left; margin-right: .3em;",
    )
    inner = htmltag.div(
        htmltag.HTML(htmltag.p(htmltag.HTML(icon + caption))),
        _class=state_class + " ui-corner-all",
        style="margin-top: 20px; padding: 0 .7em;",
    )
    return htmltag.div(htmltag.HTML(inner), _class="ui-widget")
def _header_entry(odd, data):
    """Render one header cell: `data` wrapped in a span inside a div.

    Odd-numbered entries receive the "odd" CSS class; even ones get an
    empty class attribute.
    """
    return div(span(data), _class="odd" if odd else "")
def __call__(self, environ, start_response):
    """WSGI entry point for the socketio echo demo.

    Routes three kinds of request by PATH_INFO:
      * ''              -> the HTML echo-test page
      * 'static/*.js'   -> javascript assets read from disk
      * 'socket.io...'  -> handed off to gevent-socketio

    Anything else is answered via ``not_found(start_response)``.

    Fix over the original: the static branch used ``open(path).read()``
    which leaked the file handle; it now uses a ``with`` block.
    """
    path = environ['PATH_INFO'].strip('/')

    if not path:
        # Root: build the demo page with the `h` HTML builder.
        start_response('200 OK', [('Content-Type', 'text/html')])
        return [
            '%s%s' % (
                '<!doctype html>',
                str(
                    h.html(
                        h.head(h.meta(charset='UTF-8'),
                               h.title('socketio echo test')),
                        h.script(src="/static/jquery-1.6.1.min.js",
                                 type="text/javascript"),
                        h.script(src="/static/socket.io.js",
                                 type="text/javascript"),
                        h.script(src="/static/echo.js",
                                 type="text/javascript"),
                        h.body(
                            h.p(''' This is an echo test for socketio. When you enter values for 'bar' and 'baz' and push 'emit', they'll be sent over a socket, received by the server ('foo()'), and echoed back via emitting 'echo' with 'rab' and 'zab'. (no HTTP GET or POST is executed!) '''),
                            h.hr(),
                            h.form('bar: ', h.input(type='text', id='bar'),
                                   ' ',
                                   'baz: ', h.input(type='text', id='baz'),
                                   h.button('Emit', type='submit'),
                                   id='foo'),
                            h.hr(),
                            h.p(
                                # These will get values subbed in via javascript when rab and zab are echoed from server:
                                'rab: ', h.span(id='rab'),
                                ' ',
                                'zab: ', h.span(id='zab'))))))
        ]

    if path.startswith('static/'):
        try:
            # `with` closes the handle deterministically even if read()
            # raises; the original leaked it via open(path).read().
            with open(path) as f:
                data = f.read()
        except Exception:
            return not_found(start_response)
        if path.endswith(".js"):
            content_type = "text/javascript"  # we don't have any other types in this simple example, so...
        else:
            return not_found(start_response)
        start_response('200 OK', [('Content-Type', content_type)])
        return [data]

    if path.startswith("socket.io"):
        # gevent-socketio runs the long-lived socket exchange itself;
        # by convention nothing is returned from this branch.
        socketio_manage(environ, {'/echo': EchoNamespace}, {})
    else:
        return not_found(start_response)
def vis():
    """Visualize beam-search report generation on the mimic-cxr test set.

    Runs the (module-level) model over the test loader with beam search,
    dumps per-word attention maps as PNGs under a timestamped working
    directory, and writes an `index.html` summarizing generated vs.
    ground-truth report text with tooltip-linked attention images.

    Only the first batch is processed (explicit `break` below).
    """
    def to_str(x):
        # Format a 0-d tensor/array as a 2-decimal string.
        return '{:.2f}'.format(x.item())

    assert FLAGS.dataset == 'mimic-cxr'
    phase = Phase.test
    dataset = test_dataset
    data_loader = test_loader
    if FLAGS.ckpt_path:
        logger.info(f'Loading model from {FLAGS.ckpt_path}')
        load_state_dict(model, torch.load(FLAGS.ckpt_path))
    log = Log()
    converter = SentIndex2Report(index_to_word=Dataset.index_to_word)
    # NOTE(review): `chexpert` is constructed but never used in this function.
    chexpert = CheXpert()
    # Timestamped output dir keeps repeated runs from clobbering each other.
    working_dir = os.path.join(FLAGS.working_dir, 'vis', datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S-%f'))
    image_dir = os.path.join(working_dir, 'imgs')
    os.makedirs(image_dir)
    reports = []
    prog = tqdm.tqdm(enumerate(data_loader), total=len(data_loader))
    for (num_batch, batch) in prog:
        # Move every batch tensor onto the configured device.
        for (key, value) in batch.items():
            batch[key] = value.to(FLAGS.device)
        # NOTE(review): losses/metrics stay empty, so the progress-bar
        # description below only ever shows the phase tag.
        losses = {}
        metrics = {}
        with torch.no_grad():
            batch = model(batch, phase=Phase.test, beam_size=FLAGS.beam_size)
        _log = {**losses, **metrics}
        log.update(_log)
        prog.set_description(', '.join(
            [f'[{phase:5s}]'] +
            [f'{key:s}: {log[key]:8.2e}' for key in sorted(losses.keys())]
        ))
        item_index = to_numpy(batch['item_index'])
        _text_length = batch['_text_length']  # (num_reports,)
        # Unpack the packed per-sentence model outputs to padded tensors.
        (
            _stop,                 # (num_reports, max_num_sentences, 1)
            _temp,                 # (num_reports, max_num_sentences, 1)
            _attention,            # (num_reports, max_num_sentences, beam_size, max_num_words, 65)
            _score,                # (num_reports, max_num_sentences, beam_size)
            _sum_log_probability,  # (num_reports, max_num_sentences, beam_size)
            _text,                 # (num_reports, max_num_sentences, beam_size, max_num_words)
            _sent_length,          # (num_reports, max_num_sentences, beam_size)
        ) = [
            pad_packed_sequence(batch[key], length=_text_length)
            for key in ['_stop', '_temp', '_attention', '_score', '_sum_log_probability', '_text', '_sent_length']
        ]
        # NOTE(review): fig/ax are only used by the commented-out contourf
        # path below; the live code saves attention maps via PIL instead.
        (fig, ax) = plot.subplots(1, figsize=(6, 6), gridspec_kw={'left': 0, 'right': 1, 'bottom': 0, 'top': 1})
        for num_report in range(len(_text_length)):
            # Global item number across batches (used in image filenames).
            num = num_batch * FLAGS.batch_size + num_report
            item = dataset.df.iloc[int(item_index[num_report])]
            image_path = mimic_cxr.image_path(dicom_id=item.dicom_id)
            sentences = []
            for num_sentence in range(_text_length[num_report]):
                beams = []
                for num_beam in range(FLAGS.beam_size):
                    num_words = _sent_length[num_report, num_sentence, num_beam]
                    _sentence = _text[num_report, num_sentence, num_beam]
                    _words = Dataset.index_to_word[to_numpy(_sentence[:num_words])]
                    texts = []
                    for num_word in range(num_words):
                        _attention_path = os.path.join(image_dir, f'{num}-{num_sentence}-{num_beam}-{num_word}.png')
                        print(_attention_path)
                        # texts.append(span(_words[num_word]))
                        # Slice the spatial part of the attention, min-max
                        # normalize, reshape to the image grid, and upsample
                        # 32x for display.
                        _a = _attention[num_report, num_sentence, num_beam, num_word, :FLAGS.image_size * FLAGS.image_size]
                        _a_sum = _a.sum()
                        _a = _a - _a.min()
                        _a = _a / _a.max()
                        _a = _a.reshape(FLAGS.image_size, FLAGS.image_size)
                        _a = F.interpolate(_a[None, None, :], scale_factor=(32, 32), mode='bilinear', align_corners=False)[0, 0]
                        '''
                        ax.contourf(to_numpy(_a), cmap='gray')
                        ax.set_axis_off()
                        fig.savefig(_attention_path, bbox_inches=0)
                        ax.cla()
                        '''
                        # Save the normalized attention map as an 8-bit PNG.
                        _a = 255 * to_numpy(_a)
                        PIL.Image.fromarray(_a.astype(np.uint8)).save(_attention_path)
                        # Word span with a Bootstrap tooltip embedding the
                        # attention image; quotes swapped so the HTML can sit
                        # inside the title="..." attribute.
                        texts.append(span(_words[num_word], **{
                            'data-toggle': 'tooltip',
                            'title': (
                                p('Total attention: ' + to_str(_a_sum)) +
                                img(src=f'http://monday.csail.mit.edu/xiuming{_attention_path}', width='128', height='128')
                            ).replace('"', '\''),
                        }))
                    beams.append({
                        'score': to_str(_score[num_report, num_sentence, num_beam]),
                        'log_prob': to_str(_sum_log_probability[num_report, num_sentence, num_beam]),
                        'text': str('\n'.join(texts)),
                    })
                sentence = {}
                if mode & Mode.use_label_ce:
                    # NOTE(review): `_label` is not bound anywhere in this
                    # function — this branch would raise NameError if
                    # Mode.use_label_ce is set; confirm where `_label`
                    # should come from (it is not in the unpacked tuple).
                    sentence.update({
                        'stop': to_str(_stop[num_report, num_sentence]),
                        'temp': to_str(_temp[num_report, num_sentence]),
                        'labels': [{
                            'label': label_col,
                            'prob': to_str(_l),
                        } for (label_col, _l) in zip(Dataset.label_columns, _label[num_report, num_sentence])],
                    })
                if mode & Mode.gen_text:
                    sentence.update({
                        'beams': beams,
                    })
                sentences.append(sentence)
            reports.append({
                'rad_id': str(dataset.df.rad_id.iloc[item_index[num_report]]),
                'dicom_id': str(dataset.df.dicom_id.iloc[item_index[num_report]]),
                'image': str(img(src=f'http://monday.csail.mit.edu/xiuming{image_path}', width='256')),
                'generated text': sentences,
                'ground truth text': converter(batch['text'][num_report], batch['sent_length'][num_report], batch['text_length'][num_report], is_eos=False)[0],
            })
        # Deliberately visualize only the first batch.
        if num_batch == 0:
            break
    # Render the collected reports as a Bootstrap-styled HTML table; escaping
    # is off because cell values already contain markup.
    s = json2html.convert(json=reports, table_attributes='class="table table-striped"', escape=False)
    s = (
        head([
            link(rel='stylesheet', href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css'),
            script(src='https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js'),
            script(src='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js'),
            # Activate the HTML tooltips created on the word spans above.
            script('$(function () { $(\'[data-toggle="tooltip"]\').tooltip({placement: "bottom", html: true}); })', type="text/javascript"),
        ]) +
        body(div(div(div(HTML(s), _class='panel-body'), _class='panel panel-default'), _class='container-fluid'))
    )
    with open(os.path.join(working_dir, 'index.html'), 'w') as f:
        f.write(s)