def not_found(e):
    # 404 handler: serve a random page from the full listing instead of a bare error.
    page = random.randint(1, rs.url["all"])
    url = (SOURCE_BASE + 'all/{}.csv').format(page)
    data = parser(fetch(url))
    return render_template('404.html', data=data, meta=meta,
                           val=int(time.time())), 404
def __init__(self, parent: QMainWindow) -> None:
    super().__init__(parent=parent)
    self.parent_ = parent
    self._init_window_()
    self._add_component_()
    self._event = event(self)
    self.setAccessibleNames()
    self.setAttribute(QtCore.Qt.WA_StyledBackground, True)
    self.setStyleSheet(parser(CSS_LOGIN))
def __init__(self, parent, uniqueId=None) -> None:
    # self._event = bookPreviewEvent(self)
    self._parent = parent
    self.mode = 1
    super().__init__()
    self._uid = uniqueId
    self._dbHelper: dbHelper = dbHelper()
    self.init()
    self.init_data()
    self._event = event(self)
    self.setAttribute(QtCore.Qt.WA_StyledBackground, True)
    self.setStyleSheet(parser(CSS_LOGIN))
def member(page=1, id=None, pages=rs.url["member"], base_url='member', endpoint='member'):
    if page > int(pages):
        abort(404)
    url = (SOURCE_BASE + base_url + '/{}.csv').format(page)
    data = hash_url(parser(fetch(url)))
    return render_template('member.html', data=data, meta=meta, id=id,
                           pagination=gen_pagination(page, pages),
                           endpoint=endpoint, val=int(time.time()))
def __init__(self, uniqueId: str, parent):
    """
    :param uniqueId: book unique id
    :param parent: parent widget obj
    """
    self._event = bookPreviewEvent(self)
    self._parent = parent
    super().__init__(event_=self._event)
    self._uid = uniqueId
    self._dbHelper: dbHelper = dbHelper()
    self.init()
    self.init_data()
    self.setAutoFillBackground(True)
    self.setAttribute(QtCore.Qt.WA_StyledBackground, True)
    self.setAccessibleNames()
    self.setStyleSheet(parser(CSS_BOOKPREVIEW))
def member_home(hash_url, page=1, id=None, base_url='source', endpoint='member_home'):
    if hash_url not in rs.url["source"]:
        abort(404)
    if page > rs.url["source"][hash_url]:
        abort(404)
    url = (SOURCE_BASE + base_url + '/{0}/{1}.csv').format(hash_url, page)
    data = parser(fetch(url))
    return render_template('home.html', data=data, meta=meta, id=id,
                           pagination=gen_pagination(page, rs.url["source"][hash_url]),
                           endpoint=endpoint, kwargs={'hash_url': hash_url},
                           val=int(time.time()))
def date_year_month(y, m, page=1, id=None, date=rs.url["date"], base_url='date',
                    endpoint='date_year_month', kwargs=None):
    # Each entry in `date` maps a year to its months and per-month page counts.
    year_ok = None
    for year in date:
        if year['year'] == y:
            year_ok = year
            break
    if not year_ok:
        print("not year ok", y, date)
        abort(404)
    if m not in year_ok['month']:
        print("not month ok", m)
        abort(404)
    if page > year_ok['month'][m]:
        print("not page ok", page)
        abort(404)
    url = (SOURCE_BASE + base_url + '/{0:04d}{1:02d}/{2}.csv').format(y, m, page)
    data = parser(fetch(url))
    return render_template('home.html', data=data, meta=meta, id=id,
                           pagination=gen_pagination(page, year_ok['month'][m]),
                           endpoint=endpoint,
                           kwargs=kwargs if kwargs else {'y': y, 'm': m},
                           val=int(time.time()))
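# --- Hedged sketch (assumption, not in the source): one plausible way the
# --- paginated views above could be registered. The `app` object and the URL
# --- rules are illustrative only; Flask's add_url_rule(rule, endpoint=...,
# --- view_func=...) is standard API, and the endpoint names match the ones
# --- each view passes through for pagination links.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/member/<int:page>', endpoint='member', view_func=member)
app.add_url_rule('/source/<hash_url>/<int:page>', endpoint='member_home',
                 view_func=member_home)
app.add_url_rule('/date/<int:y>/<int:m>/<int:page>', endpoint='date_year_month',
                 view_func=date_year_month)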
from utils.rollout import real_batch, evaluate, ma_evaluate, ma_batch
from utils import Transition, device
import numpy as np
import gym
import gym.spaces
import torch
import torch.optim as optim
import torch.nn as nn

torch.utils.backcompat.broadcast_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')

NUM_INTER = 9

args = parser.parser()
print('agent type: {}'.format(args.pg_type))

env, env_name = flow_env(render=args.render, use_inflows=True, horizon=4000)

### seeding ###
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
###############

obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]

tb_writer, label = log.log_writer(args)
total_steps = 0
normalizer = Normalizer(obs_dim)
print("simulated task: {}".format(env_name))
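# Hedged sketch (assumption): `Normalizer(obs_dim)` above is defined elsewhere
# in the repo. A common implementation keeps a running mean/std of observations
# for whitening; the class below is an illustrative stand-in, not the actual one.
class RunningNormalizer:
    """Online observation whitening via Welford's running mean/variance."""

    def __init__(self, dim, eps=1e-8):
        self.n = 0
        self.mean = np.zeros(dim)
        self.m2 = np.zeros(dim)  # running sum of squared deviations
        self.eps = eps

    def observe(self, x):
        # Welford update: numerically stable incremental mean/variance.
        self.n += 1
        delta = x - self.mean
        self.mean += delta / self.n
        self.m2 += delta * (x - self.mean)

    def normalize(self, x):
        std = np.sqrt(self.m2 / max(self.n - 1, 1)) + self.eps
        return (x - self.mean) / std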
if __name__ == "__main__": with GearToolkitContext() as gear_context: gear_context.init_logging() ( fw_client, save_combined_output, save_binary_masks, conversion_method, input_file_path, input_file_object, work_dir, output_dir, destination_type, save_slicer_color_table, ) = parser(gear_context) main( fw_client, save_combined_output, save_binary_masks, conversion_method, input_file_path, input_file_object, work_dir, output_dir, destination_type, save_slicer_color_table, )
def setStyleSheets(self) -> None:
    self.setStyleSheet(parser(CSS_MAINWINDOW))
def evaluate(args):
    scores = dict()
    for model_class, tokenizer_class, model_config, pretrained_weights in MODELS:
        tokenizer = tokenizer_class.from_pretrained(
            pretrained_weights, cache_dir=args.lm_cache_path)

        if args.from_scratch:
            config = model_config.from_pretrained(pretrained_weights)
            config.output_hidden_states = True
            config.output_attentions = True
            model = model_class(config).to(args.device)
        else:
            model = model_class.from_pretrained(
                pretrained_weights,
                cache_dir=args.lm_cache_path,
                output_hidden_states=True,
                output_attentions=True).to(args.device)

        # Probe the model once to discover layer / head / hidden-dim counts.
        with torch.no_grad():
            test_sent = tokenizer.encode('test', add_special_tokens=False)
            token_ids = torch.tensor([test_sent]).to(args.device)
            all_hidden, all_att = model(token_ids)[-2:]
        n_layers = len(all_att)
        n_att = all_att[0].size(1)
        n_hidden = all_hidden[0].size(-1)

        measure = Measure(n_layers, n_att)
        data = Dataset(path=args.data_path, tokenizer=tokenizer)

        for idx, s in tqdm(enumerate(data.sents), total=len(data.sents),
                           desc=pretrained_weights, ncols=70):
            raw_tokens = data.raw_tokens[idx]
            tokens = data.tokens[idx]
            if len(raw_tokens) < 2:
                data.cnt -= 1
                continue

            token_ids = tokenizer.encode(s, add_special_tokens=False)
            token_ids_tensor = torch.tensor([token_ids]).to(args.device)
            with torch.no_grad():
                all_hidden, all_att = model(token_ids_tensor)[-2:]
            all_hidden, all_att = list(all_hidden[1:]), list(all_att)

            # (n_layers, seq_len, hidden_dim)
            all_hidden = torch.cat([all_hidden[n] for n in range(n_layers)], dim=0)
            # (n_layers, n_att, seq_len, seq_len)
            all_att = torch.cat([all_att[n] for n in range(n_layers)], dim=0)

            # Subword tokenization can split raw tokens; collapse back to the
            # raw-token granularity using the chosen heuristic.
            if len(tokens) > len(raw_tokens):
                th = args.token_heuristic
                if th == 'first' or th == 'last':
                    mask = select_indices(tokens, raw_tokens, pretrained_weights, th)
                    assert len(mask) == len(raw_tokens)
                    all_hidden = all_hidden[:, mask]
                    all_att = all_att[:, :, mask, :]
                    all_att = all_att[:, :, :, mask]
                else:
                    # mask = torch.tensor(data.masks[idx])
                    mask = group_indices(tokens, raw_tokens, pretrained_weights)
                    raw_seq_len = len(raw_tokens)
                    all_hidden = torch.stack(
                        [all_hidden[:, mask == i].mean(dim=1)
                         for i in range(raw_seq_len)], dim=1)
                    all_att = torch.stack(
                        [all_att[:, :, :, mask == i].sum(dim=3)
                         for i in range(raw_seq_len)], dim=3)
                    all_att = torch.stack(
                        [all_att[:, :, mask == i].mean(dim=2)
                         for i in range(raw_seq_len)], dim=2)

            l_hidden, r_hidden = all_hidden[:, :-1], all_hidden[:, 1:]
            l_att, r_att = all_att[:, :, :-1], all_att[:, :, 1:]
            syn_dists = measure.derive_dists(l_hidden, r_hidden, l_att, r_att)

            gold_spans = data.gold_spans[idx]
            gold_tags = data.gold_tags[idx]
            assert len(gold_spans) == len(gold_tags)

            for m, d in syn_dists.items():
                pred_spans = []
                for i in range(measure.scores[m].n):
                    dist = syn_dists[m][i].tolist()
                    if len(dist) > 1:
                        # Linearly decaying positional bias, scaled by args.bias.
                        bias_base = (sum(dist) / len(dist)) * args.bias
                        bias = [bias_base * (1 - (1 / (len(dist) - 1)) * x)
                                for x in range(len(dist))]
                        dist = [dist[i] + bias[i] for i in range(len(dist))]
                    if args.use_not_coo_parser:
                        pred_tree = not_coo_parser(dist, raw_tokens)
                    else:
                        pred_tree = parser(dist, raw_tokens)
                    ps = get_nonbinary_spans(get_actions(pred_tree))[0]
                    pred_spans.append(ps)
                measure.scores[m].update(pred_spans, gold_spans, gold_tags)

        measure.derive_final_score()
        scores[pretrained_weights] = measure.scores

        if not os.path.exists(args.result_path):
            os.makedirs(args.result_path)
        with open(f'{args.result_path}/{pretrained_weights}.txt', 'w') as f:
            print('Model name:', pretrained_weights, file=f)
            print('Experiment time:', args.time, file=f)
            print('# of layers:', n_layers, file=f)
            print('# of attentions:', n_att, file=f)
            print('# of hidden dimensions:', n_hidden, file=f)
            print('# of processed sents:', data.cnt, file=f)

            # Initialize so the [MAX] report below cannot hit a NameError when
            # every sentence-level F1 is zero.
            max_corpus_f1, max_sent_f1 = 0, 0
            max_measure, max_layer = '', 0
            for n in range(n_layers):
                print(f'[Layer {n + 1}]', file=f)
                print('-' * (119 + measure.max_m_len), file=f)
                for m, s in measure.scores.items():
                    if m in measure.h_measures + measure.a_avg_measures:
                        print(f'| {m.upper()} {" " * (measure.max_m_len - len(m))} '
                              f'| Corpus F1: {s.corpus_f1[n] * 100:.2f} '
                              f'| Sent F1: {s.sent_f1[n] * 100:.2f} ',
                              end='', file=f)
                        for z in range(len(s.label_recalls[0])):
                            print(f'| {s.labels[z]}: '
                                  f'{s.label_recalls[n][z] * 100:.2f} ',
                                  end='', file=f)
                        print('|', file=f)
                        if s.sent_f1[n] > max_sent_f1:
                            max_corpus_f1 = s.corpus_f1[n]
                            max_sent_f1 = s.sent_f1[n]
                            max_measure = m
                            max_layer = n + 1
                    else:
                        for i in range(n_att):
                            m_att = str(i) if i > 9 else '0' + str(i)
                            m_att = m + m_att + " " * (measure.max_m_len - len(m))
                            i_att = n_att * n + i
                            print(f'| {m_att.upper()}'
                                  f'| Corpus F1: {s.corpus_f1[i_att] * 100:.2f} '
                                  f'| Sent F1: {s.sent_f1[i_att] * 100:.2f} ',
                                  end='', file=f)
                            for z in range(len(s.label_recalls[0])):
                                print(f'| {s.labels[z]}: '
                                      f'{s.label_recalls[i_att][z] * 100:.2f} ',
                                      end='', file=f)
                            print('|', file=f)
                            if s.sent_f1[i_att] > max_sent_f1:
                                max_corpus_f1 = s.corpus_f1[i_att]
                                max_sent_f1 = s.sent_f1[i_att]
                                max_measure = m_att
                                max_layer = n + 1
                print('-' * (119 + measure.max_m_len), file=f)

            print(f'[MAX]: | Layer: {max_layer} '
                  f'| {max_measure.upper()} '
                  f'| Corpus F1: {max_corpus_f1 * 100:.2f} '
                  f'| Sent F1: {max_sent_f1 * 100:.2f} |')
            print(f'[MAX]: | Layer: {max_layer} '
                  f'| {max_measure.upper()} '
                  f'| Corpus F1: {max_corpus_f1 * 100:.2f} '
                  f'| Sent F1: {max_sent_f1 * 100:.2f} |', file=f)

    return scores
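# Hedged worked example (illustration only, not part of the source): the bias
# step inside evaluate() adds a linearly decaying offset, scaled by args.bias,
# to each syntactic-distance list before tree decoding. The helper name
# _bias_demo is hypothetical.
def _bias_demo(dist, bias_scale):
    bias_base = (sum(dist) / len(dist)) * bias_scale
    bias = [bias_base * (1 - (1 / (len(dist) - 1)) * x) for x in range(len(dist))]
    return [d + b for d, b in zip(dist, bias)]

# e.g. _bias_demo([1.0, 2.0, 3.0], 0.5) == [2.0, 2.5, 3.0]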