def item_find(x, idx=0):
    """Drill into nested containers and return a leaf element.

    Picks the `idx`-th entry at the top level, then keeps descending with
    the default index (0) at each deeper level until a non-container is hit.
    """
    if is_listy(x):
        return item_find(x[idx])
    if isinstance(x, dict):
        # An int `idx` selects by position among the keys; any other type
        # is treated as the key itself.
        k = idx if not isinstance(idx, int) else list(x.keys())[idx]
        return item_find(x[k])
    return x
def detuplify_pg(d):
    """Flatten a param-group dict for logging.

    Drops the 'params' entry; list-like hyperparameter values are expanded
    into indexed keys ('lr__0', 'lr__1', ...); scalars are copied as-is.
    """
    out = {}
    for key, val in d.items():
        if key == 'params':
            continue
        if is_listy(val):
            for i, item in enumerate(val):
                out[f'{key}__{i}'] = item
        else:
            out[key] = val
    return out
def apply(func, x, *args, **kwargs):
    """Apply `func` recursively to `x`, passing on extra args.

    List-like containers are mapped element-wise (container type preserved);
    dicts are mapped over their values. For a leaf, the result keeps the
    original's type via `retain_type` — except when `x` is None, where the
    raw result is returned untouched.
    """
    if is_listy(x):
        mapped = [apply(func, item, *args, **kwargs) for item in x]
        return type(x)(mapped)
    if isinstance(x, dict):
        return {key: apply(func, val, *args, **kwargs) for key, val in x.items()}
    out = func(x, *args, **kwargs)
    if x is None:
        return out
    return retain_type(out, x)
def __init__(self, start_lr=1e-7, end_lr=10, num_it=100, stop_div=True):
    """Set up the exponential LR schedule(s) for the range test.

    A list-like `start_lr` produces one `SchedExp` per (start, end) pair —
    e.g. one per param group — zipped against `end_lr`; a scalar produces a
    single schedule.
    """
    if is_listy(start_lr):
        scheds = [SchedExp(lo, hi) for lo, hi in zip(start_lr, end_lr)]
        self.scheds = {'lr': scheds}
    else:
        self.scheds = {'lr': SchedExp(start_lr, end_lr)}
    self.num_it = num_it
    self.stop_div = stop_div
    self.skip_batch = False
def maybe_item(o):
    """Reduce `o` to plain Python scalars where possible.

    Tensors become Python scalars via `.item()` (pulling values off the
    GPU/TPU onto the CPU); list-like containers and dicts are converted
    recursively, preserving the container class; anything else is returned
    unchanged.
    """
    if isinstance(o, torch.Tensor):
        return o.item()
    if is_listy(o):
        return o.__class__([maybe_item(elem) for elem in o])
    if isinstance(o, dict):
        return {key: maybe_item(val) for key, val in o.items()}
    # Plain scalar or arbitrary object: pass through as-is.
    return o
def __init__(self, emb_szs, n_cont, out_sz, layers, ps=None, embed_p=0., y_range=None, use_bn=True, bn_final=False, bn_cont=True, act_cls=nn.ReLU(inplace=True)):
    """Build a tabular model: embeddings for categorical inputs plus
    batchnorm for continuous ones, feeding a stack of LinBnDrop blocks.

    Args (as used below):
        emb_szs: iterable of (vocab_size, emb_dim) pairs, one per categorical.
        n_cont: number of continuous input features.
        out_sz: width of the final output layer.
        layers: list of hidden-layer widths.
        ps: dropout prob per hidden layer; a scalar is broadcast (default 0).
        embed_p: dropout applied after the embedding lookup.
        y_range: optional (low, high) — appends a SigmoidRange on the output.
        use_bn / bn_final / bn_cont: batchnorm switches for hidden layers,
            the final layer, and the continuous inputs respectively.
        act_cls: activation module used by every hidden layer.
            NOTE(review): the default is a single nn.ReLU *instance* built at
            def-time and shared across layers — harmless for stateless ReLU.
    """
    ps = ifnone(ps, [0] * len(layers))
    # A scalar dropout prob applies uniformly to every hidden layer.
    if not is_listy(ps): ps = [ps] * len(layers)
    self.embeds = nn.ModuleList([Embedding(ni, nf) for ni, nf in emb_szs])
    self.emb_drop = nn.Dropout(embed_p)
    self.bn_cont = nn.BatchNorm1d(n_cont) if bn_cont else None
    # Total width of the concatenated embedding outputs.
    n_emb = sum(e.embedding_dim for e in self.embeds)
    self.n_emb, self.n_cont = n_emb, n_cont
    sizes = [n_emb + n_cont] + layers + [out_sz]
    # One activation per hidden layer; the output layer gets none.
    actns = [act_cls for _ in range(len(sizes) - 2)] + [None]
    # The last (output) layer only gets batchnorm when bn_final is set, and
    # always gets dropout 0 — hence the 0. appended to ps.
    _layers = [LinBnDrop(sizes[i], sizes[i + 1], bn=use_bn and (i != len(actns) - 1 or bn_final), p=p, act=a, lin_first=True) for i, (p, a) in enumerate(zip(ps + [0.], actns))]
    if y_range is not None: _layers.append(SigmoidRange(*y_range))
    self.layers = nn.Sequential(*_layers)
def show_training_loop(self: Learner, verbose: bool = False, cbs: Union[None, list, Callback] = None):
    "Show each step in the training loop, potentially with Callback event descriptions"
    # Temporarily attach the extra callbacks so they show up in the listing.
    if cbs is not None:
        if is_listy(cbs):
            self.add_cbs(cbs)
        else:
            self.add_cbs(listify(cbs))
    depth = 0
    for step in _loop:
        if step.startswith('Start'):
            print(f'{" " * depth}{step}')
            depth += 3
        elif step.startswith('End'):
            depth -= 3
            print(f'{" " * depth}{step}')
        elif verbose:
            # One line per callback, with its event description.
            print(f'{" " * depth} - {step}:')
            for cb in self.ordered_cbs(step):
                _print_cb(cb, step, depth)
        else:
            print(f'{" " * depth} - {step}:', self.ordered_cbs(step))
    # Undo the temporary registration.
    if cbs is not None:
        if is_listy(cbs):
            self.remove_cbs(cbs)
        else:
            self.remove_cbs(listify(cbs))
def to_concat(xs, dim=0):
    """Concat the elements in `xs` (recursively if they are tuples/lists of tensors).

    An empty `xs` is returned as-is. Tuples/lists are concatenated
    element-wise (container type preserved); dicts are concatenated per key.
    Leaves are joined with `torch.cat`, keeping the type of `xs[0]` via
    `retain_type`.
    """
    if not xs:
        return xs
    if is_listy(xs[0]):
        return type(xs[0])([to_concat([x[i] for x in xs], dim=dim) for i in range_of(xs[0])])
    if isinstance(xs[0], dict):
        return {k: to_concat([x[k] for x in xs], dim=dim) for k in xs[0].keys()}
    # We may receive xs that are not concatenable (inputs of a text classifier
    # for instance); in that case we return a big list of the sliced elements.
    try:
        return retain_type(torch.cat(xs, dim=dim), xs[0])
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
                      for i in range_of(o_)) for o_ in xs], L())
def to_list(o):
    """Coerce `o` to a list: None becomes [], list-like values pass through
    unchanged, and anything else is wrapped in a one-element list."""
    if o is None:
        return []
    if is_listy(o):
        return o
    return [o]
# NOTE(review): this span is the interior of a larger function whose start is
# not visible here — `result`, `value`, `id2users`, `users2id`, `issue`,
# `JSON_FIELDS`, `is_listy` and `view_string` are all bound before this point.
# `value` presumably holds the raw "Fiscal_Responsavel" id — confirm upstream.

# Translate the responsible-inspector id to a user name; non-numeric values
# are left as-is (best-effort mapping).
try: result["Fiscal_Responsavel"] = id2users.get(int(value), value)
except ValueError: pass
# Same best-effort translation for the list of inspectors, when present.
if (value := result.get("Fiscais", None)) is not None:
    try: result["Fiscais"] = [id2users.get(int(v), v) for v in result["Fiscais"]]
    except ValueError: pass
# Expose the full set of known user names to the caller.
users = list(users2id.keys())
result["Users"] = users
# Normalize the JSON text fields: missing -> "", list-like -> element-wise
# through view_string, scalar -> view_string directly.
for f in JSON_FIELDS:
    if (field := result.get(f)) is None:
        result[f] = ""
        continue
    if is_listy(field):
        result[f] = [view_string(s) for s in field]
    else:
        result[f] = view_string(field)
# Use the author of the most recent journal entry if the issue has any;
# otherwise fall back to the issue's own author.
if journal := list(issue.journals):
    journal = dict(list(issue.journals)[-1])
    key = "user"
else:
    journal = dict(issue)
    key = "author"
user = journal[key]["name"]
# Timestamps arrive as UTC ("Z" suffix); the -3h shift presumably converts to
# local (UTC-3) time — TODO confirm. Note the result is a naive datetime.
date = datetime.strptime(journal["created_on"], "%Y-%m-%dT%H:%M:%SZ") - timedelta(hours=3)