def coalesce_conditions(cond_exprs, action_stmts):
    cond_exprs = (
        as_list(cond_exprs)
        + as_list(s.implicit_cond_expr() for s in as_iter(action_stmts))
    )
    if not cond_exprs:
        return None
    else:
        ce = cond_exprs[0]
        for nextce in cond_exprs[1:]:
            if nextce:
                ce = ce.coalesced_with(nextce)
        return ce
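# Every snippet in this section leans on as_list (and often as_iter)
# helpers whose definitions are not shown. A minimal sketch consistent
# with how they are used here -- None becomes empty, a lone atom is
# wrapped, any other iterable is materialized -- might look like the
# following; the actual project definitions may differ, e.g. in whether
# as_list copies a list it is given.
from collections.abc import Iterable
from typing import Any, Iterator, List

def as_iter(x: Any) -> Iterator[Any]:
    '''Yields nothing for None, x itself for a str or non-iterable atom,
    and the elements of any other iterable.'''
    if x is None:
        return
    elif isinstance(x, str):
        yield x
    elif isinstance(x, Iterable):
        yield from x
    else:
        yield x

def as_list(x: Any) -> List[Any]:
    '''Materializes as_iter(x) into a fresh list.'''
    return list(as_iter(x))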
def remove_hop(self, hops: Hops):
    for hop in as_list(hops):
        node1 = hop.from_node
        node2 = hop.to_node
        self.g.nodes[node1]['hops'].remove(hop)
        self.g.nodes[node2]['hops'].remove(hop.reverse())
        self.g.remove_edge(node1, node2, hop.key)
def adjust_deltas(self, g: Graph, deltas: Iterable[Delta]) \
        -> Iterable[Delta]:
    deltas = as_list(deltas)
    max_delta = max((delta.amt for delta in deltas), default=0)
    min_delta = min((delta.amt for delta in deltas), default=0)
    return deltas  # TODO
def apply_deltas(self, g: Graph, old_d: ADict, initial_d: ADict,
                 deltas: Iterable[Delta]) -> ADict:
    '''Returns a new ADict, resulting from applying 'deltas' to
    'initial_d', possibly considering the activations from the previous
    timestep, in 'old_d'.'''
    # Earlier version, disabled by wrapping it in a string literal:
    """
    new_d: ADict = defaultdict(float, initial_d)
    amts: List[float] = []
    #for delta in deltas:
    for delta in sorted(deltas, key=attrgetter('amt')):
        new_d[delta.node] += delta.amt
        print(short(delta))     # LOGGING
        amts.append(delta.amt)  # LOGGING
    if amts:                    # LOGGING
        print(f' mean={mean(amts):1.8f} hmean={harmonic_mean(amts):1.8f}'
              f' gmean={geometric_mean(amts):1.8f} median={median(amts):1.8f}')
    print('APDOLD', new_d)
    return new_d  # TODO clip
    """
    new_d: ADict = dict(initial_d)
    deltas: List[Delta] = as_list(deltas)
    with logging(LogAdjustedDeltas, deltas):
        for delta in deltas:
            new_d[delta.node] = self.clip_a(
                g, delta.node,
                initial_d.get(delta.node, 0.0) + delta.amt)
    return new_d
def test_vma(self) -> None:
    # Regression test for a bug: when a canvas cell was painted over
    # after a VariantMakerFromAvails was created using it as a source,
    # the VariantMakerFromAvails would produce an invalid Consumer
    # (missing an operand).
    fm = FARGModel(
        slipnet=Slipnet(Graph.with_features([VariantMakerFromAvails()])))
    ca = fm.build(StepCanvas([Step([4, 5, 6])]))
    cr0 = CellRef(ca, 0)
    cr1 = CellRef(ca, 1)
    cr1.paint(Step([5, 2], StepDelta([6, 4], [2], '-')))
    # Consumer(12+5) will snag: 5 is avail but 12 is unavail
    co1 = fm.build(Consumer(operator=plus, operands=(12, 5), source=cr1))
    fm.run_agent(co1, num=2)
    vma: VariantMakerFromAvails = fm.the(
        VariantMakerFromAvails)  # type: ignore[assignment]
    assert vma, 'Consumer did not produce a VariantMakerFromAvails'
    self.assertEqual(vma.agent, co1)

    # Some other agent paints over cr1, removing the avail 5
    cr1.paint(Step([4, 11], StepDelta([6, 5], [11], '+')))
    #lenable(Agent, Codelet, Fizzle)
    fm.run_agent(vma)
    #ldisable_all()
    co2: Any = fm.built_by(vma)[0]
    assert isinstance(co2, Consumer), \
        f'built by {short(co2)}, not a Consumer'
    self.assertEqual(len(as_list(co2.operands)), 2)
    fm.run_agent(co2)  # Check for crash
    lp: Any = fm.built_by(co2)[0]
    self.assertIsInstance(lp, LitPainter)
def maximum_entropy_discretize(indata, includevars=None, excludevars=[],
                               numbins=3):
    """Performs a maximum-entropy discretization of data in-place.

    Requirements for this implementation:

    1. Try to make all bins equal sized (maximize the entropy).
    2. If datum x==y in the original dataset, then disc(x)==disc(y).
       For example, all datapoints with value 3.245 discretize to 1
       even if it violates requirement 1.
    3. Number of bins reflects only the non-missing data.

    Example:
        input:  [3,7,4,4,4,5]
        output: [0,1,0,0,0,1]

        Note that all 4s discretize to 0, which makes bin sizes unequal.

    Example:
        input:  [1,2,3,4,2,1,2,3,1,x,x,x]
        output: [0,1,2,2,1,0,1,2,0,0,0,0]

        Note that the missing data ('x') gets put in the bin with 0.0.
    """
    # includevars can be an atom or list
    includevars = as_list(includevars)

    # determine the variables to discretize
    includevars = includevars or range(indata.variables.size)
    includevars = [v for v in includevars if v not in excludevars]

    for v in includevars:
        # "_nm" means "no missing"
        vdata = indata.observations[:, v]
        vmiss = indata.missing[:, v]
        # ~ (not unary minus) is the boolean negation of the missing mask
        vdata_nm = vdata[~vmiss]
        argsorted = vdata_nm.argsort()

        if len(vdata_nm):
            # Find bin edges (cutpoints) using no-missing
            binsize = len(vdata_nm) // numbins
            binedges = [vdata_nm[argsorted[binsize*b - 1]]
                        for b in range(numbins)][1:]
            # Discretize full data. Missings get added to bin with 0.0.
            indata.observations[:, v] = N.searchsorted(binedges, vdata)

        oldvar = indata.variables[v]
        newvar = data.DiscreteVariable(oldvar.name, numbins)
        # copy any other data attached to variable
        newvar.__dict__.update(oldvar.__dict__)
        newvar.arity = numbins
        indata.variables[v] = newvar

    # if discretized all variables, then cast observations to int
    if len(includevars) == indata.variables.size:
        indata.observations = indata.observations.astype(int)

    return indata
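# The core binning step above is N.searchsorted (this codebase imports
# numpy as N): each value maps to the index of the first cutpoint that
# is >= it, and that index is its bin label. A small standalone
# illustration with made-up cutpoints:
import numpy as np

edges = np.array([10, 20])           # two cutpoints separating 3 bins
vals = np.array([5, 10, 15, 20, 25])
print(np.searchsorted(edges, vals))  # [0 0 1 1 2]; a value equal to a
                                     # cutpoint falls in the lower bin
                                     # with the default side='left'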
def make_initial_canvases(self) -> Sequence[CanvasAble]:
    if isclass(self.initial_canvases):
        initial_canvases_f = instantiate_dataclass_from_kwargs(
            self.initial_canvases, self.kwargs)
        # TODO No infinite sequences of canvases allowed?
        return list(initial_canvases_f())
    else:
        return as_list(self.initial_canvases)
def coalesced_stmts(stmts):
    stmts = as_list(stmts)
    if not stmts:
        return NullStmt()
    else:
        result = stmts[0]
        for stmt in stmts[1:]:
            result = result.coalesced_with(stmt)
        return result
def __str__(self):
    cl = self.__class__.__name__
    os = ' '.join(str(o) for o in [self.operator] + as_list(self.operands))
    # TODO Include canvas and addr
    xs = [os]
    if self.source is not None:
        xs.append(f'source={self.source}')
    if self.dest is not None:
        xs.append(f'dest={self.dest}')
    return f"{cl}({', '.join(xs)})"
def make_from(cls, c: CanvasAble, MAX_CLARITY: Optional[int] = None,
              INITIAL_CLARITY: Optional[int] = None) -> Canvas:
    if isinstance(c, Canvas):
        return c  # TODO Update with MAX_CLARITY?
    # elif isinstance(c, list):
    #     return Canvas1D(c)  # TODO copy the list
    # elif isinstance(c, tuple):
    #     return Canvas1D(list(c))
    elif is_iter(c):
        #MAX_CLARITY: int = MAX_CLARITY if isinstance(MAX_CLARITY, int)
        if MAX_CLARITY is None:
            return Canvas1D(contents=as_list(c))
        else:
            return Canvas1D(contents=as_list(c), MAX_CLARITY=MAX_CLARITY)
    else:
        raise NotImplementedError
def append(
    cls,
    ls: Union[List['Criterion'], 'Criterion', None],
    cs: Union[List['Criterion'], 'Criterion', None]
) -> Union[List['Criterion'], None]:
    if not cs:
        return ls
    elif ls is None:
        return cs
    else:
        print('APPEND1', ls)  # DEBUG
        ls = as_list(ls)
        print('APPEND2', ls)  # DEBUG
        for c in as_iter(cs):
            ls.append(c)
        return ls
def __call__(self, x: Union[int, None, Sequence[int]]) -> float:
    '''How well does x match self.targetss?'''
    target: List[int] = as_list(x)
    return max(
        self.try_ms(ms, target)  # type: ignore[arg-type]  # mypy how?
        for ms in permutations(self.matchers)
    )
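# max(... for ms in permutations(self.matchers)) scores the target
# against every ordering of the matchers and keeps the best, making the
# match order-insensitive. try_ms is project-specific; below is a
# hypothetical standalone analog of the pattern with a toy
# positional-equality score.
from itertools import permutations
from typing import List, Sequence

def best_permutation_score(matchers: Sequence[int],
                           target: List[int]) -> float:
    '''Toy analog: count positional matches under the best ordering.'''
    def score(ms: Sequence[int], tgt: List[int]) -> float:
        return float(sum(1 for m, t in zip(ms, tgt) if m == t))
    return max(score(ms, target) for ms in permutations(matchers))

print(best_permutation_score([4, 5], [5, 4]))  # 2.0: reversed order matches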
def avails(o) -> List[int]:
    # TODO try-except
    return as_list(o.avails)
def codelet_args(self, codelet: Codelet, agent: Optional[Agent]=None) \
        -> Dict[str, Any]:
    codelet = self.replace_refs(codelet, as_list(agent))
    return dict(
        (param_name,
         self.value_for_codelet_arg(codelet, param_name, agent))
        for param_name in inspect.signature(codelet.go).parameters
    )
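# codelet_args introspects the codelet's go method with
# inspect.signature and builds one entry per parameter name. A
# hypothetical, self-contained demonstration of that pattern (the go
# stand-in and the environment dict below are made up):
import inspect
from typing import Any, Callable, Dict

def args_from_signature(f: Callable, env: Dict[str, Any]) -> Dict[str, Any]:
    '''Builds an argument dict keyed by f's parameter names, looking
    each name up in env (None when absent).'''
    return {name: env.get(name) for name in inspect.signature(f).parameters}

def go(fm, agent, sources):  # stand-in for a Codelet.go method
    pass

print(args_from_signature(go, {'fm': 'model', 'agent': 'a1'}))
# {'fm': 'model', 'agent': 'a1', 'sources': None}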
def plot_wordcloud(wc, hovertextsize=32, **kwargs):
    '''Plots a wordcloud object.

    Parameters
    ----------
    wc : wordcloud.wordcloud.WordCloud object
    hovertextsize : int
        Size of the hovertext
    **kwargs : keyword arguments passed on to fig.update_layout()
    '''
    width = wc.width
    height = wc.height
    font_family = as_list(PIL.ImageFont.truetype(wc.font_path).getname())[0]

    fig = go.Figure().add_layout_image(
        dict(
            x=0, sizex=width,
            y=0, sizey=height,
            xref="x", yref="y",
            opacity=1.0,
            layer="below",
            # 'sizing' has no effect because the size of the image is the
            # size of the entire figure below
            sizing="stretch",
            source=wc.to_image()))

    unnested_layout = [
        dict(text=text, freq=freq, fontsize=fontsize, x=col, y=row,
             orientation=orientation, color=color)
        for (text, freq), fontsize, (row, col), orientation, color
        in wc.layout_
    ]

    for word in unnested_layout:
        # create font object
        font = PIL.ImageFont.truetype(wc.font_path, word['fontsize'])
        # transpose font object
        transposed_font = PIL.ImageFont.TransposedFont(
            font, orientation=word['orientation'])
        # greyscale scratch image, used only to measure text
        img_grey = PIL.Image.new("L", (height, width))
        draw = PIL.ImageDraw.Draw(img_grey)
        # calculate box size (textsize was removed in Pillow 10; use
        # textbbox with newer Pillow)
        box_width, box_height = draw.textsize(word['text'],
                                              font=transposed_font)
        x0 = word['x']
        x1 = x0 + box_width + wc.margin
        y0 = word['y']
        y1 = y0 + box_height + wc.margin
        hovertext = (
            '<b>Word:</b> {}<br><b>Relative Frequency:</b> {:.3f}'.format(
                word['text'], word['freq']))
        hoverlabel = dict(bgcolor=word['color'],
                          bordercolor=wc.background_color,
                          font_family=font_family + ', sans-serif',
                          font_color=wc.background_color,
                          font_size=hovertextsize)
        # add filled transparent boxes with non-transparent hovering
        fig = fig.add_trace(
            go.Scatter(
                x=[x0, x1, x1, x0, x0],
                y=np.array([y0, y0, y1, y1, y0]) - 1,  # -1 when using go.Image
                fill='toself',
                text=hovertext,
                name='',
                hoveron='fills',
                opacity=0.0,
                hoverlabel=hoverlabel))

    fig = fig.update_layout(yaxis_showgrid=False,
                            xaxis_showgrid=False,
                            yaxis_zeroline=False,
                            xaxis_zeroline=False,
                            yaxis_range=[height, 0],
                            yaxis_constrain='domain',
                            yaxis_scaleanchor='x',
                            yaxis_scaleratio=1,
                            xaxis_range=[0, width],
                            xaxis_constrain='domain',
                            plot_bgcolor=wc.background_color,
                            xaxis_showticklabels=False,
                            yaxis_showticklabels=False,
                            showlegend=False)
    return fig.update_layout(**kwargs)


# example below -------------
# import re
# import requests
# from bs4 import BeautifulSoup
# from matplotlib.colors import ListedColormap
# from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.feature_extraction._stop_words import ENGLISH_STOP_WORDS

# url = 'https://en.wikipedia.org/wiki/Grenada'
# req = requests.get(url)
# # available parsers: 'html.parser', 'lxml', 'html5lib'
# soup = BeautifulSoup(req.text, 'lxml')

# vectorizer = CountVectorizer().fit([soup.get_text()])
# counts = vectorizer.transform([soup.get_text()])
# short_english_stopwords = [w for w in ENGLISH_STOP_WORDS if len(w) <= ]
# digit_words = [w for w in vectorizer.get_feature_names()
#                if re.match('^\d{1,3}$', w) is not None]
# manually_added = ['edit', 'retrieved', 'from', 'with',
#                   'identifierswikipedia', 'articles']
# counts_dict = {k: v for k, v in zip(vectorizer.get_feature_names(),
#                                     counts.toarray()[0])
#                if k not in (set(short_english_stopwords)
#                             | set(digit_words) | set(manually_added))}

# rgb = np.asarray([(191, 45, 47), (50, 120, 96), (246, 209, 75),
#                   (255, 255, 255)])  # last colour is white
# cmap = ListedColormap(rgb[:3] / 255, name='grenada')
# wc = WordCloud(font_path='/Library/Fonts/Microsoft/Arial.ttf',
#                colormap=cmap, mode='RGB', prefer_horizontal=0.2,
#                random_state=89860, min_font_size=8, max_words=100,
#                background_color='black',
#                width=800, height=400).generate_from_frequencies(counts_dict)
# fig = plot_wordcloud(wc)
# fig = fig.update_layout(title='Top 100 Words on English Wikipedia Page for "Grenada" 🇬🇩',
#                         title_font_size=60, title_font_color='black',
#                         title_font_family='arial',
#                         #margin=dict(l=80, r=80, t=120, b=80)
#                         )
# fig = fig.add_annotation(text='<i>By Jillian Augustine, PhD. (@jill_codes)</i>',
#                          x=0.975, y=0, xref='paper', yref='paper',
#                          xanchor='right', yanchor='top', yshift=0,
#                          showarrow=False, font_size=48, font_color='black',
#                          font_family='arial', align='right')
# # fig.show()

# # update for png
# (fig.update_annotations(font_size=16)
#  .update_layout(title_font_size=20)
#  .write_image('eg.png'))

# # update for html
# (fig.update_annotations(font_size=16)
#  .update_layout(title_font_size=20)
#  .update_traces(hoverlabel_font_size=16, selector=dict(type='scatter'))
#  .write_html('eg.html'))
def remove_all_hops_to(self, to_node: NodeId):
    '''It is not an error if there are no hops to to_node.'''
    # as_list because Hop sets will change during iteration
    for hop in as_list(self.hops_to_neighbor(to_node)):
        self.remove(hop)
def run(self):
    while self.iter_next():
        db_inds = self._perm[self._cursor]
        for datum in as_list(self._dataiter.read(db_inds)):
            self._queue.put(datum)
def _remove_all_hops_to(self, nodeid: NodeId):
    # as_list because Hop sets will change during iteration
    for neighbor in as_list(self._neighbors(nodeid)):
        self.g.nodes[neighbor]['hops'].remove_all_hops_to(nodeid)
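# Both hop-removal methods above snapshot the collection with as_list
# before iterating, because removing hops mutates the underlying sets
# mid-loop. A minimal illustration with a plain set of why the snapshot
# matters:
s = {1, 2, 3}
# for x in s: s.discard(x)  # RuntimeError: set changed size during iteration
for x in list(s):           # the role as_list plays above
    s.discard(x)
print(s)                    # set()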
def _impl(data):
    out = []
    for datum in as_list(data):
        out.append(transformer(datum))
    return out
def on_build(self):
    if not self.action:
        self.action = Ac.as_action(
            as_list(self.acs) + as_list(self.post_acs),
            name=self.name,
            threshold=self.threshold)
def parse(code, predefs=None, debug=False):
    result = as_list(predefs)
    for i in parser.parse(code, tracking=True, debug=debug):
        for item in as_iter(i):
            result.append(item)
    return result