def __init__(self, ignore=None, mapping=None, host=None, user=None,
             password=None, rootdir=None):
    # avoid mutable default arguments; fall back to the original defaults
    if ignore is None:
        ignore = [regexp(r'.*\.git.*'), regexp(r'.*\.DS_Store$')]
    if mapping is None:
        mapping = []
    self.ignore = ignore
    self.PATH_MAPPING = mapping
    self.host = host
    self.user = user
    self.password = password
    self.root = rootdir
    self.observer = None
    self.stream = None
    self.sf = None
    self.connected = False
    self.exit = False
    try:
        self.connect()
    except Exception as e:
        logging.critical("SSH could not connect!")
        logging.critical(str(e))
        exit(1)
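# Usage sketch (hypothetical: the owning class is not shown in this snippet, so
# `SshSync` below is only a stand-in name): construct the syncer with custom
# ignore patterns and a local-to-remote path mapping.
#
#   syncer = SshSync(
#       ignore=[regexp(r'.*\.pyc$')],
#       mapping=[('/local/project', '/srv/project')],
#       host='example.com', user='deploy', password='secret',
#       rootdir='/local/project',
#   )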
def doador_2008(cnpj_ou_cpf):
    u'''
    Looks up the given CNPJ or CPF on the 2008 campaign donors page and
    returns a list containing the fields in "doador_2008.campos".

    Example:

    >>> campos = doador_2008('00000000000191')
    >>> campos is not None
    True
    >>> len(campos) == len(doador_2008.campos)
    True
    '''
    pessoa = pessoa_or_valueerror(cnpj_ou_cpf)
    scraper = Scraper()

    # first check whether this person was a donor at all
    url = 'http://www.tse.jus.br/spce2008ConsultaFinanciamento/lovPesquisaDoador.jsp'
    scraper.open(url)
    scraper.browser.form['cdCpfCnpjDoador'] = pessoa.plain()
    scraper.browser.form.find_control(name='acao').readonly = False
    scraper.browser.form['acao'] = 'pesquisar'
    scraper.submit()
    if scraper.html.find('font', text=regexp('A pesquisa n.o retornou resultado.')):
        return None

    # then fetch the list of candidates who received the donations
    url = 'http://www.tse.jus.br/spce2008ConsultaFinanciamento/inicioServlet.do?acao=candidato'
    scraper.open(url)
    scraper.browser.form.find_control(name='cdCpfCnpjDoador').readonly = False
    scraper.browser.form['cdCpfCnpjDoador'] = pessoa.plain()
    scraper.browser.form.find_control(name='acao').readonly = False
    scraper.browser.form['acao'] = 'resumo'
    scraper.submit()

    url = 'http://www.tse.jus.br/spce2008ConsultaFinanciamento/listaReceitaCand.jsp'
    scraper.open(url)
    td = scraper.html.find('td', attrs={'class': 'Left'})
    fields = []
    while True:
        try:
            # walk the <td> siblings and collect their text content
            items = [c for c in td.contents if isinstance(c, basestring)]
            s = ''.join(items)
            s = s.strip()
            fields.append(s.encode('utf8'))
            td = td.nextSibling.nextSibling
        except AttributeError:
            break
    return fields
def doador_2004(cnpj_ou_cpf):
    u'''
    Returns a table with this person's (cnpj_ou_cpf) donations. The table is a
    list of lists, each one containing the fields in "doador_2004.campos".

    >>> tabela = doador_2004('85.907.012/0001-57')
    >>> tabela is not None
    True
    >>> len(tabela)
    16
    >>> len(tabela[0]) == len(doador_2004.campos)
    True

    URL: http://www.tse.gov.br/internet/eleicoes/2004/prest_blank.htm
    '''
    pessoa = pessoa_or_valueerror(cnpj_ou_cpf)
    scraper = Scraper()

    url = 'http://www.tse.gov.br/sadEleicao2004Prestacao/spce/index.jsp'
    scraper.open(url)
    scraper.browser.select_form(name='formDoador')
    scraper.browser.form.find_control(name='nome').readonly = False
    scraper.browser.form.find_control(name='numero').readonly = False
    scraper.browser.form['numero'] = pessoa.plain()
    scraper.browser.form['nome'] = '%'
    try:
        scraper.submit()
    except:
        return None

    if not scraper.html.find(text=regexp('Valor Total de Fornecimento')):
        return None

    table = scraper.html.findAll('table')[-1]
    lines = []
    for tr in table.findAll('tr')[1:-1]:
        columns = []
        for td in tr.findAll('td'):
            try:
                contents = td.b.contents
            except:
                contents = td.contents
            content = ' '.join(contents).strip()
            text = html2unicode(content)
            columns.append(text)
        lines.append(columns)
    return lines
def doador_2006(cnpj_ou_cpf):
    u'''
    Returns a table with this person's (cnpj_ou_cpf) donations. The table is a
    list of lists, each one containing the fields in "doador_2006.campos".

    >>> tabela = doador_2006('181.929.206-15')
    >>> tabela is not None
    True
    >>> len(tabela)
    1
    >>> len(tabela[0]) == len(doador_2006.campos)
    True

    URL: http://www.tse.gov.br/internet/eleicoes/2006/prest_contas_blank.htm
    '''
    pessoa = pessoa_or_valueerror(cnpj_ou_cpf)
    scraper = Scraper()

    url = 'http://www.tse.gov.br/sadSPCE06F3/faces/careceitaByDoador.jsp'
    scraper.open(url)
    scraper.browser.form['frmByDoador:cdCpfCgc'] = pessoa.plain()
    scraper.submit(name='frmByDoador:_id4')

    strong = scraper.html.find('strong', text=regexp('.*prestadas pelo doador.*'))
    if strong is None:
        return None

    table = strong.parent.parent.parent.parent
    table = table.nextSibling.nextSibling.nextSibling.nextSibling
    lines = []
    for tr in table.tbody.findAll('tr'):
        columns = []
        for td in tr.findAll('td'):
            content = td.contents[0].strip()
            text = html2unicode(content)
            columns.append(text)
        lines.append(columns)
    return lines
"""Task that analyzes dependencies.""" import datetime from re import compile as regexp from selinon import FatalTaskError import urllib.parse from f8a_worker.base import BaseTask from f8a_worker.errors import TaskError from f8a_worker.schemas import SchemaRef from f8a_worker.solver import get_ecosystem_solver from f8a_worker.utils import json_serial gh_dep = regexp('@?[\w-]+/[\w-]+') class DependencySnapshotTask(BaseTask): """Task that analyzes dependencies.""" _analysis_name = 'dependency_snapshot' schema_ref = SchemaRef(_analysis_name, '1-0-0') def _collect_dependencies(self): """Return all dependencies for current analysis flow (operates on parent mercator result). :return: List[str], list of dependencies """ wr = self.parent_task_result('metadata') if not isinstance(wr, dict): raise TaskError( 'metadata task result has unexpected type: {}; expected dict'.
def FeedShow(macro, Date, Tail, NameDb=DATABASE):
    from re import match as regexp
    from datetime import datetime
    import sqlite3 as db
    from news2.utils import date2rus, DATE_FMT

    FeedsCon = None
    FeedPage = []
    f = macro.formatter

    BaseQuery = "SELECT Title,Link,Published FROM News WHERE Publish=1"
    DateLimit = " AND Published GLOB '%s*'" % Date
    DataQuery = BaseQuery
    if Date:
        if regexp(r"^[0-9]{4}-[0-1][0-9]-[0-3][0-9]$", Date):
            DataQuery = DataQuery + DateLimit
        else:
            if regexp(r"^@.*$", Date):
                Tail = NORM_LIMIT
            Date = None
    # clamp the number of rows to a sane limit
    if Tail < 1 or Tail > MAX_LIMIT:
        Tail = MAX_LIMIT
    RowLimit = " ORDER BY Published DESC LIMIT %d" % Tail
    DataQuery = DataQuery + RowLimit

    try:
        FeedsCon = db.connect(NameDb)
        FeedsCon.row_factory = db.Row
        FeedsCur = FeedsCon.cursor()
        FeedsCur.execute(DataQuery)
        Feeds = FeedsCur.fetchall()
        FFeed = True
        for feed in Feeds:
            if not FFeed:
                FeedPage.append(f.rule())
            else:
                # first item: emit the date heading once
                FFeed = False
                if Date is None:
                    Date = feed["Published"][:10]
                FeedPage.append(f.big(True) + f.strong(True)
                                + f.pagelink(True, pagename=PAGE_LINK + '/' + Date)
                                + f.text(date2rus(datetime.strptime(Date, DATE_FMT).date()))
                                + f.url(False) + f.strong(False) + f.big(False))
                FeedPage.append(f.heading(True, 4))
                FeedPage.append(f.heading(False, 4))
            FeedPage.append(f.paragraph(True))
            FeedPage.append(f.url(True, href=feed["Link"]) + f.icon("www")
                            + f.text(feed["Title"]) + f.url(False))
        if not FFeed:
            FeedPage.append(f.paragraph(False))
    except:
        FeedPage = ""
    finally:
        if FeedsCon:
            FeedsCon.close()

    return "".join(FeedPage)
def construct_velocity_tree_py(X1, X2):
    # assumes module-level imports: numpy as np, re, matplotlib.pyplot as plt,
    # and scipy.sparse.csgraph.shortest_path
    n = X1.shape[1]

    # merge the two data sets with a given time step
    t = 0.5
    X_all = np.hstack((X1, X1 + t * X2))

    # parameter settings
    maxIter = 20
    eps = 1e-3
    sigma = 0.001
    gamma = 10

    # run the DDRTree algorithm
    W, Z, stree, Y, R, history = DDRTree_py(
        X_all, maxIter=maxIter, eps=eps, sigma=sigma, gamma=gamma
    )

    # draw velocity figure (original MATLAB plotting kept for reference):
    # quiver(Z(1, 1:100), Z(2, 1:100), Z(1, 101:200)-Z(1, 1:100), Z(2, 101:200)-Z(2, 1:100));
    # plot(Z(1, 1:100), Z(2, 1:100), 'ob');
    # plot(Z(1, 101:200), Z(2, 101:200), 'sr');

    G = stree
    sG = remove_velocity_points(G, n)
    tree = sG
    row, col, val = [], [], []
    for i in range(sG.shape[0]):
        for j in range(sG.shape[1]):
            if sG[i][j] != 0:
                row.append(i)
                col.append(j)
                val.append(sG[i][j])

    tree_fname = "tree.csv"
    # TODO: write the (row, col, val) triplets of sG to tree.csv
    branch_fname = "branch.txt"
    cmd = "python extract_branches.py " + tree_fname + " " + branch_fname
    # TODO: run `cmd` so that branch.txt exists before it is read below

    # read the branches: one path of node indices per line
    branch_cell = []
    with open(branch_fname, "r") as fid:
        for tline in fid:
            path = [int(m) for m in re.findall(r"\d+", tline)]
            branch_cell.append(path)

    dG = np.zeros((n, n))
    for p in range(len(branch_cell)):
        path = branch_cell[p]
        pos_direct = 0
        for bp in range(len(path)):
            u = path[bp]
            v = u + n
            # find the shortest path on graph G (works for trees)
            nodeid = u
            ve_nodeid = v
            _, predecessors = shortest_path(
                csgraph=G, directed=False, indices=nodeid, return_predecessors=True
            )
            # walk the predecessor array back from the velocity node to the tree node
            velocity_path = [ve_nodeid]
            while velocity_path[0] != nodeid:
                velocity_path = [predecessors[velocity_path[0]]] + velocity_path
            # check direction consistency between the branch path and the velocity path;
            # tree points have index < n, velocity points have index >= n
            valid_idx = [i for i in velocity_path if i < n]
            if len(valid_idx) == 1:
                # compute direction matching from the angle at the tree point
                if bp < len(path) - 1:
                    tree_next_point = Z[:, path[bp + 1]]
                    v_point = Z[:, v]
                    u_point = Z[:, u]
                    angle = calculate_angle(u_point, tree_next_point, v_point)
                    angle = angle / np.pi * 180
                    if angle < 90:
                        pos_direct += 1
                else:
                    tree_pre_point = Z[:, path[bp - 1]]
                    v_point = Z[:, v]
                    u_point = Z[:, u]
                    angle = calculate_angle(u_point, tree_pre_point, v_point)
                    angle = angle / np.pi * 180
                    if angle > 90:
                        pos_direct += 1
            else:
                if bp < len(path) - 1:
                    if path[bp + 1] == valid_idx[1]:
                        pos_direct += 1
                else:
                    if path[bp - 1] != valid_idx[1]:
                        pos_direct += 1

        neg_direct = len(path) - pos_direct
        print(
            "branch=" + str(p)
            + ", (" + str(path[0]) + "->" + str(path[-1]) + ")"
            + ", pos=" + str(pos_direct)
            + ", neg=" + str(neg_direct)
        )
        print(path)

        if pos_direct > neg_direct:
            for bp in range(len(path) - 1):
                dG[path[bp], path[bp + 1]] = 1
        else:
            for bp in range(len(path) - 1):
                dG[path[bp + 1], path[bp]] = 1

    # figure; plot(digraph(dG)); title('directed graph')  (MATLAB residue)
    plt.figure()
    row = []
    col = []
    for i in range(dG.shape[0]):
        for j in range(dG.shape[1]):
            if dG[i][j] != 0:
                row.append(i)
                col.append(j)
    for tn in range(len(row)):
        p1 = Y[:, row[tn]]
        p2 = Y[:, col[tn]]
        dp = p2 - p1
        plt.quiver(p1[0], p1[1], dp[0], dp[1], linewidth=5)
    for i in range(n):
        plt.text(Y[0, i], Y[1, i], str(i))
    # matplotlib cannot write MATLAB .fig files, so save as PNG instead
    plt.savefig("./results/t01_figure3.png")
def add_regexps(self, regexps):
    for re in regexps:
        if isinstance(re, str):
            self.url_regexps.append(regexp(re))
        else:
            self.url_regexps.append(re)
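# Usage sketch (hypothetical `crawler` instance; assumes `url_regexps` is a
# plain list on the owning object): string patterns are compiled, precompiled
# patterns are stored unchanged.
#
#   crawler.add_regexps([r'^https://example\.com/.*', regexp(r'\.pdf$')])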
from re import compile as regexp
import os

from leapp.libraries.stdlib import run

_current_boot_matcher = regexp(r'BootCurrent: (?P<boot_current>([0-9A-F]*))')
_next_boot_matcher = regexp(r'BootNext: (?P<boot_next>([0-9A-F]*))')


def get_current_boot_match(string):
    match = _current_boot_matcher.match(string)
    if not match:
        return None
    captured_groups = match.groupdict()
    return captured_groups['boot_current']


def get_next_boot_match(string):
    match = _next_boot_matcher.match(string)
    if not match:
        return None
    captured_groups = match.groupdict()
    return captured_groups['boot_next']


def maybe_emit_updated_boot_entry():
    if not os.path.exists('/sbin/efibootmgr'):
        return
    efi_info = run(['/sbin/efibootmgr'], checked=False, split=True)
    if efi_info['exit_code'] != 0:
        # nothing to do if efibootmgr itself failed
        return
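# Usage sketch for the matchers above (example values, not taken from a real
# machine): `efibootmgr` prints lines such as "BootCurrent: 0001", and the
# helpers return the hex entry id, or None when the line does not match.
#
#   >>> get_current_boot_match('BootCurrent: 0001')
#   '0001'
#   >>> get_next_boot_match('BootNext: 0002')
#   '0002'
#   >>> get_current_boot_match('Timeout: 1 seconds') is None
#   True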
from requests import get
from json import loads
from re import compile as regexp
from xml.etree import ElementTree as ET

# ... call home!

# Bunch of tokens that we're going to handle specially; the last
# rule matches all letters, in which case lexicographic comparison is
# used. In other cases the definition order of the token is used.
__tokens = (r'rc', r'pre', r'beta', r'alpha', '[a-zA-Z]*',)

# Select a number from a string
__number = regexp(r'(\d*)')

# Split by dots (.), dashes (-) and whatever we define in `__tokens`
__splitter = regexp(r'\.|-|({})'.format('|'.join(__tokens))).split


def __split(data):
    """ Split the data using `__splitter` and filter out empties """
    return list(filter(None, __splitter(data)))


def __parse_vc(n):
    """ Parse version component """
    # Already an int, return
    if isinstance(n, int):
        return n
    # This part is one of our named `__tokens`, so sort them
    # from left to right (hence the -1.0)
    if n in __tokens:
        return -1.0 * (__tokens.index(n) + 1)
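# Usage sketch for `__parse_vc` above: named tokens sort below plain numbers,
# so e.g. a release-candidate component compares lower than a numeric one.
#
#   >>> __parse_vc(3)
#   3
#   >>> __parse_vc('rc')     # first entry in __tokens
#   -1.0
#   >>> __parse_vc('beta')   # third entry in __tokens
#   -3.0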
def check_encode_format(string):
    """Checks using regexp whether a string fits the encoding format"""
    return regexp(ENCODE_RE).match(string) is not None
"Convert a Turtle test type into an LD Patch test type." # exceptions entry_name = unicode(entry_name) if entry_name == "turtle-eval-bad-04": return NS.NegativeSyntaxTest # because bad IRIs are forbidden by the LD Patch syntax # else return { RDFT.TestTurtleEval: NS.PositiveEvaluationTest, RDFT.TestTurtleNegativeEval: NS.NegativeEvaluationTest, RDFT.TestTurtlePositiveSyntax: NS.PositiveSyntaxTest, RDFT.TestTurtleNegativeSyntax: NS.NegativeSyntaxTest, }[etype] STRIP_COMMENT = regexp(r'(?<!\\)#[^">]*$|^#.*$') def ttl2patch(iri, revext="", command="Add"): "Convert a ttl file into an ldpatch file." ret = URIRef(iri.replace(".ttl", "{}.ldpatch".format(revext))) ipath = i2p(iri) opath = i2p(ret) with open(opath, "w") as ofile: with open(ipath) as ifile: for line in ifile: if line.startswith("@prefix"): ofile.write(line) ofile.write("{} {{\n".format(command)) with open(ipath) as ifile: for line in ifile: if not line.startswith("@prefix"):
from inspect import getargspec, stack
from re import compile as regexp
from struct import unpack
from uuid import uuid4

from django.conf import settings

ValidEmail = regexp(
    r"(?i)(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"'
    r')@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}$'
)


def choices( choices ):
    """Constructs a tuple of choices tuples for the specified choices."""
    return tuple( [ ( str( choice ), str( choice ).capitalize() ) for choice in choices ] )


def identify_remote_ip( request ):
    """Identifies the remote ip of the specified request."""
    return request.META.get( 'REMOTE_ADDR' )


def trace_stack( indent = '' ):
    """Traces the stack at the current source line."""
    lines = []
    for frame, filename, lineno, context, source, pos in reversed( stack()[ 1: ] ):
        lines.append( '%sfile "%s", line %d, in %s' % ( indent, filename, lineno, context ) )
        if source:
            # snippet truncated here; a minimal completion: include the source line itself
            lines.append( '%s    %s' % ( indent, source[ pos ].strip() ) )
    return '\n'.join( lines )
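# Usage sketch for `choices` above: it builds Django-style choice tuples from
# any iterable of values.
#
#   >>> choices(['red', 'green'])
#   (('red', 'Red'), ('green', 'Green'))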