def btlinesearch(f, x0, fx0, g, dx, accept_ratio, shrink_factor, max_steps, verbose=False):
    '''
    Find a step size t such that f(x0 + t*dx) is within a factor
    accept_ratio of the linearized function value improvement.

    Args:
        f: the function
        x0: starting point for search
        fx0: the value f(x0). Will be computed if set to None.
        g: search direction, typically the gradient of f at x0
        dx: the largest possible step to take
        accept_ratio: termination criterion
        shrink_factor: how much to decrease the step every iteration
        max_steps: maximum number of shrinking iterations
        verbose: if True, print progress every iteration
    '''
    if fx0 is None:
        fx0 = f(x0)
    t = 1.
    m = g.dot(dx)
    # m should be non-positive for a descent direction
    if accept_ratio != 0 and m > 0:
        util.warn('WARNING: %.10f not <= 0' % m)
    num_steps = 0
    while num_steps < max_steps:
        true_imp = f(x0 + t*dx) - fx0
        lin_imp = t*m
        if verbose:
            print(true_imp, lin_imp, accept_ratio)
        if true_imp <= accept_ratio * lin_imp:
            break
        t *= shrink_factor
        num_steps += 1
    return x0 + t*dx, num_steps

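# A minimal usage sketch for btlinesearch (illustrative, not from the original
# project): minimize the quadratic f(x) = x.x along the steepest-descent
# direction. Assumes numpy; all parameter values below are arbitrary choices.
import numpy as np

f = lambda x: float(x.dot(x))   # convex objective
x0 = np.array([3.0, 4.0])
g = 2.0 * x0                    # gradient of f at x0
dx = -g                         # descent direction; t=1 is the largest step
xnew, num_steps = btlinesearch(f, x0, None, g, dx,
                               accept_ratio=0.1, shrink_factor=0.5,
                               max_steps=10)
# xnew satisfies f(xnew) - f(x0) <= 0.1 * t * g.dot(dx); here it lands on the
# minimizer [0, 0] after a single shrink (num_steps == 1).
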
def invert(self, rtol, min_count_t):
    max_divs = 15
    min_count_t = max(min_count_t, 2)
    divs = discrete.divs(min_count_t)
    if divs > max_divs:
        raise exc.NumericalError(
            "min. inversion time step count (%s) and corresponding min. divs (%s) "
            "too large; max. divs: %s" % (min_count_t, divs, max_divs))
    divs = max(divs - 1, 0)
    last_res = None
    while True:
        count_t = discrete.steps(divs)
        res = self._integrate(rtol, count_t)
        diff = None
        if None not in (last_res, res):
            diff = abs(res - last_res)
            if diff <= rtol * abs(res):
                break
        divs += 1
        if divs > max_divs:
            if diff is not None:
                util.warn("max. inversion time divs (%s) exceeded; rtol: %s; "
                          "latest step count: %s; latest difference: %s "
                          "(current: %s; last: %s)"
                          % (max_divs, rtol, count_t, diff, res, last_res),
                          stacklevel=2)
                break
            else:
                msg = ("max. inversion time divs (%s) exceeded; rtol: %s; "
                       "latest step count: %s" % (max_divs, rtol, count_t))
                if res is not None:
                    util.warn(msg, stacklevel=2)
                    break
                else:
                    raise exc.NumericalError(msg)
        last_res = res
    return res

def _runHelperWait(host):
    output = []
    while True:
        c = _getConnection(host)
        if not c:
            return None
        (stdin, stdout) = c
        line = stdout.readline().strip()
        if line == "~~~":
            break
        output += [line]

    try:
        rc = int(output[-1])
    except ValueError:
        util.warn("cannot parse exit code from helper on %s: %s" % (host.host, output[-1]))
        rc = 1

    util.debug(1, "exit code %d" % rc, prefix=host.host)
    for line in output:
        util.debug(2, " > %s" % line, prefix=host.host)

    return (rc == 0, output[:-1])

def link_configuration(cfg: Path, host_cfg: Path) -> None:
    """Ensure that ``cfg`` is a symlink to ``host_cfg``.

    :param cfg: path to ``configuration.nix``
    :param host_cfg: path to ``hosts/$(hostname)-configuration.nix``
    """
    if cfg.is_symlink():
        cfg_dest = cfg.parent / os.readlink(cfg)
        if cfg_dest.samefile(host_cfg):
            info(f"{p(cfg)} is already a symlink pointing to {p(host_cfg)}")
        else:
            warn(
                f"{p(cfg)} is a symlink pointing to {p(cfg_dest)}, not {p(host_cfg)}; updating it"
            )
            if not DRY_RUN:
                cfg.unlink()
                cfg.symlink_to(host_cfg)
    elif cfg.exists():
        info(f"{p(cfg)} already exists and is a regular file; moving it to {p(host_cfg)}")
        if host_cfg.exists():
            fatal(f"{p(host_cfg)} already exists!")
        elif not DRY_RUN:
            cfg.rename(host_cfg)
    else:
        info(f"{p(cfg)} doesn't exist; creating it as a link to {p(host_cfg)}")
        if not DRY_RUN:
            cfg.symlink_to(host_cfg)

def separate_regex_simple(voteddir, regexShr, regexDiff):
    ballots = []
    for dirpath, dirnames, filenames in os.walk(voteddir):
        imgnames = [f for f in filenames if util.is_image_ext(f)]
        shrPat = re.compile(regexShr)
        diffPat = re.compile(regexDiff)
        curmats = {}  # maps {str sim_pat: [(str imgpath, str diff_pat), ...]}
        for imgname in imgnames:
            imgpath = pathjoin(dirpath, imgname)
            sim_match = shrPat.match(imgname)
            diff_match = diffPat.match(imgname)
            if sim_match is None or diff_match is None:
                warn("Ballot {0} was skipped because it didn't "
                     "match the regular expressions.", imgpath)
                continue
            sim_part = sim_match.groups()[0]
            diff_part = diff_match.groups()[0]
            curmats.setdefault(sim_part, []).append((imgpath, diff_part))
        for sim_pat, tuples in curmats.iteritems():
            # sort by diff_part
            tuples_sorted = sorted(tuples, key=lambda t: t[1])
            imgpaths_sorted = [t[0] for t in tuples_sorted]
            ballots.append(imgpaths_sorted)
    return ballots

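# Hypothetical illustration of the two regexes this function expects (the
# filename scheme is made up). For scans named like "precinct12_side0.png",
# the "shared" pattern's first group identifies one ballot, and the "diff"
# pattern's first group orders the pages within it:
#
#   regexShr  = r"(precinct\d+)_side\d+\.png"   # group 1: shared by all pages
#   regexDiff = r"precinct\d+_side(\d+)\.png"   # group 1: differs per page
#   ballots = separate_regex_simple("voted/", regexShr, regexDiff)
#
# Each element of `ballots` is then a page-ordered list such as
# [".../precinct12_side0.png", ".../precinct12_side1.png"].
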
def update_bills_2(congress, bill_type, bill_number, recordtext, changehash, newchangehash, force_update):
    """Compares a THOMAS search result record to the hash file to see if
    anything changed, and if so, or if force_update == True, re-parses the
    bill or amendment."""
    key = bill_type + str(bill_number)
    rec = md5_base64(recordtext)

    if not force_update and key in changehash and changehash[key] == rec:
        newchangehash[key] = changehash[key]
        return

    if not force_update:
        warn("Detected Update to %d %s %d." % (congress, bill_type, bill_number))

    try:
        if bill_type == 'hz':
            # if (!ParseAmendment($bs, 'h', 'Z', $bn)) { return; }
            pass
        elif bill_type == 'sp':
            # if (!ParseAmendment($bs, 's', 'P', $bn)) { return; }
            pass
        else:
            parse_bill(congress, bill_type, bill_number)
        newchangehash[key] = rec
    except Exception as e:
        import traceback
        warn("Parsing bill %d %s %d: " % (congress, bill_type, bill_number)
             + unicode(e) + "\n" + traceback.format_exc())

def makeLocalNetworks(path, silent=False):
    netcfg = config.Config.localnetscfg

    if not os.path.exists(netcfg):
        util.warn("list of local networks does not exist in %s" % netcfg)
        return

    if not silent:
        util.output("generating local-networks.bro ...", False)

    out = open(os.path.join(path, "local-networks.bro"), "w")
    print >>out, "# Automatically generated. Do not edit.\n"

    netcfg = config.Config.localnetscfg
    if os.path.exists(netcfg):
        nets = readNetworks(netcfg)

        print >>out, "redef Site::local_nets = {"
        for (cidr, tag) in nets:
            print >>out, "\t%s," % cidr,
            if tag != "":
                print >>out, "\t# %s" % tag,
            print >>out
        print >>out, "};\n"

    if not silent:
        util.output(" done.")

def post_install(self):
    settings = '''
          ### added by Boss ###
          xdebug.remote_autostart = 1
          xdebug.remote_enable = 1
          xdebug.remote_connect_back = 1
          xdebug.remote_port = 9000
          xdebug.max_nesting_level = 512
          # https://www.jetbrains.com/help/phpstorm/configuring-xdebug.html#configuring-xdebug-vagrant
          # https://nystudio107.com/blog/using-phpstorm-with-vagrant-homestead#are-we-there-yet
          # This is usually 10.0.2.2 for vagrant
          # use this command to get the host's ip:
          # `netstat -rn | grep "^0.0.0.0" | tr -s " " | cut -d " " -f2`
          xdebug.remote_host = '10.0.2.2'
          '''
    # strip the 10-space source indentation from the block above
    settings = '\n'.join([i[10:] for i in settings.split('\n')])

    if self.distro == (Dist.UBUNTU, Dist.V18_04):
        xdebug_ini = '/etc/php/7.2/mods-available/xdebug.ini'
        self.append_to_file(xdebug_ini, settings)
    elif self.distro == (Dist.UBUNTU, Dist.V20_04):
        xdebug_ini = '/etc/php/7.4/mods-available/xdebug.ini'
        self.append_to_file(xdebug_ini, settings)
    else:
        warn('Xdebug ini edit not implemented yet for this version of Ubuntu.')
        return
    self.info('Xdebug ini edited', xdebug_ini)

def isRunning(nodes, setcrashed=True):
    results = []
    cmds = []
    for node in nodes:
        pid = node.getPID()
        if not pid:
            results += [(node, False)]
            continue
        cmds += [(node, "check-pid", [str(pid)])]

    for (node, success, output) in execute.runHelperParallel(cmds):
        # If we cannot connect to the host at all, we filter it out because
        # the process might actually still be running but we can't tell.
        if output is None:
            if config.Config.cron == "0":
                util.warn("cannot connect to %s" % node.name)
            continue

        results += [(node, success)]

        if not success:
            if setcrashed:
                # Grmpf. It crashed.
                node.clearPID()
                node.setCrashed()

    return results

def write_rparm(pf, name):
    if name not in RPARMS:
        util.warn('RUNTIME PARAMETER ' + name + ' NOT SET')
        sys.exit()

    datatype = RPARMS[name].datatype
    default = RPARMS[name].value
    if datatype == 'integer':
        if default is None:
            pf.write('[int] ' + name + ' = \n')
        else:
            pf.write('[int] ' + name + ' = %d\n' % default)
    elif datatype == 'double':
        if default is None:
            pf.write('[dbl] ' + name + ' = \n')
        else:
            pf.write('[dbl] ' + name + ' = %e\n' % default)
    elif datatype == 'string':
        if default is None:
            pf.write('[str] ' + name + ' = \n')
        else:
            pf.write('[str] ' + name + ' = %s\n' % default)
    else:
        print(name)
        util.warn("DATATYPE " + str(datatype) + " NOT RECOGNIZED")
        sys.exit()

    del RPARMS[name]

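# A minimal sketch of the RPARMS registry that write_rparm assumes; the
# Rparm record type and all entries here are hypothetical, for illustration.
from collections import namedtuple

Rparm = namedtuple('Rparm', ['datatype', 'value'])
RPARMS = {
    'nsteps': Rparm('integer', 1000),   # written as: [int] nsteps = 1000
    'tf':     Rparm('double', 2.0),     # written as: [dbl] tf = 2.000000e+00
    'outdir': Rparm('string', None),    # unset:      [str] outdir =
}

# write_rparm pops each entry as it writes it, so iterate over a copy:
#   with open('param.dat', 'w') as pf:
#       for name in list(RPARMS):
#           write_rparm(pf, name)
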
def _readNodes(self):
    self.nodelist = {}

    config = ConfigParser.SafeConfigParser()
    if not config.read(self.nodecfg) and not Installing:
        util.error("cannot read '%s'" % self.nodecfg)

    manager = False
    proxy = False
    standalone = False

    file = self.nodecfg
    counts = {}

    for sec in config.sections():
        node = Node(sec)
        self.nodelist[sec] = node

        for (key, val) in config.items(sec):

            if not key in Node._tags:
                util.warn("%s: unknown key '%s' in section '%s'" % (file, key, sec))
                continue

            if key == "type":
                # We determine which types are valid by checking for having an
                # option specifying which scripts to use for it.
                cfg = "scripts-%s" % val
                if not cfg in self.config:
                    util.error("%s: unknown type '%s' in section '%s'" % (file, val, sec))

                self.nodelist[sec].scripts = self.config[cfg].split()

                if val == "manager":
                    if manager:
                        util.error("only one manager can be defined")
                    manager = True

                if val == "proxy":
                    proxy = True

                if val == "standalone":
                    standalone = True

            node.__dict__[key] = val

        try:
            node.addr = socket.gethostbyname(node.host)
        except AttributeError:
            util.error("%s: no host given in section '%s'" % (file, sec))
        except socket.gaierror, e:
            util.error("%s: unknown host '%s' in section '%s' [%s]"
                       % (file, node.host, sec, e.args[1]))

        # Each node gets a number unique across its type.
        type = self.nodelist[sec].type
        try:
            counts[type] += 1
        except KeyError:
            counts[type] = 1

        node.count = counts[type]

def __init__(self, cfgfile):
    self.types = {}

    cnt = 0
    if not os.path.exists(cfgfile):
        if Installing:
            return
        util.error("analysis configuration %s does not exist" % cfgfile)

    for line in open(cfgfile):
        cnt += 1
        line = line.strip()
        if not line or line.startswith("#"):
            continue

        f = line.split()
        if len(f) < 2:
            util.warn("cannot parse line %d in %s" % (cnt, cfgfile))
            continue

        type = f[0]
        mechanism = f[1]
        descr = ""
        if len(f) > 2:
            descr = " ".join(f[2:])

        self.types[type] = (mechanism, descr)

def _do_the_test(found_sigs):

    def multi_signature_msg(key, actual, expect):
        len_act = len(actual) if type(actual) is list else 1
        len_exp = len(expect) if type(expect) is list else 1
        return ("multi-signature count mismatch for '{key}'. "
                "Actual {len_act} {actual} vs. expected {len_exp} {expect}"
                .format(**locals()))

    for key, value in sig_exists.sig_dict.items():
        name = key.rsplit(".", 1)[-1]
        if name in ("next", "__next__"):
            # ignore problematic cases
            continue
        if "<" in key:
            # Skip over remaining crap in "<...>"
            continue
        if key.startswith("sample.SampleNamespace"):
            # We cannot work with sample namespaces after the change to __qualname__.
            continue
        if (key.startswith("smart.SharedPtr") or
                re.match(r"PySide2\..*?\.QSharedPointer_", key)):
            # These mangled names are not supported.
            # We should fix them.
            continue
        if key not in found_sigs:
            warn("missing key: '{} value={}'".format(key, value), stacklevel=3)
        else:
            found_val = found_sigs[key]
            if type(value) is list and (type(found_val) is tuple or
                                        len(found_val) < len(value)):
                # We check that nothing got lost. But it is ok when an older
                # registry file does not know all variants, yet!
                warn(multi_signature_msg(key, found_val, value), stacklevel=3)

def _readConfig(self, file, allowstate=False):
    config = {}
    try:
        for line in open(file):
            line = line.strip()
            if not line or line.startswith("#"):
                continue

            args = line.split("=", 1)
            if len(args) != 2:
                util.error("%s: syntax error '%s'" % (file, line))

            (key, val) = args
            key = key.strip().lower()
            val = val.strip()

            if not allowstate and ".state." in key:
                util.error("state variable '%s' not allowed in file: %s" % (key, file))

            # If the key already exists, just overwrite it with the new value.
            config[key] = val

    except IOError, e:
        util.warn("cannot read '%s' (this is ok on first run)" % file)

    return config

def post_reply(reply, post):
    global badsubs
    global submissioncount
    global totalposted
    try:
        #TODO change name
        #possibly remove? not gonna be nsfw
        reply = "#####&#009;\n\n######&#009;\n\n####&#009;\n" + reply + "^Parent ^commenter ^can [^toggle ^NSFW](/message/compose?to=autowikibot&subject=AutoWikibot NSFW toggle&message=%2Btoggle-nsfw+____id____) ^or[](#or) [^delete](/message/compose?to=autowikibot&subject=AutoWikibot Deletion&message=%2Bdelete+____id____)^. ^Will ^also ^delete ^on ^comment ^score ^of ^-1 ^or ^less. ^| [^(FAQs)](http://www.np.reddit.com/r/autowikibot/wiki/index) ^| [^Mods](http://www.np.reddit.com/r/autowikibot/comments/1x013o/for_moderators_switches_commands_and_css/) ^| [^Magic ^Words](http://www.np.reddit.com/r/autowikibot/comments/1ux484/ask_wikibot/)"
        a = post.reply('[#placeholder-awb]Comment is being processed... It will be automatically replaced by new text within a minute or will be deleted if that fails.')
        postsuccess = r.get_info(thing_id='t1_' + str(a.id)).edit(reply.replace('____id____', str(a.id)))
        if not postsuccess:
            raise Exception('reply unsuccessful')
        totalposted = totalposted + 1
        submissioncount[str(post.submission.id)] += 1
        success("[OK] #%s " % totalposted)
        return True
    except Exception as e:
        warn("REPLY FAILED: %s @ %s" % (e, post.subreddit))
        if str(e).find('TOO_LONG') > -1:
            a.delete()
        elif str(e) == '403 Client Error: Forbidden' and str(post.subreddit) not in badsubs:
            badsubs = badsubs_page.content_md.strip().split()
            badsubs.append(str(post.subreddit))
            editsummary = 'added ' + str(post.subreddit)
            save_changing_variables(editsummary)
        else:
            fail(e)
            a.delete()
        return False

def create_test_from_file(fl, name, group, policy):
    txt = fl.read()
    fl.close()

    appdir = os.path.join(TESTS_DIR, group, name)
    if os.path.exists(appdir):
        if OVERWRITE:
            if not os.path.isdir(appdir):
                fatal("Unable to overwrite file: %s" % appdir)
            warn("Creating in existing directory: %s" % appdir)
        else:
            fatal("Not overwriting existing directory: %s" % appdir)

    prepare_dir(appdir)

    inputdir = os.path.join(appdir, 'source-input')
    if os.path.exists(inputdir):
        assert OVERWRITE
        if not os.path.isdir(inputdir):
            fatal("Unable to overwrite non-directory: %s" % inputdir)
    else:
        os.makedirs(inputdir)

    tgtfile = "%s.js" % name
    tgtpath = os.path.join(inputdir, tgtfile)
    tgtfl = open(tgtpath, 'w')
    tgtfl.write(txt)
    tgtfl.close()

def onPageChanging(self, evt):
    old = evt.GetOldSelection()
    if old == -1:
        # Don't know why these events are sometimes triggered...
        return
    try:
        status = self.notebook.GetPage(old).can_move_on()
    except ffwx.Panel.StepNotFinished as exn:
        ffwx.modal(self, exn.message)
        evt.Veto()
        return
    if not status:
        warn('can_move_on method for {0} should be switched to '
             'new exception-based API', self.notebook.GetPage(old))
        msg = '[CHANGE ME]'
        ffwx.modal(self, msg)
        evt.Veto()
        return
    if old == util.Steps.PROJECT:
        self.set_project(self.notebook.GetPage(old).get_project())
    try:
        if old >= 1:
            self.panels[old].stop()
    except ffwx.Panel.StepNotFinished as exn:
        ffwx.modal(self, exn.message)
        evt.Veto()
        return

def _refresh_using_refresh_token() -> bool:
    if not os.path.isfile('refresh_token'):
        util.debug("No refresh token found")
        return False
    with open('refresh_token') as f:
        refresh_token = f.read()
    token_req_body = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
        'client_id': conf.IDP_CLIENT_NAME
    }
    r = requests.post(conf.IDP_URL + "/auth/realms/master/protocol/openid-connect/token",
                      data=token_req_body)
    if r.status_code == 200:
        resp_body = r.json()
        _write_tokens(resp_body['access_token'], resp_body['expires_in'],
                      resp_body['refresh_token'])
        util.debug("Refreshed tokens using refresh token")
        return True
    else:
        util.warn("Refreshing tokens failed with status {}".format(r.status_code))
        return False

def do_request(req_obj):
    data = json.dumps(req_obj).encode(encoding='utf-8')
    request = urllib.request.Request(
        url=url, data=data,
        headers={'Content-Type': "text/plain;charset=utf-8"})
    string = '%s:%s' % ('zengl', '123456')
    base64string = base64.standard_b64encode(string.encode('utf-8'))
    request.add_header("Authorization", "Basic %s" % base64string.decode('utf-8'))
    try:
        u = urllib.request.urlopen(request)
        res = json.loads(u.read())
        if res['error'] is not None:
            raise Exception(res['error'])
        else:
            # print('result:', res['result'])
            return res['result']
    except urllib.error.HTTPError as e:
        util.error(e)
        util.warn(e.headers)
    except Exception as e:
        util.error(e)

def component_colormap(graph):
    """
    Colormap by strongly connected components
    """
    # automatically color by components;
    # a list of colors in hexadecimal Red/Green/Blue notation
    colors = [
        ORANGE, SPRING_GREEN, GOLD, TEAL, PURPLE, NAVY, SIENNA, CRIMSON, BLUE,
    ]

    # find the strongly connected components
    components = component.strongly_connected_components(graph)

    # make sure we have as many colors as components
    if len(colors) < len(components):
        util.warn('there are more components than colors!')

    # create the colormap
    colormap = {}
    for color, comp in zip(colors, components):
        for node in comp:
            colormap[node] = color

    return colormap

def gen_internal_blob2ds(self, depth=0):
    livepix = set(set(self.pixels) - set(self.edge_pixels))
    last_edge = set(self.edge_pixels)
    alldict = Pixel.pixel_ids_to_dict(livepix)
    edge_neighbors = set()
    for pixel in last_edge:
        edge_neighbors = edge_neighbors | set(
            Pixel.get(pixel).get_neighbors_from_dict(alldict))  # - set(blob2d.edge_pixels)
    edge_neighbors = edge_neighbors - last_edge
    bloomstage = livepix
    livepix = livepix - edge_neighbors

    # NOTE making new pixels, rather than messing with existing
    b2ds = Blob2d.pixels_to_blob2ds(bloomstage, parent_id=self.id,
                                    recursive_depth=self.recursive_depth + 1)

    for num, b2d in enumerate(b2ds):
        b2d = Blob2d.get(b2d)
        Blob2d.all[self.id].pixels = list(
            set(Blob2d.all[self.id].pixels) - set(b2d.pixels))

    if len(self.pixels) < len(Blob2d.get(self.id).pixels):
        warn('Gained pixels!!!! (THIS SHOULD NEVER HAPPEN!)')

    if depth < Config.max_depth:
        if len(livepix) > 1:
            for b2d in b2ds:
                Blob2d.get(b2d).gen_internal_blob2ds(depth=depth + 1)

def translate(self):
    """Translate a section into something suitable for apache::vhost

    Only Directory, Files, Location, DirectoryMatch, FilesMatch, and
    LocationMatch are supported by apache::vhost, all mapping to the
    directories parameter.

    Return:
        A dictionary that can be added to the array for the directories
        parameter of apache::vhost
    """
    provider = self.name.lower()
    if provider not in ['directory', 'files', 'location',
                        'directorymatch', 'filesmatch', 'locationmatch']:
        util.warn("warning: could not translate section {}".format(self.name))
        return None

    result = {
        'provider': provider,
        'path': self.path,
    }
    for child in self.directives:
        # Not supporting sections within sections
        if isinstance(child, DirectiveAST):
            result.update(child.translate())

    return result

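# Sketch of the result for a hypothetical <Directory /var/www> section whose
# children translate to 'options' and 'allow_override' entries (the exact
# keys come from DirectiveAST.translate(), so this is illustrative only):
#
#   {'provider': 'directory',
#    'path': '/var/www',
#    'options': ['Indexes'],
#    'allow_override': ['None']}
#
# A list of such dicts is what puppetlabs-apache's apache::vhost expects for
# its `directories` parameter.
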
def get_host(PATHS):
    machines = util.get_files(PATHS['MACHINE'], '*')
    for n in range(len(machines)):
        machines[n] = machines[n].split('/')[-1].replace('.py', '')
        try:
            machine = __import__(machines[n])
        except ImportError:
            continue
        if machine.matches_host():
            break
        del machine

    try:
        machine
    except NameError:
        util.warn("HOST " + os.uname()[1] + " UNKNOWN")
        sys.exit()

    host = machine.get_options()

    # overwrite with environment variable if available
    for name in HOST_OPTIONS:
        env_name = get_env_name(name)
        if env_name in os.environ:
            util.gentle_warn('{} environment variable overriding {} in machine file'
                             .format(env_name, name))
            host[name] = os.environ[env_name]

    return host

def _autobuild(self, data, expand_limit):
    children = []
    if os.path.islink(data):
        node_type = 'symlink'
    elif os.path.isdir(data):
        node_type = 'directory'
        try:
            children = os.listdir(data)
            children = map(lambda f, d=data: os.path.join(d, f), children)
        except OSError:
            warn("Unable to list directory '%s'" % data)
            children = []
    else:
        node_type = 'file'

    self.configure(1, node_type, name=os.path.basename(data))

    if (self.properties ^ NP_ABSTRACT) & NP_ABSTRACT:
        self.treewidget.addNode(self)

    if self.properties & NP_ABSTRACT:
        childprops = (self.properties ^ NP_ABSTRACT) | NP_ROOT
    else:
        childprops = (self.properties | NP_ROOT) ^ NP_ROOT

    if self.state & NS_EXPANDED:
        childstate = self.state
    else:
        childstate = ((self.state | NS_PENDING_SHOW | NS_VISIBLE)
                      ^ (NS_PENDING_SHOW | NS_VISIBLE))

    for ch in children:
        treenode = FSTreeNode(self.treewidget, self, ch, expand_limit - 1,
                              props=childprops, state=childstate)
        self.children.append(treenode)

    if children:
        self.state = self.state | NS_HAS_CHILDREN

def makeLocalNetworks():
    netcfg = config.Config.localnetscfg

    if not os.path.exists(netcfg):
        if not config.Installing:
            util.warn("list of local networks does not exist in %s" % netcfg)
        return

    util.output("generating local-networks.bro ...", False)

    out = open(os.path.join(config.Config.policydirsiteinstallauto, "local-networks.bro"), "w")
    print >>out, "# Automatically generated. Do not edit.\n"

    netcfg = config.Config.localnetscfg
    if os.path.exists(netcfg):
        nets = readNetworks(netcfg)

        print >>out, "redef local_nets = {"
        for (cidr, tag) in nets:
            print >>out, "\t%s," % cidr,
            if tag != "":
                print >>out, "\t# %s" % tag,
            print >>out
        print >>out, "};\n"

    util.output(" done.")

def run_targetpages(debug=False, overwrite=False, refine=None,
                    synonly=False, service=False, apps=None):
    results = RunResults('targetpages', overwrite)

    sites = get_lines(TARGETPAGE_FILE, comment='#')

    polnet = os.path.join(POLICY_DIR, 'network-isolation.policy')
    policies = {'network-isolation': [polnet]}

    for site in sites:
        # Limit to the given site names, if provided.
        if apps is not None and site not in apps:
            continue

        # Extract the application name from the URL.
        app = None
        paramidx = site.find("?payload=")
        if paramidx > -1:
            plidx = paramidx + 9
            endidx = site.find("&", plidx)
            if endidx == -1:
                endidx = len(site)
            app = site[plidx:endidx]
            warn("Appname: %s" % app)

        url = 'http://' + site
        res = run_website(url, policies, debug=debug, overwrite=overwrite,
                          refine=refine, synonly=synonly, service=service,
                          appname=app)

        # Track successful results
        results.add(res)

        # Space the output.
        sys.stderr.write('\n')

    results.printSummary()

def _runHelperWait(host):
    output = []
    while True:
        c = _getConnection(host)
        if not c:
            return None
        (stdin, stdout) = c
        line = stdout.readline().strip()
        if line == "~~~":
            break
        output += [line]

    try:
        rc = int(output[0])
    except ValueError:
        util.warn("cannot parse exit code from helper on %s: %s" % (host.host, output[0]))
        rc = 1

    util.debug(1, "%-10s exit code %d" % (("[%s]" % host.host), rc))
    for line in output:
        util.debug(2, " > %s" % line)

    return (rc == 0, output[1:])

def save_report(self):
    try:
        ts = time.time()

        # Comment scores
        self.c.execute('''CREATE TABLE IF NOT EXISTS comment_scores
                          (cid TEXT, subreddit TEXT, score INTEGER, ts INTEGER)''')
        self.c.execute('''CREATE INDEX IF NOT EXISTS cscores_time ON comment_scores(ts)''')
        for cid, score in self.score_map.iteritems():
            self.c.execute('''INSERT INTO comment_scores(cid, subreddit, score, ts)
                              VALUES(?,?,?,?)''',
                           (cid, self.subreddit_map[cid], score, ts))
        self.conn.commit()

        # Deleted comments
        self.c.execute('''CREATE TABLE IF NOT EXISTS deleted_comments
                          (cid TEXT, subreddit TEXT, score INTEGER, ts INTEGER)''')
        self.c.execute('''CREATE INDEX IF NOT EXISTS dcomments_time ON deleted_comments(ts)''')
        for cols in self.del_list:
            # I'm sure this could be done better
            curcols = list(cols)
            curcols.append(ts)
            self.c.execute('''INSERT INTO deleted_comments(cid, subreddit, score, ts)
                              VALUES(?,?,?,?)''', curcols)
        self.conn.commit()
    except Exception, e:
        warn(e)
        warn("Failed to write subreddit scores")

def post_reply(reply, post):
    global badsubs
    global submissioncount
    global totalposted

    # This is a quick hack to fix the double list issue (git issue #12)
    # Please find the actual source of this bug, and delete this hack
    # It removes any sentences that are posted more than once
    lines = []
    for line in reply.split("\n"):
        if line not in lines:
            lines.append(line)
    reply = '\n'.join(lines)

    try:
        reply = "#####&#009;\n\n######&#009;\n\n####&#009;\n" + reply + "\n^Parent ^commenter ^can [^toggle ^NSFW](http://www.np.reddit.com/message/compose?to=autowikiabot&subject=AutoWikibot NSFW toggle&message=%2Btoggle-nsfw+____id____) ^or[](#or) [^delete](http://www.np.reddit.com/message/compose?to=autowikiabot&subject=AutoWikibot Deletion&message=%2Bdelete+____id____)^. ^Will ^also ^delete ^on ^comment ^score ^of ^-1 ^or ^less. ^| [^(FAQs)](http://www.np.reddit.com/r/autowikiabot/wiki/index) ^| [^Source](https://github.com/Timidger/autowikiabot-py)\n ^(Please note this bot is in testing. Any help would be greatly appreciated, even if it is just a bug report! Please checkout the) [^source ^code](https://github.com/Timidger/autowikiabot-py) ^(to submit bugs)"
        a = post.reply('[#placeholder-awb]Comment is being processed... It will be automatically replaced by new text within a minute or will be deleted if that fails.')
        postsuccess = r.get_info(thing_id='t1_' + str(a.id)).edit(reply.replace('____id____', str(a.id)))
        if not postsuccess:
            raise Exception('reply unsuccessful')
        totalposted = totalposted + 1
        submissioncount[str(post.submission.id)] += 1
        success("[OK] #%s " % totalposted)
        return True
    except Exception as e:
        warn("REPLY FAILED: %s @ %s" % (e, post.subreddit))
        if str(e) == '(TOO_LONG) `this is too long (max: 15000.0)` on field `text`':
            a.delete()
        elif str(e) == '403 Client Error: Forbidden' and str(post.subreddit) not in badsubs:
            badsubs = badsubs_page.content_md.strip().split()
            badsubs.append(str(post.subreddit))
            editsummary = 'added ' + str(post.subreddit)
            save_changing_variables(editsummary)
        else:
            fail(e)
            a.delete()
        return False

def extract_blob3ds(all_slides, stitched=True):
    printl('Extracting 3D blobs by combining 2D blobs into 3D', flush=True)
    blob3dlist = []
    if not stitched:
        warn('Extracting blob3ds, and have been told that they haven\'t been stitched. '
             'This will be inaccurate')
        printl('Extracting blob3ds, and have been told that they haven\'t been stitched. '
               'This will be inaccurate')  # DEBUG

    for slide_num, slide in enumerate(all_slides):
        for blob in slide.blob2dlist:
            if Blob2d.get(blob).b3did == -1:
                if stitched:
                    # The much better option! ESPECIALLY for recursive_depth = 0
                    buf = [b2d for b2d in Blob2d.get(blob).get_stitched_partners()]
                    # old method:
                    # buf = [Blob2d.get(b2d) for b2d in Blob2d.get(blob).getpartnerschain()]
                    # IDEALLY could use this for both... for now, it doesn't work well
                else:
                    # TODO setting partners needs filtering like stitching
                    buf = [Blob2d.get(b2d)
                           for b2d in Blob2d.get(blob).getpartnerschain()]
                if len(buf) != 0:
                    blob3dlist.append(Blob3d([b2d.id for b2d in buf]))
    return blob3dlist

def diff_hw_config(hardware_cfg: Path) -> None:
    """Diff changes if we updated the hardware configuration.

    :param hardware_cfg: path to ``hosts/$(hostname)-hardware-configuration.nix``
    """
    # Figure out what would happen if we ran nixos-generate-config.
    info("Diff if `nixos-generate-config` was run:")
    cmd(
        "nixos-generate-config --show-hardware-config "
        + f"| diff --ignore-all-space {hardware_cfg} - "
        + "| delta"
    )
    new_cfg = get_output(["nixos-generate-config", "--show-hardware-config"])
    diff = get_output(
        [
            "diff",
            "--report-identical-files",
            "--new-file",
            "--unified",
            "--ignore-all-space",
            str(hardware_cfg),
            "-",
        ],
        input=new_cfg,
        # 1 just means we found differences
        ok_returncodes=[0, 1],
    )
    delta = subprocess.run(["delta"], input=diff, encoding="utf-8", check=False)
    if delta.returncode != 0:
        warn(f"delta exited with non-zero return code {delta.returncode}")

def run():
    """ Check for 3rd-party licenses """
    if "help" in sys.argv:
        warn(help)
        return 0

    check_files = ["./"]
    if len(sys.argv) > 1:
        check_files = sys.argv[1:]

    script_name = os.path.basename(sys.argv[0])
    file_list = " ".join(check_files)
    cmd = 'grep -w -E -i -r -H -n "%s" %s | grep -v %s > %s 2>&1' % \
          ("|".join(black_list), file_list, script_name, log_file)

    info("Check 3rd License with cmd:\n %s ..." % cmd)
    # os.system() encodes the child's exit code in the high byte:
    # 0 -> grep matched (license strings found), 256 -> grep exit code 1
    # (no matches), 512 -> grep exit code 2 (error)
    ret = os.system(cmd)
    if ret == 0:
        os.system("cat %s" % log_file)
        error("Check 3rd License Failed, details refer to: '%s'!\n" % log_file)
    elif ret == 256:
        info("Check 3rd License Passed!\n")
    elif ret == 512:
        os.system("cat %s" % log_file)
        error("Run cmd error: %s\n" % cmd)

def cancel_request(conn):
    warn('canceling spot instance requests and terminating instances...')
    requests = conn.get_all_spot_instance_requests(load_request_ids())
    for r in requests:
        r.cancel()
    instance_ids = [r.instance_id for r in requests if r.instance_id is not None]
    if len(instance_ids) > 0:
        conn.terminate_instances(instance_ids)

def getCapstatsOutput(nodes, interval):
    if not config.Config.capstats:
        if config.Config.cron == "0":
            util.warn("do not have capstats binary available")
        return []

    results = []
    cmds = []

    hosts = {}
    for node in nodes:
        try:
            hosts[(node.addr, node.interface)] = node
        except AttributeError:
            continue

    for (addr, interface) in hosts.keys():
        node = hosts[addr, interface]
        capstats = [config.Config.capstats, "-i", interface, "-I", str(interval), "-n", "1"]

        # Unfinished feature: only consider a particular MAC. Works here for capstats
        # but Bro config is not adapted currently so we disable it for now.
        # try:
        #     capstats += ["-f", "\\'", "ether dst %s" % node.ether, "\\'"]
        # except AttributeError:
        #     pass

        cmds += [(node, "run-cmd", capstats)]

    outputs = execute.runHelperParallel(cmds)

    for (node, success, output) in outputs:
        if not success:
            results += [(node, "%s: cannot execute capstats" % node.tag, {})]
            continue

        fields = output[0].split()
        vals = {}

        try:
            for field in fields[1:]:
                (key, val) = field.split("=")
                vals[key] = float(val)

            results += [(node, None, vals)]
        except ValueError:
            results += [(node, "%s: unexpected capstats output: %s" % (node.tag, output[0]), {})]

    return results

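# Shape of the capstats output parsed above (numbers made up): the helper
# returns one line whose first field is skipped and whose remaining fields
# are key=value pairs, e.g.
#
#   1234567890.123456 pkts=1500 kpps=0.2 kbytes=800 mbps=1.3 nic_pkts=1500 nic_drops=0
#
# which the loop converts into {'pkts': 1500.0, 'kpps': 0.2, ...}.
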
def checkBroVersion(self):
    if "broversion" not in self.state:
        return

    oldversion = self.state["broversion"]
    version = self._getBroVersion()

    if version != oldversion:
        util.warn("new bro version detected (run 'broctl install')")

def __init__(self, popcon_file):
    self.inst_by_pkg = {}
    for line in popcon_file:
        if line.startswith("Package:"):
            line = remove_space_dups(line)
            try:
                _, pkg, _, inst = line.split(" ")[:4]
                self.inst_by_pkg[pkg] = int(inst)
            except ValueError:
                # report the raw line: 'pkg' may be unbound if unpacking failed
                warn("failed to parse popcon inst value in line '%s'" % line.strip())

def _updateHTTPStats():
    # Create meta file.
    if not os.path.exists(config.Config.statsdir):
        try:
            os.makedirs(config.Config.statsdir)
        except OSError, err:
            util.output("error creating directory: %s" % err)
            return

        util.warn("creating directory for stats file: %s" % config.Config.statsdir)

def set_security_group(conn, name):
    info("Setting up security group {} in {}".format(name, conn.region))
    sg = get_or_make_group(conn, name)
    if sg.rules != []:
        warn('security group {} in {} already has rules; skipping modification'.format(name, conn.region))
        return
    proto = ['tcp', 'udp']
    authorized_ip = '0.0.0.0/0'  # all IPs
    for p in proto:
        sg.authorize(p, 0, 65535, authorized_ip)

def writeState(self):
    try:
        out = open(self.statefile, "w")
    except IOError:
        util.warn("can't write '%s'" % self.statefile)
        return

    print >>out, "# Automatically generated. Do not edit.\n"
    for (key, val) in self.state.items():
        print >>out, "%s = %s" % (key, self.subst(str(val)))

def parseDiffOutput(diffout):
    files = []
    for ln in diffout.split("\n"):
        if ln == "":
            continue
        match = re.search('^Files (.*) and (.*) differ$', ln)
        if match:
            files.append(match.group(1, 2))
        else:
            warn("Unsupported output pattern: %s" % ln)
    return files

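# Quick illustration on the "brief" report format of GNU diff (as produced
# by e.g. `diff -rq old/ new/`); the paths are made up:
sample = ("Files old/a.txt and new/a.txt differ\n"
          "Files old/b.txt and new/b.txt differ\n")
pairs = parseDiffOutput(sample)
# pairs == [('old/a.txt', 'new/a.txt'), ('old/b.txt', 'new/b.txt')]
# Any other non-empty line (e.g. "Only in new/: c.txt") is passed to warn().
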
def clean_cache(self):
    if os.path.isdir(self.gitdir):
        if not self.src in self._get_remote_fetch():
            util.warn("Deleting %s as %s is not in the remote list" % (self.cachedir, self.src))
            shutil.rmtree(self.cachedir)
        else:
            return
    elif os.path.exists(self.cachedir):
        util.warn("Removing non-git clone %s" % (self.cachedir,))
        shutil.rmtree(self.cachedir)

def isAlive(host):
    if host in _deadHosts:
        return False

    (success, output) = runLocalCmd(os.path.join(config.Config.scriptsdir, "is-alive")
                                    + " " + util.scopeAddr(host))

    if not success and not config.Config.cron == "1":
        _deadHosts[host] = True
        util.warn("host %s is not alive" % host)

    return success

def __init__(self, filename, attrs):
    if filename is None:
        util.warn('Warning: not writing log to any file!')
        self.f = None
    else:
        if os.path.exists(filename):
            raise RuntimeError('Log file %s already exists' % filename)
        self.f = tables.open_file(filename, mode='w')
        for k, v in attrs:
            self.f.root._v_attrs[k] = v
    self.log_table = None

    self.schema = None  # list of col name / types for display

def post_reply(reply, post):
    global totalposted
    try:
        post.reply(reply)
        totalposted = totalposted + 1
        success("#%s REPLY SUCCESSFUL" % totalposted)
        return True
    except Exception as e:
        warn("REPLY FAILED: %s @ %s" % (e, post.subreddit))
        if str(e) == '403 Client Error: Forbidden':
            badsubs.append(str(post.subreddit))
        return False

def appendStateVal(self, key):
    key = key.lower()
    try:
        out = open(self.statefile, "a")
    except IOError:
        util.warn("can't append to '%s'" % self.statefile)
        return

    print >>out, "%s = %s" % (key, self.state[key])
    out.close()

def run():
    """ Check for aos.mk """
    if "help" in sys.argv:
        warn(help)
        return 0

    info("Checking Component's Makefile ...")
    check_files = ["./"]
    if len(sys.argv) > 1:
        check_files = sys.argv[1:]

    mkfiles = []
    for item in check_files:
        if os.path.isfile(item):
            mkfiles.append(item)
        elif os.path.isdir(item):
            mkfiles += find_comp_mkfile(item)
        else:
            error("No such file or directory: %s" % item)

    result = {}
    for mkfile in mkfiles:
        tmp = {}
        if os.access(mkfile, os.X_OK):
            tmp["is_executable"] = "Fix it with: 'chmod -x %s'" % mkfile

        if check_win_format(mkfile):
            tmp["is_win_format"] = "Fix it with: 'dos2unix %s'" % mkfile

        ret = check_global_include(mkfile)
        if ret:
            tmp["has_global_includes"] = "Line: " + ",".join(ret)

        ret = check_extra_blank(mkfile)
        if ret:
            tmp["has_extra_blank"] = "Line: " + ",".join(ret)

        if tmp:
            result[mkfile] = tmp

    if result:
        with open(log_file, "w") as f:
            index = 0
            for key in result:
                f.write("[%d] %s:\n" % (index, key))
                for issue in result[key]:
                    f.write(" %s: %s\n" % (issue.upper(), result[key][issue]))
                index += 1
        os.system("cat %s" % log_file)
        error("Check Component's Makefile Failed, details refer to: '%s'!\n" % log_file)
    else:
        info("Check Component's Makefile Passed!\n")

def isAlive(host):
    if host in _deadHosts:
        return False

    (success, output) = runLocalCmd(os.path.join(config.Config.scriptsdir, "is-alive")
                                    + " " + host)

    if not success and not config.Config.cron == "1":
        _deadHosts[host] = True
        util.warn("host %s is not alive" % host)

    return success

def isAlive(host):
    if host in _deadHosts:
        return False

    (success, output) = runLocalCmd("ssh -o ConnectTimeout=30 %s true" % util.scopeAddr(host))

    if not success:
        _deadHosts[host] = True
        if config.Config.cron == "0":
            util.warn("host %s is not alive" % host)

    return success

def getNodeByID(self, node_id):
    """Return the node having a given ID."""
    nodelist = filter(lambda n, id=node_id: n.id == id, self.all_nodes)
    if len(nodelist) > 1:
        raise RuntimeError("Apparently two or more nodes have the ID '%s'. "
                           "That shouldn't happen." % node_id)
    elif len(nodelist) == 1:
        result = nodelist[0]
    else:
        warn("TreeWidget.getNodeByID() failed to find a node with ID '%s'. "
             "This may indicate a programming error." % node_id)
        result = None
    return result

def test_signatures(self):
    found_sigs = enum_all()
    with isolate_warnings():
        for key, value in sig_exists.dict.items():
            name = key.rsplit(".", 1)[-1]
            if name in ("next", "__next__"):
                # ignore problematic cases
                continue
            if key not in found_sigs:
                warn("missing key: '{}'".format(key))
            elif isinstance(value, list) and len(value) != len(found_sigs[key]):
                warn("multi-signature count mismatch: '{}'".format(key))
    if is_ci and check_warnings():
        raise RuntimeError("There are errors, see above.")