def codeFile(args, flag, data):
    PARAM_KEY = 1
    PARAM_FILE = 2  # Output file location
    PARAM_FORMATTER = 3
    ARGUMENTS = len(args) - 1
    # Ability to add a block of code through copy and paste and have it
    # formatted correctly!
    if keyExists("files", args[PARAM_KEY]):
        _file = json.loads(load("files/" + args[PARAM_KEY]))  # loadJSON
        out = ''
        for x in _file:
            block = str(load("blocks/" + x))
            if ARGUMENTS == PARAM_FORMATTER:
                # Alter all the blocks in said fashion
                block = format.block(block, args[PARAM_FORMATTER])
            out += block
            out += "\n"  # Adds some spacing between blocks
        # No file specified
        if len(args) < 3:
            log(out)
        else:
            log("Saving to file " + args[PARAM_FILE])
            save(args[PARAM_FILE], out)
    else:
        error("Error: File does not exist")
def resolve(self, item, captcha_cb=None):
    item = item.copy()
    url = item['url']
    if url.startswith('http://www.ulozto.sk'):
        url = self.base_url + url[20:]
    url = self.decr_url(url)
    url = self._url(url)
    if url.startswith('#'):
        util.error('[uloz.to] - url was not correctly decoded')
        return
    self.init_urllib()
    self.login()
    self.info('Resolving %s' % url)
    if not item.has_key('vip'):
        item['vip'] = False
    vip = item['vip']
    if vip:
        page = util.request(url)
    else:
        try:
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
            page = response.read()
            response.close()
        except urllib2.HTTPError, e:
            traceback.print_exc()
            return
def __init__(self, config, basedir, version):
    global Config
    Config = self

    self.config = {}
    self.state = {}

    # Read broctl.cfg.
    self.config = self._readConfig(config)

    # Set defaults for options we get passed in.
    self._setOption("brobase", basedir)
    self._setOption("version", version)

    # Initialize options.
    for opt in options.options:
        if not opt.dontinit:
            self._setOption(opt.name, opt.default)

    # Set defaults for options we derive dynamically.
    self._setOption("mailto", "%s" % os.getenv("USER"))
    self._setOption("mailfrom", "Big Brother <bro@%s>" % socket.gethostname())
    self._setOption("home", os.getenv("HOME"))
    self._setOption("mailalarmsto", self.config["mailto"])

    # Determine operating system.
    (success, output) = execute.captureCmd("uname")
    if not success:
        util.error("cannot run uname")
    self._setOption("os", output[0].lower().strip())

    # Find the time command (should be a GNU time for best results).
    (success, output) = execute.captureCmd("which time")
    self._setOption("time", output[0].lower().strip())
def codeProject(args, flag, data):
    PARAM_KEY = 1
    PARAM_PATH = 2
    PARAM_FORMATTER = 3
    ARGUMENTS = len(args) - 1
    # JSON mapping files and storage of this
    if keyExists("projects", args[PARAM_KEY]):
        if "stdout" in args[PARAM_PATH]:
            project = json.loads(load("projects/" + args[PARAM_KEY]))  # Uses key/value storage
            directory = args[PARAM_PATH] + "/" + args[PARAM_KEY]
            mkdir(directory)
            for x in project.keys():
                # Reflect that with here
                _file = json.loads(load("files/" + x))
                out = ''
                for y in _file:
                    block = str(load("blocks/" + y))
                    if ARGUMENTS == PARAM_FORMATTER:
                        # Alter all the blocks in said fashion
                        block = format.block(block, args[PARAM_FORMATTER])
                    out += block
                # Output the file with the correct file name
                save(directory + "/" + project[x], out)
    else:
        error("Error: Project does not exist")
def queryForIssueIds(self, query):
    if not query:
        error('No JQL query provided.')

    # Create POST body
    content = {
        'jql': query,
        'startAt': 0,
        'fields': ['summary'],
        'maxResults': 1000
    }

    # Do request
    request = self._createRequest()
    response = request.post('/rest/api/2/search', self._serialize(content),
                            contentType='application/json')

    # Parse result
    if response.status == 200:
        data = Json.loads(response.response)
        print "#### Issues found"
        issueIds = []
        for item in data['issues']:
            issueIds.append(item['id'])
            print u"* {0} - {1}".format(item['id'], item['key'])
        print "\n"
        return issueIds
    else:
        error(u"Failed to execute search '{0}' in JIRA.".format(query), response)
def checkQuery(self, query):
    if not query:
        error('No JQL query provided.')

    # Create POST body
    content = {
        'jql': query,
        'startAt': 0,
        'fields': ['summary', 'status']
    }

    # Do request
    request = self._createRequest()
    response = request.post('/rest/api/2/search', self._serialize(content),
                            contentType='application/json')

    # Parse result
    if response.status == 200:
        data = Json.loads(response.response)
        issues = {}
        for item in data['issues']:
            issue = item['key']
            issues[issue] = (item['fields']['summary'],
                             item['fields']['status']['name'])
        return issues
    else:
        error(u"Failed to execute search '{0}' in JIRA.".format(query), response)
def install(host, src, dstdir):
    if isLocal(host):
        if not exists(host, src):
            util.output("file does not exist: %s" % src)
            return False

        dst = os.path.join(dstdir, os.path.basename(src))
        if exists(host, dst):
            # Do not clobber existing files/dirs (this is not an error)
            return True

        util.debug(1, "cp %s %s" % (src, dstdir))
        try:
            if os.path.isfile(src):
                shutil.copy2(src, dstdir)
            elif os.path.isdir(src):
                shutil.copytree(src, dst)
        except OSError:
            # Python 2.6 has a bug where this may fail on NFS. So we just
            # ignore errors.
            pass
    else:
        util.error("install() not yet supported for remote hosts")

    return True
def __init__(self, section_type, section_class=0, section_id=0,
             section_length=0, load_address=0, extended_length=0,
             filename=None):
    """Constructor

    If filename is specified, this reads in the file and sets the
    section length to the length of the file.
    """
    self.section_type = section_type
    self.section_class = section_class
    self.section_id = section_id
    self.section_length = section_length
    if (section_type == TFTF_SECTION_TYPE_SIGNATURE) or \
       (section_type == TFTF_SECTION_TYPE_CERTIFICATE):
        self.load_address = 0xffffffff
    else:
        self.load_address = load_address
    self.expanded_length = extended_length
    self.filename = filename

    # Try to size the section length from the section input file
    if filename:
        try:
            statinfo = os.stat(filename)
            # TODO: Lengths will be different if/when we support
            # compression:
            # - section_length will shrink to the compressed size
            # - expanded_length will remain the input file length
            self.section_length = statinfo.st_size
            self.expanded_length = statinfo.st_size
        except OSError:
            error("file", filename, " is invalid or missing")
def add_section(self, section_type, section_class, section_id,
                section_data, load_address=0):
    # Add a new section to the section table and return a success flag.
    #
    # (This would be called by "sign-tftf" to add signature and
    # certificate blocks.)
    num_sections = len(self.sections)
    if num_sections < TFTF_HDR_NUM_SECTIONS:
        # Insert the section into the section list, just in front of
        # the end-of-table marker.
        #
        # Notes:
        #   1. We assume this is an uncompressable section
        #   2. We defer pushing the new section into the buffer until
        #      the write stage or someone explicitly calls "pack".
        self.sections.insert(num_sections - 1,
                             TftfSection(section_type, section_class,
                                         section_id, len(section_data),
                                         load_address, len(section_data),
                                         None))

        # Append the section data blob to our TFTF buffer
        self.tftf_buf += section_data

        # Record the length of the entire TFTF blob (this will be longer
        # than the header's load_length)
        self.tftf_length = len(self.tftf_buf)
        return True
    else:
        error("Section table full")
        return False
def _readConfig(self, file, allowstate=False):
    config = {}
    try:
        for line in open(file):
            line = line.strip()
            if not line or line.startswith("#"):
                continue

            args = line.split("=", 1)
            if len(args) != 2:
                util.error("%s: syntax error '%s'" % (file, line))

            (key, val) = args
            key = key.strip().lower()
            val = val.strip()

            if not allowstate and ".state." in key:
                util.error("state variable '%s' not allowed in file: %s" % (key, file))

            # If the key already exists, just overwrite it with the new value.
            config[key] = val
    except IOError, e:
        util.warn("cannot read '%s' (this is ok on first run)" % file)

    # Callers assign the result (self.config = self._readConfig(...)),
    # so return the parsed dict.
    return config
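# A hypothetical fragment of the key=value file that _readConfig above
# parses; '#' lines and blank lines are skipped, keys are lowercased,
# and later assignments overwrite earlier ones. The option names below
# are illustrative only (mailto is one option the callers actually set).
#
#     # Site configuration
#     MailTo = admin@example.com
#     LogDir = /usr/local/bro/logs
#
# _readConfig would return:
#     {'mailto': 'admin@example.com', 'logdir': '/usr/local/bro/logs'}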
def _get_db(self):
    try:
        conn = sqlite.connect(os.path.expanduser(self.WALLPAPER_SETTINGS))
        cursor = conn.cursor()
        return conn, cursor
    except sqlite.OperationalError:
        error("Unable to open OS X wallpaper settings.")
def get_conn():
    try:
        conn = ec2.connect_to_region(get_ec2_conf()['Region'])
        return conn
    except Exception as e:
        error(e.message)
        sys.exit(1)
def queryIssues(self, query, options=None):
    if not query:
        error('No JQL query provided.')

    # Create POST body
    content = {
        'jql': query,
        'startAt': 0,
        'fields': ['summary', 'status', 'assignee']
    }

    # Do request
    request = self._createRequest()
    response = request.post('/rest/api/2/search', self._serialize(content),
                            contentType='application/json')

    # Parse result
    if response.status == 200:
        issues = {}
        data = Json.loads(response.response)
        for item in data['issues']:
            issue = item['key']
            issues[issue] = {
                'issue': issue,
                'summary': item['fields']['summary'],
                'status': item['fields']['status']['name'],
                'assignee': item['fields']['assignee']['displayName'],
                'link': "{1}/browse/{0}".format(issue, self.jira_server['url'])
            }
        return issues
    else:
        error(u"Failed to execute search '{0}' in JIRA.".format(query), response)
def test(cmd):
    print ""
    print "Starting " + cmd
    util.run(cmd)

    clientlist = []
    start = time.time()
    for i in range(0, NUM_CLIENT):
        client = testit("Client-" + str(i))
        client.setDaemon(True)
        clientlist.append(client)
        client.start()

    for client in clientlist:
        client.join()
    end = time.time()

    if util.is_server_alive(cmd) == -1:
        util.error("Ouch! Server is dead!"
                   " Your bounded buffer may not be well protected")

    print "Elapsed time (in seconds): " + str(end - start)
    if end - start > EXPECTED_TIME:
        util.error("your server is not multithreaded")
def test(cmd):
    global expected
    global got
    global count

    util.info("")
    util.info("- Starting " + cmd)
    util.info("")
    util.run(cmd)

    start = time.time()
    clientlist = []
    expected = []
    for i in range(1, NUM_CLIENT):
        expected.append(commands.getoutput("cat ./testdata/file%s.txt" % str(i)))
    commands.getoutput("rm -rf %s" % tmpfile)

    for i in range(0, NUM_CLIENT):
        client = testit("Client-" + str(i), i)
        clientlist.append(client)
        client.start()
        time.sleep(0.3)

    for client in clientlist:
        client.join()
    end = time.time()
    util.info("Elapsed time (in seconds): " + str(end - start))

    time.sleep(CGI_SPIN_TIME + 2)
    res = commands.getoutput("cat %s" % tmpfile)
    if util.is_server_alive(cmd) == -1:
        util.error("Ouch! Server is dead!"
                   " Your bounded buffer may not be well protected")

    pos0 = res.find(expected[0])
    pos1 = res.find(expected[1])
    pos2 = res.find(expected[2])
    passed = pos0 > 0 and pos1 > 0 and pos2 > 0 and pos0 < pos1 and pos1 < pos2
    util.info(res)

    if passed:
        print ""
        print "#####################################"
        print "GOOD! You implemented SFF correctly"
        print "#####################################"
        print ""
        count = count + 1
    else:
        print ""
        print "#####################################"
        print "Oh oh! ERROR ERROR!"
        print "SFF is not implemented correctly"
        print "#####################################"
        print ""
        sys.exit(-1)
def post(self, tlkey):
    try:
        tlist = db.get(db.Key(tlkey))
        pos = 0
        if tlist.insertAtBottom:
            pos = tlist.firstTaskOrder + tlist.taskCount
        else:
            pos = tlist.firstTaskOrder - 1
            tlist.firstTaskOrder -= 1

        # Bookkeeping on the list
        tlist.taskCount += 1
        tlist.activeTaskCount += 1

        # Put the task list to ensure it has a key
        tlist.put()

        task = models.Task(
            taskList=tlist,
            order=pos,
        )
        task.put()

        if util.isAjax(self):
            self.response.out.write(template.render("views/task.html",
                                                    {"tl": tlist, "task": task}))
        else:
            self.redirect("/list/" + str(tlist.key()))
    except:
        logging.error(sys.exc_info())
        util.error(self, 500, "Something went wrong on our end when creating the todo, please try again")
def build(self, gcode):
    '''Build a new gCode toolpath that uses the IndexDict.'''
    puts(colored.blue('Building Toolpath:'))
    for i, line in enumerate(progress.bar(gcode)):
        if 'G' in line:  # only handle the G-codes
            # Get the G code number and assign it to cmd. For human
            # readability, cmd could be renamed to g_command_number or
            # something like that; note it is hardcoded as a dict key too.
            cmd = line['G']

            # Copy the relevant data (x, y, z, i, j, index and the G
            # command number) into an IndexDict named "move".
            move = IndexDict(name='move')
            for j, x in enumerate(AXIS):
                if x in line:
                    move[x] = line[x]
            move['cmd'] = cmd
            move['index'] = line['index']
            try:
                # Use the IndexDict instance as the info for the next
                # coordinates attached to the toolpath, by way of the
                # function fcn selected from the GCMD dict of functions.
                fcn = GCMD[cmd]
                move.name = 'cmd[% 2i]' % cmd
                fcn(self, move, cmd)
            except KeyError:
                error('Missing command in GCMD: %d(%s)' % (cmd, line))
def determineBroVersion(self):
    version = None
    bro = self.subst("${bindir}/bro")
    if execute.exists(None, bro):
        (success, output) = execute.captureCmd("%s -v 2>&1" % bro)
        if success:
            version = output[len(output) - 1]

    if not version:
        # Ok if it's already set.
        if "broversion" in self.state:
            return
        util.error("cannot find Bro binary to determine version")

    m = re.search(".* version ([^ ]*).*$", version)
    if not m:
        util.error("cannot determine Bro version [%s]" % version.strip())

    version = m.group(1)
    if version.endswith("-debug"):
        version = version[:-6]

    self.state["broversion"] = version
    self.state["bro"] = self.subst("${bindir}/bro")
def resolve(self, item, captcha_cb=None):
    item = item.copy()
    url = item['url']
    if url.startswith('http://www.ulozto.sk'):
        url = 'http://www.ulozto.cz' + url[20:]
    if url.startswith('#'):
        ret = json.loads(util.request(url[1:]))
        if not ret['result'] == 'null':
            url = b64decode(ret['result'])
    url = self._url(url)
    if url.startswith('#'):
        util.error('[uloz.to] - url was not correctly decoded')
        return
    self.init_urllib()
    self.info('Resolving %s' % url)
    logged_in = self.login()
    if logged_in:
        page = util.request(url)
    else:
        try:
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
            page = response.read()
            response.close()
        except urllib2.HTTPError, e:
            traceback.print_exc()
            return
def run_hooks_for(trigger):
    from sys import exit
    from os.path import sep
    from subprocess import call
    global _triggers

    if trigger not in _triggers:
        raise ValueError("unknown trigger: '" + str(trigger) + "'")

    hooks = list(set(_hooks[trigger]) - set(_hooks_done[trigger]))
    num_done = 0
    if len(hooks) > 0:
        util.info("running hooks for trigger '" + str(trigger) + "'")

    for fname in hooks:
        rv = call(config.hooks_dir + sep + fname, env=_create_env())
        _hooks_done[trigger].append(fname)
        num_done += 1
        if rv != 0:
            util.error("hook '" + str(fname) + "' exited abnormally")
            util.exit(util.ERR_ABNORMAL_HOOK_EXIT)

    util.info("successfully ran " + str(num_done) + " " +
              util.plural('hook', num_done))
def _importPlugin(self, path):
    sys.path = [os.path.dirname(path)] + sys.path
    try:
        module = __import__(os.path.basename(path))
    except Exception, e:
        util.error("cannot import plugin %s: %s" % (path, e))
def __init__(self, section_type, section_length=0, extended_length=0,
             copy_offset=0, filename=None):
    """Constructor

    If filename is specified, this reads in the file and sets the
    section length to the length of the file.
    """
    self.section_length = section_length
    self.expanded_length = extended_length
    self.copy_offset = copy_offset
    self.section_type = section_type
    self.filename = filename

    # Try to size the section length from the section input file
    if filename:
        try:
            statinfo = os.stat(filename)
            # TODO: Lengths will be different if/when we support
            # compression:
            # - section_length will shrink to the compressed size
            # - expanded_length will remain the input file length
            self.section_length = statinfo.st_size
            self.expanded_length = statinfo.st_size
        except OSError:
            error("file", filename, " is invalid or missing")
def load_source(args):
    if os.path.isdir(args.source):
        return FilesystemSource(args.source, recursive=args.recursive)
    elif args.source.endswith(".lrcat"):
        return LightroomSource(args.source)
    else:
        error("{} is neither a directory nor a Lightroom catalog.".format(args.source))
def t_NUMBER(self, t):
    r"[\+-]*\d+\.?\d*"
    try:
        t.value = float(t.value)
    except ValueError:
        util.error("value too large", t.value)
    return t
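# A minimal sketch of how a token rule like t_NUMBER above is typically
# wired into a class-based PLY lexer. The CalcLexer class and the sample
# input are hypothetical; only the t_NUMBER rule comes from the original.
import ply.lex as lex

class CalcLexer(object):
    tokens = ('NUMBER',)
    t_ignore = ' \t'

    def t_NUMBER(self, t):
        r"[\+-]*\d+\.?\d*"
        t.value = float(t.value)
        return t

    def t_error(self, t):
        # Skip characters the grammar does not recognize
        t.lexer.skip(1)

    def build(self, **kwargs):
        self.lexer = lex.lex(module=self, **kwargs)
        return self.lexer

lexer = CalcLexer().build()
lexer.input("3.14 -2")
for tok in lexer:
    print(tok)  # LexToken(NUMBER,3.14,...), LexToken(NUMBER,-2.0,...)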
def get(self, key):
    if not len(key):
        return util.error(self, 404, 'No level specified')
    lvl = db.get(db.Key(key))
    if not lvl:
        return util.error(self, 404, 'Level not found')
    util.render(self, 'level/read.html',
                {'level': lvl.getDict(), 'title': lvl.title})
def main(args):
    ec2_conf = get_ec2_conf()
    conn = get_conn()
    if args.submit:
        info('waiting for spot instance requests to be fulfilled, you can cancel by ctrl+c ...')
        try:
            requests = submit_request(conn, ec2_conf)
        except (KeyboardInterrupt, RequestFailedError) as e:
            error(e)
            exit(1)
        info('spot instance requests fulfilled')

        instance_id_to_tag_ip = {}
        rid_tag = request_id_to_tag(requests)
        info('getting instance IPs...')
        for r in requests:
            instance_id = r.instance_id
            info('waiting for an IP to be allocated to the machine')
            ip = conn.get_only_instances([instance_id])[0].ip_address
            while ip is None:
                time.sleep(1)
                ip = conn.get_only_instances([instance_id])[0].ip_address
            instance_id_to_tag_ip[instance_id] = (rid_tag[r.id], ip)

        info('mocking vagrant info under .vagrant...')
        mock_vagrant_info(instance_id_to_tag_ip)
        info('creation of spot instances done')

        info('waiting for ssh to be available...')
        wait_for_ssh([ip for tag, ip in instance_id_to_tag_ip.values()])
        info('ssh for all instances is ready')
    elif args.cancel:
        cancel_request(conn)
def run(self, args, environ):
    stop(self.config).run([], environ)
    waitFor(self.config.options.restartWaitTime())
    pid = self.checkProcessAlive()
    if pid:
        error('Process is still running at pid %s' % pid)
    start(self.config).run([], environ)
def add_element(self, element_type, element_id, element_generation,
                element_location, element_length, filename):
    """Add a new element to the element table

    Adds an element to the element table but doesn't load the TFTF
    file into the ROMimage buffer. That is done later by post_process.
    Returns a success flag.

    (We would typically be called by "create-ffff" after parsing
    element parameters.)
    """
    if len(self.elements) < FFFF_MAX_ELEMENTS:
        element = FfffElement(len(self.elements),
                              self.ffff_buf,
                              self.flash_capacity,
                              self.erase_block_size,
                              element_type,
                              element_id,
                              element_generation,
                              element_location,
                              element_length,
                              filename)
        if element.init():
            self.elements.append(element)
            span_start = element.element_location
            span_end = span_start + len(element.tftf_blob.tftf_buf)
            self.ffff_buf[span_start:span_end] = element.tftf_blob.tftf_buf
            return True
        else:
            return False
    else:
        error("too many elements")
        return False
def fetch_labels(self, query):
    self.__clear_labels()
    self.wq.search(query, sites='en.wikipedia.org', count=self.max_docs)
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', 'MwClient-0.6.4')]
    for idx, url in enumerate(self.wq.urls()[0:self.max_docs]):
        try:
            infile = opener.open(url)
            page = infile.read()
            doc = libxml2dom.parseString(page, html=1)
            if self.debug:
                util.log("url", url)

            labels = DocLabels()
            labels.title = self.__collect_text(doc.xpath("//*[@id='firstHeading']")[0])
            labels.categories = self.__nodes_to_array(doc.xpath("//*[@id='mw-normal-catlinks']/span"))

            # Remove disambiguation pages
            dp_str = 'Disambiguation pages'
            if dp_str in labels.categories:
                labels.categories.remove(dp_str)

            # Headline text
            labels.headlines = []
            for node in doc.xpath("//h3/*[@class='mw-headline']"):
                labels.headlines.append(self.__collect_text(node))

            labels.num_anchors = len(doc.getElementsByTagName("a"))

            # Only taking external link texts
            labels.anchors = []
            for node in doc.xpath("//ul/li/*[@class='external text']"):
                labels.anchors.append(self.__collect_text(node))

            labels.rank = idx + 1
            self.labels_for_urls[url] = labels
        except (urllib2.HTTPError, IndexError), e:
            if self.debug:
                util.error("%s, url: %s" % (e, url))
def get(self, tlkey):
    tl = db.get(db.Key(tlkey))
    if tl:
        self.response.headers["Content-type"] = "application/rss+xml"
        self.response.out.write(template.render("views/rss.html", {"tl": tl}))
    else:
        util.error(self, 404, "List doesn't exist. Make sure you're using the right url.")
def run(self): clientcmd = "./testclient localhost 2010 /testdata/testfile.txt" util.info(self.clientname + ": " + clientcmd) response = commands.getoutput(clientcmd) if response.find("hey this is a test file") == -1: util.error(self.clientname + ":ouchs! client can not get the right file") print self.clientname + ":Client got expected response"
def parse_string(self, input, compiler):
    compiler.reset()
    start, _, end = parser.Parser.parse(self, input, processor=compiler)
    if end < len(input):
        error(input, end)
    if not compiler.context.grammars.has_key('input'):
        error(input, end, 'Required grammar "input" not found.')
    return compiler.context
def _checkIssue(self, issue_id):
    request = self._createRequest()
    response = request.get(self._issueUrl(issue_id), contentType='application/json')
    if response.status != 200:
        error(u"Unable to find issue {0}".format(self.link(issue_id)), response)
def run(self): clientcmd = "./testclient localhost 2010 /output.cgi" print self.clientname + ": " + clientcmd response = commands.getoutput(clientcmd) if response.find("Welcome to the CGI program") == -1: util.error(self.clientname + ":ouchs! client can not get the right file") print self.clientname + ":Client got expected response"
def __init__(self, jira_server, username, password, encoding='utf-8'):
    if jira_server is None:
        error('No server provided.')
    self.jira_server = jira_server
    self.username = username
    self.password = password
    self.encoding = encoding
def readFLIMheader(self, hdr):
    self.byteorder = ENDIANS[rawutil.unpack_from('>H', hdr, 4)[0]]
    hdata = self.unpack(BFLIM_FLIM_HDR_STRUCT, hdr)
    if hdata[0] != b'FLIM':
        error('Invalid magic %s, expected FLIM' % byterepr(hdata[0]), 301)
    #bom = hdata[1]
    #headerlen = hdata[2]
    self.version = hdata[3]
def extract(self):
    for file in self.files:
        content = self.data[file.offset:file.offset + file.size]
        if self.dochecks:
            if sha256(content).digest() != file.hash:
                error('File %s hash mismatch' % file.name, 305)
        bwrite(content, self.outdir + file.name)
def __init__(self, config, basedir, distdir, version, standalone):
    global Config
    global Installing
    Config = self

    if "BROCTL_INSTALL" in os.environ:
        Installing = True

    global BroBase
    BroBase = basedir

    if "MAKE_DESTDIR" in os.environ:
        global MakeDestDir
        MakeDestDir = os.environ["MAKE_DESTDIR"]

    self.config = {}
    self.state = {}

    # Read broctl.cfg.
    self.config = self._readConfig(os.path.join(basedir, config))

    # Set defaults for options we get passed in.
    self._setOption("brobase", basedir)
    self._setOption("distdir", distdir)
    self._setOption("version", version)
    self._setOption("standalone", standalone and "1" or "0")

    # Initialize options.
    for opt in options.options:
        if not opt.dontinit:
            self._setOption(opt.name.lower(), opt.default)

    # Set defaults for options we derive dynamically.
    self._setOption("mailto", "%s" % os.getenv("USER"))
    self._setOption("mailfrom", "Big Brother <bro@%s>" % socket.gethostname())
    self._setOption("home", os.getenv("HOME"))
    self._setOption("mailalarmsto", self.config["mailto"])

    # Determine operating system.
    (success, output) = execute.captureCmd("uname")
    if not success:
        util.error("cannot run uname")
    self._setOption("os", output[0].lower().strip())

    # Find the time command (should be a GNU time for best results).
    (success, output) = execute.captureCmd("which time")
    self._setOption("time", output[0].lower().strip())

    # Read nodes.cfg and broctl.dat.
    self._readNodes()
    self.readState()

    # Set up the kinds of analyses which we support.
    self._analysis = Analysis(self.analysiscfg)

    # Make sure the cron flag is cleared.
    self.config["cron"] = "0"
def post_process(self, buf):
    """Post-process the FFFF header

    Process the FFFF header, assigning unspecified element locations to
    be contiguous (on erase-block-size boundaries), and read the TFTF
    files into the buffer at those locations. (Called by "create-ffff"
    after processing all arguments.)
    """
    # Revalidate the erase block size
    self.erase_block_mask = self.erase_block_size - 1

    # Scan the elements and fill in missing start locations.
    # Elements are concatenated at the granularity of the erase block size.
    location = self.elements[0].element_location
    for index, element in enumerate(self.elements):
        element.index = index
        if element.element_type != FFFF_ELEMENT_END_OF_ELEMENT_TABLE:
            if element.element_location == 0:
                element.element_location = location
                error("Note: Assuming element [{0:d}]"
                      " loads at {1:08x}".format(element.index, location))
            if self.flash_image_length != 0 and \
               element.element_location + element.element_length >= \
               self.flash_image_length:
                error("--element-location " +
                      format(element.element_location, "#x") +
                      " + --element-length " +
                      format(element.element_length, "#x") +
                      " exceeds --image-length " +
                      format(self.flash_image_length, "#x"))
                sys.exit(PROGRAM_ERRORS)
            location = next_boundary(
                element.element_location + element.element_length,
                self.erase_block_size)
        if element.element_type == FFFF_ELEMENT_END_OF_ELEMENT_TABLE:
            break

    if self.flash_image_length == 0:
        self.flash_image_length = location

    self.validate_element_table()

    # Fill in and/or trim selected FFFF fields
    self.sentinel = FFFF_SENTINEL
    self.timestamp = strftime("%Y%m%d %H%M%S", gmtime())
    if len(self.flash_image_name) >= FFFF_FLASH_IMAGE_NAME_LENGTH:
        self.flash_image_name = \
            self.flash_image_name[0:FFFF_FLASH_IMAGE_NAME_LENGTH - 1]
        warning("flash_image_name truncated to '{0:s}'".format(
            self.flash_image_name))
    self.tail_sentinel = FFFF_SENTINEL

    # Flush the structure elements to the FFFF buffer and do a final
    # sniff test on the results
    self.pack()
    self.validate_ffff_header()
def get_data_cached(self, url, post=False):
    try:
        url.index('/json/')
        self._oldapi()
    except Exception:
        pass

    headers = {
        'X-UID': self.uid,
        'X-LANG': self.tr['language'],
        'X-VER': sctop.API_VERSION,
        'Accept': 'application/vnd.bbaron.kodi-plugin-v%s+json' % (sctop.API_VERSION),
    }
    url = self._url(url)
    code = None
    try:
        if post != False:
            util.debug("POST URL: %s %s" % (url, str(post)))
            (ret, code) = sctop.post(url, post, headers, "extend")
            self.handleHttpError(code)
            return ret

        util.info("GET x URL: %s" % url)
        ret = False
        if sctop.getSettingAsBool('usecache') is not False:
            util.debug("[SC] trying the cache")
            ret = self.cache.get(str(url))
        if not ret:
            util.debug("[SC] url not in cache %s" % str(url))
            (ret, code, info) = sctop.request(url, headers, "info")
            util.debug("[SC] code: %s %s" % (str(code), str(info)))
            self.handleHttpError(code, data=ret, i=info)
            if code == 200:
                ttl = datetime.timedelta(hours=2)
                try:
                    util.debug("[SC] info: %s " % str(info))
                    if 'x-ttl' in info:
                        ttl = datetime.timedelta(seconds=int(info.get('x-ttl')))
                        util.debug("[SC] got TTL: %s" % str(ttl))
                except:
                    pass
                try:
                    self.cache.cache.set(str(url), ret, expiration=ttl)
                except:
                    self.cache.set(str(url), ret)
        else:
            util.debug("[SC] url from cache %s" % str(url))
        util.debug("[SC] return data")
        return ret
    except Exception, e:
        util.error('[SC] ERROR URL: ' + str(e))
        if code is None:
            sctop.dialog.ok("error", url)
        return False
def readprt1(self, data):
    ptr = 8
    info, ptr = self.readpane(data, ptr)
    secname = 'prt1-%s' % self.actnode['__prevname']
    self.actnode[secname] = OrderedDict()
    localnode = self.actnode[secname]
    localnode.update(info)
    count, ptr = self.uint32(data, ptr)
    localnode['section-count'] = count
    localnode['section-scale-X'], ptr = self.float32(data, ptr)
    localnode['section-scale-Y'], ptr = self.float32(data, ptr)
    entryoffsets = []
    extraoffsets = []
    entries = []
    for i in range(count):
        entry = OrderedDict()
        entry['name'] = self.string(data, ptr)[0]
        ptr += 24
        entry['unknown'], ptr = self.uint8(data, ptr)
        entry['flags'], ptr = self.uint8(data, ptr)
        padding, ptr = self.uint16(data, ptr)
        entryoffset, ptr = self.uint32(data, ptr)
        ptr += 4  # padding?
        extraoffset, ptr = self.uint32(data, ptr)
        entryoffsets.append(entryoffset)
        extraoffsets.append(extraoffset)
        entries.append(entry)
    if len(entryoffsets) == 0:
        localnode['extra'] = data[ptr:]
    else:
        localnode['extra'] = data[ptr:entryoffsets[0]]
    for i in range(count):
        entry = entries[i]
        parentnode = self.actnode
        self.actnode = entry
        self.actnode['__parentnode'] = parentnode
        entryoffset = entryoffsets[i]
        extraoffset = extraoffsets[i]
        if entryoffset != 0:
            length, ptr = self.uint32(data, entryoffset + 4)
            entrydata = data[entryoffset:entryoffset + length]
            ptr = max(ptr, entryoffset + length)
            magic = entrydata[0:4].decode('ascii')
            try:
                # Dispatch on the section magic (quicker to code than a
                # chain of if magic == 'txt1': ... checks).
                method = getattr(self, 'read' + magic)
            except AttributeError:
                error('Invalid section magic: %s' % magic, 303)
            method(entrydata)
        if extraoffset != 0:
            extra = data[extraoffset:extraoffset + 48].hex()
            ptr = max(ptr, extraoffset + 48)
            #key = list(entry.keys())[-1]
            entry['extra'] = extra
        self.actnode = self.actnode['__parentnode']
    localnode['entries'] = entries
    if ptr < len(data):
        localnode['dump'] = data[ptr:]
def __init__(self, mantisbt_server):
    if mantisbt_server is None:
        error(u'No server provided.')
    self.content_type = 'application/json'
    self.encoding = 'utf-8'
    self.mantisbt_server = mantisbt_server
    if self.mantisbt_server['apiKey'] is None:
        error(u'No API key provided.')
    self.headers = {'Authorization': self.mantisbt_server['apiKey']}
def __init__(self, battery_type, n_series, n_parallel):
    if battery_type not in batteries:
        error("Unknown battery type %s. Choices are %s"
              % (battery_type, batteries.keys()))
    self.battery_type = battery_type
    self.cell_series_count = n_series
    self.cell_parallel_count = n_parallel
    self.cell = batteries[self.battery_type]
    self.cell_count_total = self.cell_parallel_count * self.cell_series_count
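# A minimal usage sketch for the constructor above, assuming it belongs
# to a class named BatteryPack and that `batteries` maps type names to
# per-cell data; both the class name and the catalogue entry here are
# hypothetical.
batteries = {
    '18650-li-ion': {'nominal_v': 3.6, 'capacity_ah': 2.5},
}

pack = BatteryPack('18650-li-ion', n_series=10, n_parallel=4)  # a 10s4p pack
print(pack.cell_count_total)  # 40 cells (10 series * 4 parallel)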
def parse(self, inp):
    try:
        tokens = self.tokenize(inp)
        TOKEN = 0
        LEXEME = 1
    except Exception, pos:
        util.error('Lexical error. Cannot tokenize at pos %s. Context: "%s"'
                   % (pos, inp[pos[0]:pos[0] + 24]))
        return 'Error'
def select_base_learners(self, y_train, fitted_base_learners):
    """Select base learners from candidate learners based on the
    ensembling algorithm.
    """
    cv_errors = np.array([p.cv_error for p in self.candidate_learners])
    if self.verbose:
        print("cv errors: {}".format(cv_errors))

    assert self.algorithm in {'greedy', 'stacking', 'best_several'}, \
        "The ensemble selection method must be either greedy forward " \
        "selection (by Caruana et al.), or stacking, or selecting " \
        "several best algorithms."

    if self.algorithm == 'greedy':
        # Greedy ensemble forward selection
        x_tr = ()
        # Initial number of models in the ensemble
        n_initial = 3
        for i in np.argsort(cv_errors)[:n_initial]:
            x_tr += (self.candidate_learners[i].cv_predictions.reshape(-1, 1), )
            if fitted_base_learners is None:
                pre_fitted = None
            else:
                pre_fitted = fitted_base_learners[self.candidate_learners[i].index]
            if pre_fitted is not None:
                self.base_learners.append(pre_fitted)
            else:
                self.base_learners.append(self.candidate_learners[i])
        x_tr = np.hstack(x_tr)
        candidates = list(np.argsort(cv_errors))
        error = util.error(y_train, mode(x_tr, axis=1)[0], self.p_type)
        while len(self.base_learners) <= self.max_size:
            looped = True
            for i, idx in enumerate(candidates):
                slm = np.hstack((x_tr,
                                 self.candidate_learners[i].cv_predictions.reshape(-1, 1)))
                err = util.error(y_train, mode(slm, axis=1)[0], self.p_type)
                if err < error:
                    error = err
                    x_tr = slm
                    if fitted_base_learners is None:
                        pre_fitted = None
                    else:
                        pre_fitted = fitted_base_learners[self.candidate_learners[i].index]
                    if pre_fitted is not None:
                        self.base_learners.append(pre_fitted)
                    else:
                        self.base_learners.append(self.candidate_learners[i])
                    looped = False
                    break
            if looped:
                break
        self.second_layer_features = x_tr
    elif self.algorithm == 'stacking':
        self.base_learners = self.candidate_learners
        x_tr = [p.cv_predictions.reshape(-1, 1) for p in self.candidate_learners]
        self.second_layer_features = np.hstack(tuple(x_tr))
    elif self.algorithm == 'best_several':
        self.base_learners = []
        for i in np.argsort(cv_errors)[:self.max_size]:
            self.base_learners.append(self.candidate_learners[i])
def display(self, header_index, filename=None):
    """Display an FFFF header"""
    if self.ffff0 and self.ffff1:
        identical = self.ffff0.same_as(self.ffff1)
        self.ffff0.display(0, not identical, identical, filename)
        self.ffff1.display(1, True, identical, filename)
    else:
        error("No FFFF to display")
def reject_trade(self, player: Player, trade_id: str):
    trade: Trade = util.shrink([trade for trade in self.trades if trade.id == trade_id])
    if not trade:
        return util.error("Trade does not exist")
    if player is not trade.p2:
        return util.error("You are not in this trade")

    # Remove the trade from the list of trades
    self.trades = [trade for trade in self.trades if trade.id != trade_id]
    return util.success("Trade successfully rejected")
def buy_field(self, player: Player):
    '''Buy the third field for 3 coins'''
    if player.coins < 3:
        return util.error("Not enough coins to purchase third field")
    if player.fields[2].enabled:
        return util.error("Field already purchased")

    player.coins -= 3
    player.fields[2].enabled = True
    return util.success("Successfully purchased third field")
def start_game(self, player: Player) -> Dict[str, str]:
    '''Starts the game by dealing cards to players and setting the status'''
    if self.status != 'Awaiting':
        return util.error('Game has already started')
    if not player.is_host:
        return util.error('Only host can start game')
    self.deal_cards()
    self.status = 'Running'
    return util.success('Successfully started game')
def login(self):
    if not self.username or not self.password:
        self.logout()
        return True  # fall back to a free account
    elif self.token is not None:
        if self.userData() is not False:
            return True
        self.token = None
    if self.username and self.password and len(self.username) > 0 and len(self.password) > 0:
        self.logout()
        util.info('[SC] Login user=%s, pass=*****' % self.username)
        try:
            # Get the salt
            headers, req = self._create_request(
                '', {'username_or_email': self.username})
            data = post(self._url('api/salt/'), req, headers=headers)
            xml = ET.fromstring(data)
            if not xml.find('status').text == 'OK':
                util.error('[SC] Server returned error status, response: %s' % data)
                return False
            salt = xml.find('salt').text

            # Create the hashes
            password = hashlib.sha1(
                md5crypt(self.password.encode('utf-8'),
                         salt.encode('utf-8'))).hexdigest()
            digest = hashlib.md5(self.username + ':Webshare:' + self.password).hexdigest()

            # Log in
            headers, req = self._create_request(
                '', {
                    'username_or_email': self.username,
                    'password': password,
                    'digest': digest,
                    'keep_logged_in': 1
                })
            data = post(self._url('api/login/'), req, headers=headers)
            xml = ET.fromstring(data)
            if not xml.find('status').text == 'OK':
                self.clearToken()
                util.error('[SC] Server returned error status, response: %s' % data)
                return False
            self.saveToken(xml.find('token').text)
            try:
                util.cache_cookies(None)
            except:
                pass
            util.info('[SC] Login successful')
            return True
        except Exception, e:
            util.info('[SC] Login error %s' % str(e))
def readhdr(self, data):
    if data[0] != 0x40:
        error('Invalid magic 0x%02x, expected 0x40' % data[0])
    self.unco_len = self.unpack_from('U', data, 1)[0]
    hdrend = 4
    if self.unco_len == 0:
        self.unco_len = self.unpack_from('I', data, 4)[0]
        hdrend = 8
    return data[hdrend:]
def query(self, sql, params=()):
    result = None
    try:
        self.cur.execute(sql, params)
        result = self.cur.fetchall()
    except mysql.connector.Error as e:
        util.error('DB query Error', e)
    return result
def rsa_signverify(thekey, message, signature):
    hash_obj = SHA512.new(message)
    try:
        # The signature is base64-encoded, so decode it first, then verify.
        pkcs1_15.new(thekey).verify(hash_obj, base64.b64decode(signature))
        return True
    except (ValueError, TypeError):
        util.error('The signature is NOT valid')
        return False
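# A minimal usage sketch for rsa_signverify above, assuming PyCryptodome
# (the library whose SHA512/pkcs1_15 API the function uses). The key and
# message here are generated on the fly purely for illustration.
import base64
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA512

key = RSA.generate(2048)
message = b'payload to protect'

# Sign with the private key and base64-encode, matching what the
# verifier expects to decode.
signature = base64.b64encode(pkcs1_15.new(key).sign(SHA512.new(message)))

# Verify with the public half of the key.
assert rsa_signverify(key.publickey(), message, signature)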
def loadmap(self):
    '''
    loadmap()
    Creates a browser object and loads the webpage.
    It sets up the map to the proper zoom level.
    Returns the browser on success, None on fail.
    Now using Chromedriver.
    '''
    browser = webdriver.Chrome('/usr/bin/chromedriver',
                               desired_capabilities=capabilities,
                               options=options)
    # browser = webdriver.Firefox('/usr/bin/chromedriver', desired_capabilities=capabilities, options=options)
    browser.set_window_size(abovetustin_image_width, abovetustin_image_height)
    print("getting web page {}".format(self.url))
    browser.set_page_load_timeout(15)
    browser.get(self.url)

    # Need to wait for the page to load
    timeout = g_request_timeout
    print("waiting for page to load...")
    wait = WebDriverWait(browser, timeout)
    try:
        element = wait.until(
            EC.element_to_be_clickable((By.ID, 'dump1090_version')))
    except seleniumexceptions.TimeoutException:
        util.error("Loading %s timed out. Check that you're using the "
                   "correct driver in the .ini file." % (self.url, ))
        browser.save_screenshot('timeout.png')
        util.error('Saved screenshot at timeout.png')
        raise

    print("reset map:")
    resetbutton = browser.find_elements_by_xpath(
        '//*[contains(@title,"Reset Map")]')
    resetbutton[0].click()

    print("zoom in 3 times:")
    try:
        # First look for the Open Layers map zoom button.
        zoomin = browser.find_element_by_class_name('ol-zoom-in')
        print("Zoom: ", zoomin)
    except seleniumexceptions.NoSuchElementException as e:
        # Doesn't seem to be Open Layers, so look for the Google
        # maps zoom button.
        zoomin = browser.find_elements_by_xpath('//*[@title="Zoom in"]')
        if zoomin:
            zoomin = zoomin[0]
    zoomin.click()
    zoomin.click()
    zoomin.click()

    self.browser = browser
def loadmap(self):
    '''
    loadmap()
    Creates a browser object and loads the webpage.
    It sets up the map to the proper zoom level.
    Returns the browser on success, None on fail.
    '''
    # Define Chrome webbrowser options. You may need to hard-code the
    # location of the webdriver. Installation on RPi via apt-get works
    # out of the box.
    options = Options()
    options.headless = True
    browser = webdriver.Chrome(chrome_options=options)
    browser.set_window_size(abovetustin_image_width, abovetustin_image_height)
    print("getting web page {}".format(self.url))
    browser.set_page_load_timeout(15)
    browser.get(self.url)

    # Need to wait for the page to load
    timeout = g_request_timeout
    print("waiting for page to load...")
    wait = WebDriverWait(browser, timeout)
    try:
        element = wait.until(
            EC.element_to_be_clickable((By.ID, 'dump1090_version')))
    except seleniumexceptions.TimeoutException:
        util.error("Loading %s timed out. Check that you're using the "
                   "correct driver in the .ini file." % (self.url, ))
        browser.save_screenshot('timeout.png')
        util.error('Saved screenshot at timeout.png')
        raise

    print("reset map:")
    resetbutton = browser.find_elements_by_xpath(
        "//*[contains(text(), 'Reset Map')]")
    resetbutton[0].click()

    print("zoom in 4 times:")
    try:
        # First look for the Open Layers map zoom button.
        zoomin = browser.find_element_by_class_name('ol-zoom-in')
        print(zoomin)
    except seleniumexceptions.NoSuchElementException as e:
        # Doesn't seem to be Open Layers, so look for the Google
        # maps zoom button.
        zoomin = browser.find_elements_by_xpath('//*[@title="Zoom in"]')
        if zoomin:
            zoomin = zoomin[0]
    zoomin.click()
    zoomin.click()
    zoomin.click()
    zoomin.click()

    self.browser = browser
def makeSemantFunc(code, numArgs, globalObject):
    args = ['n0']
    for i in xrange(numArgs):
        args.append('n%d' % (i + 1))
    try:
        return util.createFunction(util.uniqueIdentifier(), args, code, globalObject)
    except Exception, e:
        util.error("couldn't create semantic function: " + str(e))
        sys.exit(1)
def main(self, filenames, outname, endian, opts={}):
    filename = filenames[0]
    tree = load(read(filename))
    if list(tree.keys())[2] != 'BFLYT':
        error('This is not a converted BFLYT file', 203)
    self.version = tree['version']
    self.byteorder = endian
    self.sections = tree['BFLYT']
    self.final = self.repackdata()
    bwrite(self.final, outname)
def main():
    """Test if there is a README or README.txt file."""
    if os.path.exists("./README") or os.path.exists("./README.txt"):
        util.good("You passed")
        sys.exit(0)  # pass
    else:
        util.error("Missing README or README.txt")
def readSFAT(self, data, ptr):
    sfat, ptr = self.unpack_from(SFAT_STRUCT, data, ptr, getptr=True)
    magic = sfat[0]
    if magic != b'SFAT':
        error('Issue with SFAT: invalid magic %s' % byterepr(magic), 301)
    #headerlen = sfat[1]
    self.node_count = sfat[2]
    self.hash_multiplier = sfat[3]
    self.nodes = [SFATnode(node) for node in sfat[4]]
    return ptr
def get_boards(self, board_name):
    if not board_name:
        error("No board name provided.")
    request = self._createRequest()
    response = request.get("/rest/agile/1.0/board?name=%s" % board_name,
                           contentType="application/json")
    if response.status != 200:
        error(u"Unable to find boards for {0}".format(board_name), response)
    return json.loads(response.response)['values']