def allrolesDelete():
    """Delete every Role whose ID appears (as a numeric token) in the POSTed data.

    Returns a JSON status payload: per-row error, overall error, or OK.
    """
    if request.method == 'POST':
        data = request.values
        try:
            jsonstr = json.dumps(data.to_dict())
            if len(jsonstr) > 10:
                # Every numeric token in the payload is treated as a Role ID.
                jsonnumber = re.findall(r"\d+\.?\d*", jsonstr)
                for key in jsonnumber:
                    Roleid = int(key)
                    try:
                        session.query(Role).filter_by(ID=Roleid).delete()
                        # NOTE(review): no session.commit() here -- confirm the
                        # session autocommits or the caller flushes.
                    except Exception as ee:
                        print(ee)
                        # BUG FIX: was string(ee) -- `string` is undefined; use str().
                        return json.dumps([{
                            "status": "error:" + str(ee)
                        }], cls=AlchemyEncoder, ensure_ascii=False)
                return json.dumps([{
                    "status": "OK"
                }], cls=AlchemyEncoder, ensure_ascii=False)
        except Exception as e:
            print(e)
            return json.dumps([{
                "status": "Error" + str(e)
            }], cls=AlchemyEncoder, ensure_ascii=False)
def ensure_dbms_is_running(self, options, properties, scmStatus=None):
    """Start the SQL Server (and SQL Browser) Windows services when the
    configured database host is this machine.

    :param scmStatus : SvcStatusCallback
    :rtype : None
    """
    db_host_components = self.database_host.split("\\")
    if len(db_host_components) == 1:
        db_machine = self.database_host
        sql_svc_name = "MSSQLServer"
    else:
        # "host\instance" form -> service name is MSSQL$<instance>
        db_machine = db_host_components[0]
        sql_svc_name = "MSSQL$" + db_host_components[1]
    # BUG FIX: os.getenv("COMPUTERNAME") can be None off-Windows; guard with "".
    if db_machine == "localhost" or db_machine.lower() == (os.getenv("COMPUTERNAME") or "").lower() or \
            db_machine.lower() == socket.getfqdn().lower():
        # TODO: Configure the SQL Server service name in ambari.properties
        ret = WinServiceController.EnsureServiceIsStarted(sql_svc_name)
        if 0 != ret:
            # BUG FIX: was string(ret) -- undefined name; use str().
            raise FatalException(-1, "Error starting SQL Server: " + str(ret))
        if scmStatus is not None:
            scmStatus.reportStartPending()
        # The SQL Server JDBC driver needs the browser service too.
        ret = WinServiceController.EnsureServiceIsStarted("SQLBrowser")
        if 0 != ret:
            raise FatalException(-1, "Error starting SQL Server Browser: " + str(ret))
def ensure_dbms_is_running(self, options, properties, scmStatus=None):
    """Start the SQL Server (and SQL Browser) Windows services when the
    configured database host is this machine.

    :param scmStatus : SvcStatusCallback
    :rtype : None
    """
    db_host_components = self.database_host.split("\\")
    if len(db_host_components) == 1:
        db_machine = self.database_host
        sql_svc_name = "MSSQLServer"
    else:
        # "host\instance" form -> service name is MSSQL$<instance>
        db_machine = db_host_components[0]
        sql_svc_name = "MSSQL$" + db_host_components[1]
    # BUG FIX: os.getenv("COMPUTERNAME") can be None off-Windows; guard with "".
    if db_machine == "localhost" or db_machine.lower() == (os.getenv("COMPUTERNAME") or "").lower() or \
            db_machine.lower() == socket.getfqdn().lower():
        # TODO: Configure the SQL Server service name in tbds.properties
        ret = WinServiceController.EnsureServiceIsStarted(sql_svc_name)
        if 0 != ret:
            # BUG FIX: was string(ret) -- undefined name; use str().
            raise FatalException(-1, "Error starting SQL Server: " + str(ret))
        if scmStatus is not None:
            scmStatus.reportStartPending()
        # The SQL Server JDBC driver needs the browser service too.
        ret = WinServiceController.EnsureServiceIsStarted("SQLBrowser")
        if 0 != ret:
            raise FatalException(-1, "Error starting SQL Server Browser: " + str(ret))
def nextToken(self):
    """Return the next token from the scanner's input stream.

    Honors the ignore-whitespace / ignore-comments / scan-strings /
    scan-numbers flags and falls back to longest-match operator scanning.

    NOTE(review): the original block mixed C++ and Python syntax
    ("std::string def nextToken(): ... isp->get()") and compiled in neither
    language; rewritten as Python against the same fields and helpers.
    """
    # Replay any pushed-back token first.
    if self.savedTokens is not None:
        cp = self.savedTokens
        token = cp.strr
        self.savedTokens = cp.link
        return token
    while True:
        if self.ignoreWhitespaceFlag:
            self.skipSpaces()
        ch = self.isp.get()
        if ch == '/' and self.ignoreCommentsFlag:
            ch = self.isp.get()
            if ch == '/':
                # Line comment: consume through end of line, then rescan.
                while True:
                    ch = self.isp.get()
                    if ch == '\n' or ch == '\r' or ch == EOF:
                        break
                continue
            elif ch == '*':
                # Block comment: consume through the closing */, then rescan.
                prev = EOF
                while True:
                    ch = self.isp.get()
                    if ch == EOF or (prev == '*' and ch == '/'):
                        break
                    prev = ch
                continue
            if ch != EOF:
                self.isp.unget()
            ch = '/'
        if ch == EOF:
            return ""
        if (ch == '"' or ch == "'") and self.scanStringsFlag:
            self.isp.unget()
            return self.scanString()
        if ch.isdigit() and self.scanNumbersFlag:
            self.isp.unget()
            return self.scanNumber()
        if self.isWordCharacter(ch):
            self.isp.unget()
            return self.scanWord()
        # Operator: extend greedily while the text is a known operator prefix,
        # then back off to the longest string that is a complete operator.
        op = ch
        while self.isOperatorPrefix(op):
            ch = self.isp.get()
            if ch == EOF:
                break
            op += ch
        while len(op) > 1 and not self.isOperator(op):
            self.isp.unget()
            op = op[:-1]
        return op
def _ctcp_dequote(message):
    """[Internal] Dequote a message according to CTCP specifications.

    The function returns a list where each element can be either a string
    (normal message) or a tuple of one or two strings (tagged messages).
    If a tuple has only one element (ie is a singleton), that element is
    the tag; otherwise the tuple has two elements: the tag and the data.

    Arguments:

        message -- The message to be decoded.
    """

    def _low_level_replace(match_obj):
        quoted = match_obj.group(1)
        # Unknown quoted characters dequote to themselves.
        return _low_level_mapping.get(quoted, quoted)

    # Undo low-level quoting first, if a quote character is present at all.
    if _LOW_LEVEL_QUOTE in message:
        message = _low_level_regexp.sub(_low_level_replace, message)

    if _CTCP_DELIMITER not in message:
        return [message]

    # Split on the CTCP delimiter: even chunks are plain text, odd chunks
    # are tagged data.
    chunks = message.split(_CTCP_DELIMITER)
    messages = []
    idx = 0
    while idx < len(chunks) - 1:
        if chunks[idx]:
            messages.append(chunks[idx])
        if idx < len(chunks) - 2:
            # Tagged data: split into (tag,) or (tag, data).
            messages.append(tuple(chunks[idx + 1].split(" ", 1)))
        idx += 2
    if len(chunks) % 2 == 0:
        # A lone trailing delimiter: the last chunk, delimiter included,
        # is a normal message per the CTCP specification.
        messages.append(_CTCP_DELIMITER + chunks[-1])
    return messages
def parse(self, response): signList = response.xpath("/html/body//div") for onesign in signList: try: item["price"] = site.xpath("ul/li/div/a/span/text()").extract()[0] except IndexError: item["price"] = site.xpath("ul/li/a/span/text()").extract()[0] print string('onesign.xpath("@id")')[0].extract() print onesign.xpath("@class")[0].extract() print onesign print ("-" * 48)
def _ctcp_dequote(message):
    """[Internal] Dequote a message according to CTCP specifications.

    The function returns a list where each element can be either a string
    (normal message) or a tuple of one or two strings (tagged messages).
    If a tuple has only one element (ie is a singleton), that element is
    the tag; otherwise the tuple has two elements: the tag and the data.

    Arguments:

        message -- The message to be decoded.
    """

    def _low_level_replace(match_obj):
        ch = match_obj.group(1)
        # Characters missing from the mapping dequote to themselves.
        return _low_level_mapping.get(ch, ch)

    if _LOW_LEVEL_QUOTE in message:
        # Low-level quoting present: dequote before looking for tags.
        message = _low_level_regexp.sub(_low_level_replace, message)

    if _CTCP_DELIMITER not in message:
        return [message]

    chunks = message.split(_CTCP_DELIMITER)
    n = len(chunks)
    messages = []
    # Even indices hold plain text; the chunk after each is tagged data.
    for pos in range(0, n - 1, 2):
        if chunks[pos]:
            messages.append(chunks[pos])
        if pos < n - 2:
            messages.append(tuple(chunks[pos + 1].split(" ", 1)))
    if n % 2 == 0:
        # Unpaired trailing delimiter: per the CTCP spec the last chunk,
        # including the delimiter, is an ordinary message.
        messages.append(_CTCP_DELIMITER + chunks[-1])
    return messages
def parse(self, response): signList = response.xpath('/html/body//div') for onesign in signList: try: item['price'] = site.xpath( 'ul/li/div/a/span/text()').extract()[0] except IndexError: item['price'] = site.xpath('ul/li/a/span/text()').extract()[0] print string('onesign.xpath("@id")')[0].extract() print onesign.xpath('@class')[0].extract() print onesign print('-' * 48)
def unpacker(filename, dtype=None, endian=None, sort=True, hdrstring=False):
    '''
    Basic Data Loader. Automatically Loads .raw files, .nrrd files and .nhdr
    lists, and processes headers.
    @param filename: a string or list of filenames. If not sorted in order,
    the filenames will be sorted by default in alphabetical order.
    @param dtype The expected data type in the file. .nrrd Headers will
    automatically be used
    @param endian Specify the endianness of the data. Little will be assumed
    if this parameter is None.
    @param sort If filename is a list of files, if sort is True, the filenames
    @return Tuple containing the header data as a dictionary and a numpy array
    containing the file data. The header may be empty for raw files.
    '''
    # NOTE(review): `endian` and `sort` are accepted but never used here.
    path = os.getcwd()
    # NOTE(review): os.path.dirname() fails if `filename` is a list -- confirm
    # list callers pass paths relative to the current directory.
    newpath = os.path.dirname(filename)
    print(newpath)
    os.chdir(newpath)
    try:
        if isinstance(filename, (list, tuple)):
            # BUG FIX: the original compared type(filename[0]) against
            # type(str) / type(string()) which never matches an actual str.
            assert all(isinstance(f, str) for f in filename), \
                "Input problem: all items of filename must be strings."
            header, data = getSerialImages(filename)
        elif isinstance(filename, str):
            header, data = processFile(filename)
        else:
            raise Exception("Parameter filename must be a string or a list of strings.")
        # Next, we need the data type that is being used.
        if dtype is None:
            dtype = header.get("dtype", "float32")
        # Convert the raw data into a numpy array.
        npd = unpacker3(data, dtype=dtype)
        if "dsize" in header:
            shape = header["dsize"]
            print("Dsize = {}".format(shape))
            print("Size = {}".format(npd.shape))
            # nrrd stores sizes fastest-axis-first; numpy wants the reverse.
            npd = npd.reshape(list(shape[::-1]))
            print(npd.shape)
        if hdrstring:
            return header, npd, None
    finally:
        # Always restore the caller's working directory.
        os.chdir(path)
    return header, npd
def get_links(url):
    """Scrape every <a href> from the body of `url`, normalized to ASCII
    absolute links; return [] on any scraping failure."""
    try:
        resp = requests.get(url)  # Request data from url
        soup = bs.BeautifulSoup(resp.text, 'lxml')  # Parse text from site
        body = soup.body
        links = [link.get('href') for link in body.find_all('a')]  # Get all links from body
        links = [handle_local_links(url, link) for link in links]
        # BUG FIX: was string(link.encode('ascii')) -- `string` is undefined,
        # and str() of bytes would yield "b'...'"; round-trip through ASCII.
        links = [link.encode('ascii').decode('ascii') for link in links]
        return links
    except TypeError as e:
        print(e)
        print(
            'Got a TypeError, probably got a None that we tried to iterate over'
        )
        return []
    except IndexError as e:
        print(e)
        print(
            'We probably did not find any useful links, returning empty list')
        return []
    except AttributeError as e:
        print(e)
        print('Likely got None for links')
        return []
    except Exception as e:
        print(str(e))
        return []
def allrolesCreate():
    """Create a Role from the POSTed form fields and return a JSON status."""
    if request.method == 'POST':
        data = request.values
        # BUG FIX: removed `str = request.get_json()` -- unused and it
        # shadowed the builtin str needed below.
        try:
            json_str = json.dumps(data.to_dict())
            if len(json_str) > 10:
                session.add(
                    Role(RoleCode=data['RoleCode'],
                         RoleName=data['RoleName'],
                         RoleSeq=data['RoleSeq'],
                         Description=data['Description'],
                         CreatePerson=data['CreatePerson'],
                         CreateDate=datetime.datetime.now()))
                session.commit()
                return json.dumps([{
                    "status": "OK"
                }], cls=AlchemyEncoder, ensure_ascii=False)
        except Exception as e:
            print(e)
            # BUG FIX: was string(e) -- undefined name; use str().
            return json.dumps([{
                "status": "Error" + str(e)
            }], cls=AlchemyEncoder, ensure_ascii=False)
def launch():
    """Spawn one BNet worker green-thread per battle.net section in config."""
    from config import config
    bnet_pool = eventlet.GreenPool()
    for i in xrange(
            9001
    ):  # race condition: will ignore the 9001st and subsequent battle.net entries in config
        try:
            settings = dict(config['bnet'].items() + config['bnet' + str(i)].items())
        except KeyError:
            break
        # BUG FIX: was string(...) -- undefined name; use str().
        path = str(settings.get('war3path', 'WAR3'))
        bn = BNet()
        bn.id = i
        bn.war3version = int(settings.get('war3version', '26'))
        bn.tft = int(settings.get('tft', '1'))
        bn.localeId = int(settings.get('localeid', '1033'))
        bn.countryAbbrev = bytes(
            settings.get('countryabbrev', 'USA').decode('ascii'))
        bn.country = bytes(
            settings.get('country', 'United States').decode('ascii'))
        bn.war3exePath = bytes(
            settings.get('war3exepath', 'war3.exe').decode('ascii'))
        bn.stormdllPath = bytes(
            settings.get('stormdllpath', 'Storm.dll').decode('ascii'))
        bn.gamedllPath = bytes(
            settings.get('gamedllpath', 'game.dll').decode('ascii'))
        bn.keyRoc = bytes(
            settings.get('keyroc', 'FFFFFFFFFFFFFFFFFFFFFFFFFF').decode('ascii'))
        assert bn.keyRoc != b''
        bn.keyTft = bytes(
            settings.get('keytft', 'FFFFFFFFFFFFFFFFFFFFFFFFFF').decode('ascii'))
        # A TFT key is only required when TFT mode is enabled.
        assert bn.keyTft != b'' or not bn.tft
        bn.username = bytes(settings.get('username', '').decode('ascii'))
        assert bn.username != ''
        bn.password = bytes(settings.get('password', '').decode('ascii'))
        bn.firstChannel = bytes(
            settings.get('firstchannel', 'The Void').decode('ascii'))
        bn.server = str(settings.get('server', ''))
        assert bn.server != ''
        bn.port = int(settings.get('port', '6112'))
        bn.bindaddress = str(settings.get('bindaddress', ''))
        bn.bindport = int(settings.get('bindport', '0'))
        bnets.append(bn)
        bnet_pool.spawn(bn.run)
        # eventlet.sleep(10.)  # HACK: I think bncsutil doesn't like being used by multiple different bnet transactions
    bnet_pool.waitall()
def allrolesSearch():
    """Search roles by name LIKE the POSTed `condition`; return total + rows JSON."""
    if request.method == 'POST':
        data = request.values
        try:
            json_str = json.dumps(data.to_dict())
            if len(json_str) > 2:
                strcondition = "%" + data['condition'] + "%"
                roles = db_session.query(Role).filter(
                    Role.RoleName.like(strcondition)).all()
                total = Counter(roles)
                jsonroles = json.dumps(roles, cls=AlchemyEncoder, ensure_ascii=False)
                jsonroles = '{"total"' + ":" + str(
                    total.__len__()) + ',"rows"' + ":\n" + jsonroles + "}"
                return jsonroles
        except Exception as e:
            print(e)
            logger.error(e)
            insertSyslog("error", "擦护心角色列表报错Error:" + str(e), current_user.Name)
            # BUG FIX: was string(e) -- undefined name; use str().
            return json.dumps([{
                "status": "Error:" + str(e)
            }], cls=AlchemyEncoder, ensure_ascii=False)
def allrolesFind():
    """Return one page of roles as {"total": N, "rows": [...]} JSON."""
    if request.method == 'GET':
        data = request.values
        try:
            json_str = json.dumps(data.to_dict())
            if len(json_str) > 10:
                pages = int(data['page'])
                rowsnumber = int(data['rows'])
                inipage = (pages - 1) * rowsnumber + 0
                endpage = (pages - 1) * rowsnumber + rowsnumber
                total = session.query(func.count(Role.ID)).scalar()
                roles = session.query(Role).all()[inipage:endpage]
                # Convert the ORM model list into JSON.
                jsonroles = json.dumps(roles, cls=AlchemyEncoder, ensure_ascii=False)
                jsonroles = '{"total"' + ":" + str(
                    total) + ',"rows"' + ":\n" + jsonroles + "}"
                return jsonroles
        except Exception as e:
            print(e)
            # BUG FIX: was string(e) -- undefined name; use str().
            return json.dumps([{
                "status": "Error:" + str(e)
            }], cls=AlchemyEncoder, ensure_ascii=False)
def string_empty_test():
    """Exercise my_string_empty() against None, an empty and a non-empty String.

    Returns True when at least one case failed (original convention kept).
    """
    errors = 0
    print("Testing my string empty function")
    print("Testing none case")
    value = None
    c = my_string_empty(value)
    if c == False:
        print("ok, expected false got false")
    else:
        print("Failed: expected false got " + str(c))
        errors = errors + 1
    print("Testing empty case")
    # BUG FIX: was `string = string()` which called the just-assigned None;
    # build an empty String like the non-empty case below.
    value = String("")
    c = my_string_empty(value)
    if c == True:
        print("ok, expected True got True")
    else:
        print("Failed: expected True got " + str(c))
        errors = errors + 1
    print("\n\n")
    print("Testing non-empty case")
    value = String("Hello!")
    c = my_string_empty(value)
    if c == False:
        print("ok, expected False got False")
    else:
        print("Failed: expected False got " + str(c))
        errors = errors + 1
    return errors > 0
def assertContained(self, values, string, additional_info=""):
    """Assert that at least one of `values` occurs in `string`.

    `string` may be a zero-argument callable, evaluated lazily on first use.
    Raises with a unified diff of the first expected value on failure.
    """
    if type(values) not in [list, tuple]:
        values = [values]
    for value in values:
        if type(string) is not str:
            string = string()  # lazy loading
        if value in string:
            return  # success
    diff_lines = difflib.unified_diff(
        values[0].split("\n"),
        string.split("\n"),
        fromfile="expected",
        tofile="actual",
    )
    diff_text = "".join(line.rstrip() + "\n" for line in diff_lines)
    raise Exception(
        "Expected to find '%s' in '%s', diff:\n\n%s\n%s"
        % (
            limit_size(values[0]),
            limit_size(string),
            limit_size(diff_text),
            additional_info,
        )
    )
def allrolesFind():
    """Return one page of roles (offset/limit paging) as {"total", "rows"} JSON."""
    if request.method == 'GET':
        data = request.values
        try:
            json_str = json.dumps(data.to_dict())
            if len(json_str) > 10:
                pages = int(data.get("offset"))  # page number
                rowsnumber = int(data.get("limit"))  # rows per page
                inipage = pages * rowsnumber + 0  # first row index
                endpage = pages * rowsnumber + rowsnumber  # one-past-last row index
                total = db_session.query(func.count(Role.ID)).scalar()
                roles = db_session.query(Role).all()[inipage:endpage]
                # Convert the ORM model list into JSON.
                jsonroles = json.dumps(roles, cls=AlchemyEncoder, ensure_ascii=False)
                jsonroles = '{"total"' + ":" + str(
                    total) + ',"rows"' + ":\n" + jsonroles + "}"
                return jsonroles
        except Exception as e:
            print(e)
            logger.error(e)
            insertSyslog("error", "查询角色列表报错Error:" + str(e), current_user.Name)
            # BUG FIX: was string(e) -- undefined name; use str().
            return json.dumps([{
                "status": "Error:" + str(e)
            }], cls=AlchemyEncoder, ensure_ascii=False)
def allrolesUpdate():
    """Update the Role identified by POSTed ID with the remaining form fields."""
    if request.method == 'POST':
        data = request.values
        # BUG FIX: removed `str = request.get_json()` -- unused and it
        # shadowed the builtin str needed below.
        try:
            json_str = json.dumps(data.to_dict())
            if len(json_str) > 10:
                Roleid = int(data['ID'])
                role = session.query(Role).filter_by(ID=Roleid).first()
                role.RoleCode = data['RoleCode']
                role.RoleName = data['RoleName']
                role.RoleSeq = data['RoleSeq']
                role.Description = data['Description']
                role.CreatePerson = data['CreatePerson']
                role.CreateDate = data['CreateDate']
                session.commit()
                return json.dumps([{
                    "status": "OK"
                }], cls=AlchemyEncoder, ensure_ascii=False)
        except Exception as e:
            print(e)
            # BUG FIX: was string(e) -- undefined name; use str().
            return json.dumps([{
                "status": "Error" + str(e)
            }], cls=AlchemyEncoder, ensure_ascii=False)
def entity_id(value):
    """Validate Entity ID."""
    # `string` is the module's coerce-to-string validator; lowercase the result.
    normalized = string(value).lower()
    if not valid_entity_id(normalized):
        raise vol.Invalid('Entity ID {} is an invalid entity id'.format(normalized))
    return normalized
def assertNotContained(self, value, string):
    """Assert that `value` does NOT occur in `string`.

    Either argument may be a zero-argument callable evaluated lazily.
    Raises with a unified diff on failure.
    """
    if type(value) is not str:
        value = value()  # lazy loading
    if type(string) is not str:
        string = string()
    if value not in string:
        return
    diff_text = ''.join(
        line.rstrip() + '\n'
        for line in difflib.unified_diff(value.split('\n'),
                                         string.split('\n'),
                                         fromfile='expected',
                                         tofile='actual'))
    raise Exception("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
        limit_size(value), limit_size(string), limit_size(diff_text)))
def _rsa_sub_routine(self, x, n, mode):
    '''
    RSA cipher sub-routine: encode or decode self.__text with exponent x mod n.
    '''
    if mode == 'encode':
        # Map each letter to its 1-based alphabet position and concatenate.
        # BUG FIX: the original called .append() on a str (crash) and used the
        # undefined name `a`; digits are now joined from a list.
        # NOTE(review): positions >= 10 produce two digits, which the decode
        # branch cannot distinguish -- confirm inputs are limited accordingly.
        digits = []
        for ch in self.__text:
            digits.append(str(ord(ch) - ord('a') + 1))
        output_string = ''.join(digits)
        print('Output string is : {} '.format(output_string))
        encrypted_data = pow(int(output_string), x) % n
        # BUG FIX: was string(...) -- undefined name; use str().
        return str(encrypted_data)
    else:
        encrypted_data = int(self.__text)
        decrypted_data = str(pow(encrypted_data, x) % n)
        # Map each digit back to its letter.
        chars = []
        for digit in decrypted_data:
            chars.append(chr(int(digit) + ord('a') - 1))
        return ''.join(chars)
def service(value):
    """Validate service."""
    # `string` is the module's coerce-to-string validator; lowercase the result.
    normalized = string(value).lower()
    if not valid_service(normalized):
        raise vol.Invalid('Service {} is an invalid service'.format(normalized))
    return normalized
def wrappedString(s, x, y, w, spacing=0):
    """Draw `s` starting at (x, y), wrapping so each drawn line fits width `w`.

    `spacing` is added between the lines; drawing is done by the `string`
    graphics helper, whose returned box supplies the next baseline.
    """
    cur_y = y
    remaining = s
    while remaining != "":
        count = __largestStringThatFits(remaining, w)
        box = string(remaining[:count], x, cur_y)
        # Next line starts below the drawn box, plus the requested spacing.
        cur_y = spacing + box[3]
        remaining = remaining[count:].strip()
def main(): args = parse.parse_args() is_debug = args.is_debug logFilePath = args.logFilePath keyName = args.keyName parse_result = (0,) debug("start parse Json from logFile...", is_debug) if logFilePath is None: debug("Json file path is None", is_debug) debug(msg) return parse_result if keyName is None: debug("key name is None", is_debug) return parse_result if os.path.exists(logFilePath) == False: debug("Json file path is not exists", is_debug) return parse_result keyNameList = keyName.split(":") try: f = file(logFilePath) s = json.load(f) ret_val = '' for k in keyNameList: if ret_val == '': ret_val = s[k] else: if type(ret_val) is types.ListType and unicode(str(k)).isdecimal(): ret_val = ret_val[string.atoi(k)] elif type(ret_val) is types.ListType and unicode(str(k)).isdecimal() == False: debug('keyName type is not int, keyName=>' + string(k)) elif type(ret_val) is types.DictType: ret_val = ret_val[k] else: debug('ret_val type is String or int, ret_val => ' + string(ret_val)) ret_val = None break parse_result = (1,ret_val) #print(parse_result) except Exception, e: debug(str(e),is_debug)
def send_to_arduino(coneection, command):
    """Repeatedly write `command` to the serial connection until Ctrl-C."""
    try:
        while True:
            # BUG FIX: was string(command) -- undefined name; use str().
            coneection.write(str(command))
    except KeyboardInterrupt:
        pass  # do cleanup here
class person(dbobject):
    """
    This is our minimal data model consisting of one class.
    """
    # Name of the backing relation (table) in the database.
    __relation__ = "person"
    # Auto-incrementing primary key.
    id = common_serial()
    name = Unicode()
    # Wrapped in delayed() -- presumably loaded lazily on first access;
    # confirm against the ORM's documentation.
    image = delayed(string())
def assertContained(self, values, string, additional_info=''):
    """Assert that at least one of `values` occurs in `string`.

    `string` may be a zero-argument callable, evaluated lazily on first use.
    Raises with a unified diff of the first expected value on failure.
    """
    if type(values) not in [list, tuple]:
        values = [values]
    for needle in values:
        if type(string) is not str:
            string = string()  # lazy loading
        if needle in string:
            return  # success
    diff_text = ''.join(
        line.rstrip() + '\n'
        for line in difflib.unified_diff(values[0].split('\n'),
                                         string.split('\n'),
                                         fromfile='expected',
                                         tofile='actual'))
    raise Exception("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
        limit_size(values[0]), limit_size(string), limit_size(diff_text),
        additional_info))
def __init__(self, ideal, start, end, fragc, crumbc, age, guilty, name, frags):
    """Coerce and store the record fields; `frags` is kept as-is."""
    self.ideal = int(ideal)
    self.start = int(start)
    self.end = int(end)
    self.fragc = int(fragc)
    self.crumbc = int(crumbc)
    self.age = int(age)
    self.guilty = int(guilty)
    # BUG FIX: was string(name) -- undefined name; coerce with str().
    self.name = str(name)
    self.frags = frags
def assertContained(self, values, string, additional_info=''):
    """Assert that at least one of `values` occurs in `string` (py2-aware).

    If an expected value is unicode, the haystack is reinterpreted from
    UTF-8; `string` may also be a zero-argument callable (lazy loading).
    """
    if type(values) not in [list, tuple]:
        values = [values]
    for needle in values:
        if type(needle) is unicode:
            # Non-ASCII expected text: treat the ASCII haystack as UTF-8 too.
            string = string.decode('UTF-8')
        if type(string) is not str and type(string) is not unicode:
            string = string()  # lazy loading
        if needle in string:
            return  # success
    diff_text = ''.join(
        line.rstrip() + '\n'
        for line in difflib.unified_diff(values[0].split('\n'),
                                         string.split('\n'),
                                         fromfile='expected',
                                         tofile='actual'))
    raise Exception("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
        limit_size(values[0]), limit_size(string), limit_size(diff_text),
        additional_info))
def MAC_DL(pdu):
    """Split a MAC PDU bit-sequence into SDUs and hand them to the RLC layer."""
    pdu = "".join(list(map(str, pdu)))
    size_lis, data = data_unit(pdu)
    sdu = []
    # Carve the payload into chunks of the advertised sizes.
    for size in size_lis:
        sdu.append(data[:size])
        data = data[size:]
    # BUG FIX: was string(sdu) -- undefined name; use str().
    k = str(sdu)
    print("packet sent to RLC layer : " + k)
    print("")
    # TEMPO.
    RLC_Receive(k)
def Random_Value(self, Type, rangelen):
    """Return a random value of `Type` within the 'min..max' range string."""
    Rand_Type_Value = None
    min = string.atoi(rangelen.split('..')[0])
    max = string.atoi(rangelen.split('..')[-1])
    if Type == 'String':
        len_num = random.randint(min, max)
        Rand_Type_Value = self.STRRandom(len_num)
        #''.join(map(lambda xx:(hex(ord(xx))[2:]),os.urandom(16)))
    elif Type == 'INTEGER':
        # BUG FIX: was string(...) -- the `string` module is not callable;
        # use str() to stringify the random integer.
        Rand_Type_Value = str(random.randint(min, max))
    #print 'Rand_Type_Value:',Rand_Type_Value
    return Rand_Type_Value
def Random_Value(self, Type, rangelen):
    """Return a random value of `Type` within the 'min..max' range string."""
    Rand_Type_Value = None
    min = string.atoi(rangelen.split('..')[0])
    max = string.atoi(rangelen.split('..')[-1])
    if Type == 'String':
        len_num = random.randint(min, max)
        Rand_Type_Value = self.STRRandom(len_num)
        #''.join(map(lambda xx:(hex(ord(xx))[2:]),os.urandom(16)))
    elif Type == 'INTEGER':
        # BUG FIX: was string(...) -- the `string` module is not callable;
        # use str() to stringify the random integer.
        Rand_Type_Value = str(random.randint(min, max))
    #print 'Rand_Type_Value:',Rand_Type_Value
    return Rand_Type_Value
def requesthandler(self):
    """Combined GET and POST request handler.

    Verifies username + password hash, then updates any profile fields
    present in the request, redirecting to `callback` when given.
    """
    username = self.request.get('username')
    callback = self.request.get('callback')
    pw = self.request.get('pw')
    if len(username) > 0 and len(pw) > 0:
        m = md5.new()
        m.update(pw)
        # BUG FIX: was string(...) -- undefined name; use str().
        pwhash = str(m.hexdigest())
        udb = db.Query(Users)
        # NOTE(review): the filter arguments were redacted in the original
        # ('username = '******'pwhash = ') -- reconstructed as the obvious
        # username/pwhash pair; confirm against the datastore schema.
        udb.filter('username = ', username)
        udb.filter('pwhash = ', pwhash)
        item = udb.get()
        if item is not None:
            firstname = self.request.get('firstname')
            lastname = self.request.get('lastname')
            description = self.request.get('description')
            skype = self.request.get('skype')
            facebook = self.request.get('facebook')
            linkedin = self.request.get('linkedin')
            proz = self.request.get('proz')
            wwwurl = self.request.get('www')
            if len(firstname) > 0:
                item.firstname = firstname
            if len(lastname) > 0:
                item.lastname = lastname
            if len(description) > 0:
                item.description = description
            if len(skype) > 0:
                item.skype = skype
            if len(facebook) > 0:
                item.facebook = facebook
            if len(linkedin) > 0:
                item.linkedin = linkedin
            if len(proz) > 0:
                item.proz = proz
            # BUG FIX: was len(www) -- `www` here is the module; test the
            # request value that was actually read.
            if len(wwwurl) > 0:
                item.www = wwwurl
            # BUG FIX: was `item.put` -- the method was never called.
            item.put()
            if len(callback) > 0:
                self.redirect(callback)
            else:
                self.response.headers['Content-Type'] = 'text/plain'
                # BUG FIX: was self.response.out.response('ok') -- no such method.
                self.response.out.write('ok')
        else:
            if len(callback) > 0:
                self.redirect(callback)
            else:
                self.response.headers['Content-Type'] = 'text/plain'
                self.response.out.write('error')
    else:
        www.serve(self, self.__doc__, title='/users/update')
def __largestStringThatFits(s, w):
    """Return the length of the longest prefix of `s` that renders within
    width `w`, preferring to break at the last space when it must shrink."""
    count = len(s)
    while count > 0:
        # Render off-screen (y = -1000) purely to measure the width.
        box = string(s[:count], 0, -1000)
        if box[2] < w:
            return count
        space = s[:count].rfind(' ')
        if space < 0:
            # No space to break at: give up and return the current length.
            return count
        count = space
    return count
def array(_type: str, inner_interval: Union[str, int] = default_interval, interval: Union[str, int] = default_interval) -> list:
    """Generate a random-length list of random strings or numbers.

    `interval` bounds the list length ("min..max" string or an int upper
    bound); `inner_interval` is forwarded to the element generator.
    """
    if isinstance(interval, str):
        min_len, max_len = __parse_string_interval__(interval)
    else:
        min_len, max_len = 0, int(interval)
    count = randrange(min_len, max_len)
    make_element = string if _type == 'string' else number
    return [make_element(inner_interval) for _ in range(count)]
def deleteUser():
    """Delete every User whose ID appears (as a numeric token) in the POSTed data."""
    if request.method == 'POST':
        data = request.values
        try:
            jsonstr = json.dumps(data.to_dict())
            if len(jsonstr) > 10:
                # Every numeric token in the payload is treated as a user ID.
                jsonnumber = re.findall(r"\d+\.?\d*", jsonstr)
                for key in jsonnumber:
                    id = int(key)
                    try:
                        oclass = db_session.query(User).filter_by(id=id).first()
                        db_session.delete(oclass)
                        db_session.commit()
                    except Exception as ee:
                        db_session.rollback()
                        print(ee)
                        # BUG FIX: was string(id)/string(ee) -- undefined; use str().
                        insertSyslog("error", "删除户ID为" + str(id) + "报错Error:" + str(ee), current_user.Name)
                        return json.dumps("删除用户报错", cls=AlchemyEncoder, ensure_ascii=False)
                return 'OK'
        except Exception as e:
            print(e)
            logger.error(e)
            insertSyslog("error", "删除用户报错Error:" + str(e), current_user.Name)
            return json.dumps([{"status": "Error:" + str(e)}], cls=AlchemyEncoder, ensure_ascii=False)
def validate_string_is_decimal(
    s,
    dp=2,
    name=None,
    location=None,
    kind='integer',
):
    """Validate that `s` is a string holding a decimal number with exactly
    `dp` decimal places.

    Raises TypeError if `dp` is not an int, ValueError if `s` does not match.
    """
    # dp = decimal places
    string(s, name, location, kind)  # first validate that s is a string
    if not isinstance(dp, int):
        msg = "which has type '{}', not 'int'.".format(type(dp).__name__)
        name2 = 'dp (i.e. the number of decimal places)'
        msg = build_error_msg(msg, dp, name=name2, location=location, kind=None)
        raise TypeError(msg)
    # BUG FIX: the decimal point was an unescaped '.', so ANY character
    # matched in its place (e.g. '1x23' passed with dp=2).
    regex = r'^\d*\.\d{%d}$' % dp
    decimal_pattern = re.compile(regex)
    if not decimal_pattern.match(s):
        msg = 'which is not a valid {}-decimal-place decimal value.'.format(dp)
        msg = build_error_msg(msg, s, name, location, kind)
        raise ValueError(msg)
def buildStack(session, name, children, setWipes=None,
               setCompOp=None, chosenAudioInput=None,
               alignStartFrames=None, strictFrameRanges=None):
    """Create a Stack node in `session`, apply the optional settings, and
    wire every child in `children` as an input.  Returns the new node."""
    logger.debug("buildStack %s" % name)
    stack = session.newNode("Stack")
    stack.setUIName("Stack of %s" % name)
    if setWipes:
        stack.setWipes(float(setWipes))
    if setCompOp:
        # BUG FIX: was string(setCompOp) -- undefined name; use str().
        stack.setCompOp(str(setCompOp))
    if chosenAudioInput:
        stack.chosenAudioInput(int(chosenAudioInput))
    if strictFrameRanges:
        stack.strictFrameRanges(int(strictFrameRanges))
    if alignStartFrames:
        stack.alignStartFrames(int(alignStartFrames))
    for i, child in enumerate(children):
        logger.debug("Switch,adding child %d" % i)
        stack.addInput(child)
    return stack
def read_hall_lens(cam, pos): global TID # data="0123" time = execute(lcc_write + increase_TID() + "40 80 " + module_bitmask(cam) + string(pos) + "\"") # data = execute(lcc_read + increase_TID()+ "40 00 "+ module_bitmask(cam)+"\"") # data = data.replace("\r", " ") # data = data.replace("\n", " ") # data = data[36:-3] # data = data[3:] + data[:2] # print " "+cam +"\t " +string_bigendian(pos) +"\t\t " + data time = time.replace("\r", " ") time = time.replace("\n", " ") time = time[70:-3] print " " + cam + "\t " + string_bigendian(pos) + "\t\t" + time
def array(input, index):
    """Parse a JSON-style array from `input` starting just past '[' at `index`.

    Returns (parsed_list, index_past_closing_bracket).  Delegates to the
    sibling string/number/dict parsers for element values.
    """
    result = []
    while input[index] != ']':
        ch = input[index]
        if ch == '"' or ch == "'":
            element, index = string(input, index + 1)
            result.append(element)
        elif ch == '[':
            element, index = array(input, index + 1)
            result.append(element)
        elif ch in '0123456789':
            element, index = number(input, index)
            result.append(element)
        elif ch == '{':
            element, index = dict(input, index + 1)
            result.append(element)
        else:
            # Skip separators / whitespace.
            index += 1
    return result, index + 1
def prepare_data_disk_func(disks):
    """
    Prepare each data disk for usage: partition, format xfs, and mount it
    under /ecs/uuid-<partition-uuid>.  Exits the process on fatal errors.
    """
    try:
        # echo -e "o\nn\np\n1\n\n\nw" | fdisk /dev/sdc
        for index, disk in enumerate(disks):
            disk_path = "/dev/{}".format(disk)
            partition_path = "/dev/{}1".format(disk)
            # BUG FIX: was string("xfs") -- undefined name; a literal suffices.
            if "xfs" == cmdline("wipefs -npq {} | cut -d ',' -f 4 | sed -n 2p".format(partition_path)).rstrip():
                logger.info("Disk partition {} is already formateed xfs".format(partition_path))
            elif "{}1".format(disk) in cmdline("fdisk -l"):
                logger.fatal("Partitioned disk {} already mounted. Please unmount and re-initialize disk before retrying.".format(disk))
                sys.exit()
            else:
                logger.info("Partitioning the disk '{}'".format(disk_path))
                ps = subprocess.Popen(["echo", "-e", "\"o\nn\np\n1\n\n\nw\""], stdout=subprocess.PIPE)
                output = subprocess.check_output(["fdisk", disk_path], stdin=ps.stdout)
                ps.wait()
                # Make File Filesystem in attached Volume
                # BUG FIX: was partition_name (undefined) -- use partition_path.
                logger.info("Make File filesystem in '{}'".format(partition_path))
                subprocess.call(["mkfs.xfs", "-f", partition_path])
                uuid_name = "uuid-{}".format(get_partition_uuid(partition_path))
                # mkdir -p /ecs/uuid-1
                logger.info("Make /ecs/{} Directory in attached Volume".format(uuid_name))
                subprocess.call(["mkdir", "-p", "/ecs/{}".format(uuid_name)])
                # mount /dev/sdc1 /ecs/uuid-1
                logger.info("Mount attached /dev{} to /ecs/{} volume.".format(partition_path, uuid_name))
                subprocess.call(["mount", partition_path, "/ecs/{}".format(uuid_name)])
    except Exception as ex:
        logger.exception(ex)
        logger.fatal("Aborting program! Please review log.")
        sys.exit()
def dict(input, index):
    """Parse a JSON-style object from `input` starting just past '{' at `index`.

    Returns (parsed_dict, index_past_closing_brace).  Values before a ':'
    become keys; values after it become the entry's value; ',' commits a pair.
    """
    dict_gen = {}
    key, value, seen_colon, val = None, None, False, None
    while input[index] != '}':
        ch = input[index]
        if ch == '"' or ch == "'":
            val, index = string(input, index + 1)
            if seen_colon:
                value = val
            else:
                key = val
        elif ch in '0123456789':
            val, index = number(input, index)
            if seen_colon:
                value = val
            else:
                key = val
        elif ch == '[':
            val, index = array(input, index + 1)
            if seen_colon:
                value = val
            else:
                key = val
        elif ch == ',':
            # Commit the completed key/value pair.
            seen_colon = False
            dict_gen[key] = value
            index += 1
        elif ch == ':':
            seen_colon = True
            index += 1
        elif ch == '{':
            val, index = dict(input, index + 1)
            if seen_colon:
                value = val
            else:
                key = val
        else:
            # Skip whitespace and anything unrecognized.
            index += 1
    # Commit the final pair (no trailing comma).
    if key is not None and value is not None:
        dict_gen[key] = value
    # BUG FIX: removed leftover `print dict_gen, index + 1` debug statement
    # that wrote to stdout on every (including recursive) call.
    return dict_gen, index + 1
def main(): filename = sys.argv[1] fixGlassCSV(filename) fileIn = open(filename, 'r') lines = fileIn.readlines() lastLine = "" lastOrder = createOrder(CLIENT+"0") for i in len(lines): l = lines[i] if not l or "\r\n" == l: if not lastLine: # two block line a raw continue # block line mean a new order order = createOrder(CLIENT+string(i+1)) print "create a new order: " + order #order.save() lastOrder = order else: l = l.replace("\r\n", "") data = l.split(",") if (len(data) != 6): raise Exception("data format wrong :" + data) mode = int(data[0]) glassType = data[1] width = float(data[2]) height = float(data[3]) quantity = int(data[4]) rub = data[5] unitPrice = findPriceByGlassType(glassType) if (mode!=0) else 0 row = GlassRow.create(name='row', quantity=quantity, unitPrice=unitPrice, orderId=lastOrder, width=width, height=height, rate=1.2, rubEdge=rub, rubEdgeUnitPrice=0.8, extra="", extraCost=0) print "create a row :" + row #row.save() lastLine = l
def string(node, length_fmt=":%s", end=True, newline=True):
    "Recursively create a newick string from node."
    if node.isleaf:
        node_str = "%s" % node.label
    else:
        # Children are serialized without terminators; only the outermost
        # call appends the ';'.
        inner = ",".join(
            string(child, length_fmt, False, newline)
            for child in node.children)
        node_str = "(%s)%s" % (inner, node.label or "")
    length_str = length_fmt % node.length if node.length is not None else ""
    if not end:
        terminator = ""
    elif newline:
        terminator = ";\n"
    else:
        terminator = ";"
    return "%s%s%s" % (node_str, length_str, terminator)
def __add__(self, x):
    # NOTE(review): this method cannot run as written -- it assigns into str
    # indices (sumfrac[i]=..., sumint[i]=...; str is immutable), calls several
    # undefined names (`string`, `fraclength`, `fraction`, `intlength`,
    # `integer`, `MAX`, `sumint`), and compares an int `i` against the str
    # returned by hex().  The intent appears to be digit-by-digit addition of
    # two decimal numbers held as digit sequences; flagged for a rewrite
    # rather than guessed at here.
    i=0
    sumfrac=""
    # Apparent intent: add fractional digit columns left-to-right with carry.
    # The hex() in the loop bound looks wrong -- TODO confirm intended count.
    while i<hex(fraclength(self)) or i<hex(fraclength(x)):
        sumfrac[i]=string(string(fraction(self)[i]+fraction(x)[i])[0]+sumfrac[i])[0]
        if string(fraction(self)[i]+fraction(x)[i])>=MAX:
            if i==0:  #enters if it's the first frac part
                sumint[intlength]=(string(fraction(self)[i]+fraction(x)[i]))[1]  #first frac carries over to the last int part
            else:
                sumfrac[i-1]=(string(fraction(self)[i]+fraction(x)[i]))[1]  #carries while in frac
        i=i+1
    # Start the integer pass at the longer operand's digit count.
    if intlength(self)<intlength(x):
        i=intlength(x)
    elif intlength(self)>intlength(x):
        i=intlength(self)
    else:
        i=intlength(self)
    # Apparent intent: add integer digit columns, carrying toward index 0.
    while i<intlength(self):
        sumint[i]=string(string(sumint[i]+integer(self)[i])+integer(x)[i])
        sumint[i-1]=sumint[i][1]
        sumint[i]=sumint[i][0]
        i=i-1
    sum=sumint+"."+sumfrac
    return (sum)
""" the rules of terratri ===================== step notation ------------- We represent the history as a string of 2 chars (steps) per move. lowercase = red uppercase = blue 'n','N' = north 's','S' = south 'e','E' = east 'w','W' = west 'f','F' = fort 'x','X' = pass (second step only) board notation -------------- We represent either as a 5 x 5 array (list of lists) of characters (a "grid"), or as a flattened 25-character string (a "board"). In both cases, the characters are: ' '= unclaimed square
def main(): """This provides a command line interface to the password database, with awareness of a deployment home's file layout """ usage = "\n %prog [options] view [key]\n %prog [options] create input_filename\n %prog [options] update key" parser = OptionParser(usage=usage) parser.add_option( "--deployment-home", "-d", dest="deployment_home", default=None, help="Location of deployed application - can figure this out automatically unless installing from source", ) (options, args) = parser.parse_args() # check the command line valid_commands = ["view", "view-json", "update", "create"] if len(args) == 0: command = "view" else: command = args[0] if command not in valid_commands: parser.error("Invalid command %s" % command) if len(args) > 2: parser.error("Too many arguments") if command == "update" and len(args) != 2: paser.error("Need to specify key to be updated") if command == "create": if len(args) != 2: parser.error("Need to specify input filename") create_input_filename = abspath(args[1]) if not os.path.exists(create_input_filename): parser.error("Input file %s does not exist" % create_input_filename) if command == "view-json" and len(args) > 1: parser.error("view-json does not accept any additional arguments") # setup the file layout efl = engage_file_layout.get_engine_layout_mgr() if options.deployment_home: dh = abspath(options.deployment_home) if not os.path.exists(dh): parser.error("Deployment home %s not found" % dh) elif efl.has_deployment_home(): dh = efl.get_deployment_home() else: parser.error("Not running from a deployment home, and -d was not specified") # read the existing password database, if present pw_dir = os.path.join(dh, "config") pw_file = os.path.join(pw_dir, pw_repository.REPOSITORY_FILE_NAME) salt_file = os.path.join(pw_dir, pw_repository.SALT_FILE_NAME) if os.path.exists(pw_file) and command != "create": pw_db = pw_repository.PasswordRepository.load_from_file( pw_file, salt_file, _get_master_password(ask_only_once=True) ) else: pw_db = 
pw_repository.PasswordRepository(_get_master_password()) # run the commands if command == "view": if len(args) == 2: key = args[1] if not pw_db.has_key(key): print "Password database does not contain key '%s'" % key return -1 print "'%s' password is '%s'" % (key, pw_db.get_value(key)) return 0 else: for (k, v) in pw_db.items(): print "'%s' password is '%s'" % (k, v) print "%d entries found." % len(pw_db.items()) return 0 elif command == "view-json": print json.dumps(pw_db.data, indent=2) return 0 elif command == "update": key = args[1] pw_db.update_key(key, _prompt_for_password("Enter password for key '%s'" % key, key)) pw_db.save_to_file(pw_file, salt_file) print "Updated password database with key '%s'" % key return 0 else: assert command == "create" try: with open(create_input_filename, "rb") as f: data = json.load(f) except e: parser.error("Unable to parse input JSON file %s: %s" % (input_filename, string(e))) if not isinstance(data, dict): parser.error("Input file %s does not contain an object/dictionary" % input_filename) for (k, v) in data.items(): pw_db.add_key(k, v) pw_db.save_to_file(pw_file, salt_file) print "Created password database" return 0
# NOTE(review): the next two lines are the tail of a print helper whose
# def line (and most of its docstring) lie above this chunk; left unchanged.
by the `file` keyword argument."""
    print(format_color(string), file=file)


# --- Regular expressions for recognizing Python string literals ---

# Optional string-prefix characters (r, b, or u, either case).
_RE_STRING_START = "[bBrRuU]*"
_RE_STRING_TRIPLE_DOUBLE = '"""'
_RE_STRING_TRIPLE_SINGLE = "'''"
_RE_STRING_DOUBLE = '"'
_RE_STRING_SINGLE = "'"
# Triple quotes listed first so the alternation built below prefers the
# longer quote sequences over a single quote character.
_STRINGS = (_RE_STRING_TRIPLE_DOUBLE, _RE_STRING_TRIPLE_SINGLE, _RE_STRING_DOUBLE, _RE_STRING_SINGLE)

RE_BEGIN_STRING = re.compile("(" + _RE_STRING_START + "(" + "|".join(_STRINGS) + "))")
"""Regular expression matching the start of a string, including quotes and leading characters (r, b, or u)"""

RE_STRING_START = re.compile(_RE_STRING_START)
"""Regular expression matching the characters before the quotes when starting a string (r, b, or u, case insensitive)"""

RE_STRING_CONT = {
    k: re.compile(v)
    for k, v in {
        '"': r'((\\(.|\n))|([^"\\]))*',
        "'": r"((\\(.|\n))|([^'\\]))*",
        '"""': r'((\\(.|\n))|([^"\\])|("(?!""))|\n)*',
        "'''": r"((\\(.|\n))|([^'\\])|('(?!''))|\n)*",
    }.items()
}
"""Dictionary mapping starting quote sequences to regular expressions that match the contents of a string beginning with those quotes (not including the terminating quotes)"""
import string #Quantidade de casos de testes n = int(input()) #quantidade de linhas que vem a seguir l = int(input()) #Lista de letras do alfabeto a = list(string.ascii_uppercase) hash = 0 cont = 0 #Valor = (Posição no alfabeto) + (Elemento de entrada) + (Posição do elemento) for i in range(l): valor = input("") for j in a: print(a) if (string(j) == valor[j]): hash = j + cont + i cont += cont print (hash)
    if line == 'q':
        break
    if pat.search(line):
        print 'matched:', line
    else:
        print 'no match:', line

Comments:

We import module re in order to use regular expressions.
"re.compile()" compiles a regular expression so that we can reuse the
compiled regular expression without compiling it repeatedly.

2.3 Using regular expressions

Use match to match at the beginning of a string (or not at all).
Use search to search a string and match the first occurrence from the left.

Here are some examples:

>>> import re
>>> pat = re.compile('aa[0-9]*bb')
>>> x = pat.match('aa1234bbccddee')
>>> x
<_sre.SRE_Match object at 0x401e9608>
>>> x = pat.match('xxxxaa1234bbccddee')
>>> x
>>> type(x)
<type 'NoneType'>
>>> x = pat.search('xxxxaa1234bbccddee')
>>> x
import sys, re

try:
    import string
except:
    # emulate string module under IronPython
    # NOTE(review): bare "except:" deliberately catches any failure of the
    # import; instances below are used as string.join(seq, sep) etc.
    class string(object):
        def join(self, seq, sep):
            return sep.join(seq)
        def replace(self, text, *args):
            return text.replace(*args)
        def split(self, text, *args):
            return text.split(*args)
        def strip(self, text, *args):
            return text.strip(*args)
    # replace the class with a callable-free singleton instance so that
    # string.join/replace/split/strip work like the module-level functions
    string = string()


class _SimpleElementPath:
    # emulate pre-1.2 find/findtext/findall behaviour

    # Return the first direct child of `element` whose tag equals `tag`,
    # or None when no child matches.
    def find(self, element, tag):
        for elem in element:
            if elem.tag == tag:
                return elem
        return None

    # Return the text of the first matching child ("" when the child has
    # no text), or `default` when no child matches.
    def findtext(self, element, tag, default=None):
        for elem in element:
            if elem.tag == tag:
                return elem.text or ""
        return default

    # NOTE(review): findall is truncated at this point in the visible
    # source -- the remainder of its body lies outside this chunk.
    def findall(self, element, tag):
        if tag[:3] == ".//":
# NOTE(review): fragment of a larger read loop -- the enclosing for/while
# header (which binds matches, tempC, tempF and myZone) lies above this
# chunk, as does the code that consumes `values`.  Code left byte-identical.
if (not matches):
    # humidity could not be parsed: record an error row and move on
    values = [datetime.datetime.now(), myZone, "", "", "", "Error Reading Humidity"]
    continue
humidity = float(matches.group(1))
print "Temperature: %.1f C" % tempC
print "Temperature: %.1f F" % tempF
print "Humidity: %.1f %%" % humidity
values = [datetime.datetime.now(), myZone, tempC, tempF, humidity]
# print "values:"
# print values
# print time.time()
# NOTE(review): string(...) is not a builtin -- presumably str(...) was
# intended; as written each of these calls raises a NameError.
cmd="echo " +string(datetime.datetime.now()) +":" +string(myZone) +":" +string(tempC) +":" +string(tempF) +":" +string(humidity) #+" > " +time.time()
print cmd
# os.system("echo %s" % values)
# try:
#     worksheet.append_row(values)
# except:
#     print "Unable to append data. Check your connection?"
#     sys.exit()