def get_bridge(driver):
    bridge = WebthingBridge(driver, 'Bridge')
    res = httpx.get('http://127.0.0.1:8000/things')
    data = res.json()
    for i in data:
        if 'TemperatureSensor' in i['@type'] and len(i['@type']) == 2:
            bridge.add_accessory(
                TemperatureSensor(driver, i['title'],
                                  aid=fnv1a_32(bytes(i['id'], encoding='utf-8')),
                                  thing_id=i['id']))
        if 'DoorSensor' in i['@type']:
            bridge.add_accessory(
                ContactSensor(driver, i['title'],
                              aid=fnv1a_32(bytes(i['id'], encoding='utf-8')),
                              thing_id=i['id']))
        if 'Light' in i['@type']:
            bridge.add_accessory(
                LightBulb(driver, i['title'],
                          aid=fnv1a_32(bytes(i['id'], encoding='utf-8')),
                          thing_id=i['id']))
        if 'OnOffSwitch' in i['@type'] and 'Light' not in i['@type']:
            bridge.add_accessory(
                Switch(driver, i['title'],
                       aid=fnv1a_32(bytes(i['id'], encoding='utf-8')),
                       thing_id=i['id']))
    # bridge.add_accessory(FakeFan(driver, 'Big Fan'))
    # bridge.add_accessory(GarageDoor(driver, 'Garage'))
    return bridge
async def connect(self, callback):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(10)
    self._set_backend_server_host(
        self._authentication_client.auth_data.region)
    connection = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLSv1)
    connection.connect(
        (self._BACKEND_SERVER_HOST, self._BACKEND_SERVER_PORT))
    connection.do_handshake()
    self.connect_callback = callback
    self.connection = connection
    request = connection_service_pb2.ConnectRequest()
    request.bind_request.imported_service_hash.extend(
        [fnv1a_32(bytes(s, "UTF-8")) for s in self._imported_services])
    request.bind_request.exported_service.extend([
        connection_service_pb2.BoundService(
            hash=fnv1a_32(bytes(s.name, "UTF-8")), id=s.id)
        for s in self._exported_services
    ])
    self._send_request(self._CONNECTION_SERVICE, 1, request,
                       self._on_connect_response)
def check_hdr_value(hdr, hdrname, hdr_value_table):
    val = hdr.split(":")[1]
    if val[0] == " ":
        val = val[1:]
    hdr_coded = HDRL[hdrname] + ":"
    ret = []
    if "," in val:
        # Simple splitting of compound values.
        if ";q=" in val:
            # We do not tokenize compound values with quality parameters
            # at this moment.
            return hdr_coded + format(fnv1a_32(val.encode()), "x")
        if ", " in val:
            t = val.split(", ")
        else:
            t = val.split(",")
        for j in t:
            if j == "":
                return hdr_coded + format(fnv1a_32(val.encode()), "x")
            if j not in hdr_value_table:
                print("Unknown header value - " + hdr, file=sys.stderr)
                return hdr_coded + format(fnv1a_32(val.encode()), "x")
            ret.append(hdr_value_table[j])
    else:
        if val in hdr_value_table:
            k = hdr_value_table[val]
        else:
            print("Unknown header value - " + hdr, file=sys.stderr)
            k = format(fnv1a_32(val.encode()), "x")
        ret.append(k)
    return hdr_coded + ",".join(ret)
def hash_response_list(self, response):
    """Return a dictionary mapping each hash to a response in a
    response list.
    """
    response_dict = {}
    if isinstance(response, str):
        response_hash = str(fnv1a_32(response.encode('utf-8')))
        response_dict[response_hash] = response
    elif isinstance(response, list):
        for resp in response:
            sentences = []
            for edge in resp['edge_list']:
                for (_, sentence, _) in edge['stmts']:
                    sentences.append(sentence)
            response_str = ' '.join(sentences)
            response_hash = str(fnv1a_32(response_str.encode('utf-8')))
            response_dict[response_hash] = resp
    elif isinstance(response, dict):
        results = [
            str(response.get('sat_rate')),
            str(response.get('num_sim'))
        ]
        response_str = ' '.join(results)
        response_hash = str(fnv1a_32(response_str.encode('utf-8')))
        response_dict[response_hash] = response
    else:
        raise TypeError('Response should be a string, a list, or a dict.')
    return response_dict
def test_fnv1a_32(self):
    """Tests the 32 bit FNV-1a hash implementation."""
    for string, expected_hval in vector.fnv1a_32_vector.items():
        result = fnvhash.fnv1a_32(string)
        self.assertEqual(result, expected_hval)
def catbus_string_hash(s):
    s = str(s)  # no unicode!
    if len(s) == 0:
        return 0
    # fnv1a_32 operates on bytes; the "no unicode" contract makes ASCII safe
    return fnv1a_32(s.encode('ascii'))
def check_ua_value(hdr):
    val = hdr.split(":")[1]
    if val[0] == " ":
        val = val[1:]
    name = HDRL["user-agent"]
    return name + ":" + format(fnv1a_32(val.encode()), "x")
async def run(self):
    """Schedule tasks for each of the accessories' run method."""
    for acc in self.accessories.values():
        self.driver.async_add_job(acc.run)
    uri = "ws://127.0.0.1:8000/things/urn:thingtalk:server"
    async with websockets.connect(uri) as websocket:
        await websocket.send(json.dumps({
            "messageType": "addEventSubscription",
            "data": {"device_pairing": {}}
        }))
        while True:
            recv_json = json.loads(await websocket.recv())
            if recv_json.get("messageType") == "event":
                data = recv_json['data']
                event = data['device_pairing']
                data = event['data']
                if 'OnOffSwitch' in data['@type'] and 'Light' not in data['@type']:
                    self.add_accessory(
                        Switch(self.driver, data['title'],
                               aid=fnv1a_32(bytes(data['id'], encoding='utf-8')),
                               thing_id=data['id']))
                    for acc in self.accessories.values():
                        self.driver.async_add_job(acc.run)
                    self.driver.config_changed()
                    break
def resolve_path(dtype, object_key, use_fnv=False):
    # type: (str, str, bool) -> str
    """Get path to a file using data type and object key (for sharding)."""
    path, prefix_length = PATHS[dtype]
    # fnvhash.fnv1a_32 expects bytes, so encode the key before hashing.
    p = fnvhash.fnv1a_32(object_key.encode()) if use_fnv else ord(object_key[0])
    prefix = p & (2 ** prefix_length - 1)
    return path.format(key=prefix)
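# Hedged usage sketch for resolve_path. The PATHS entry below is a toy
# table for this demo, not the project's real configuration: with
# prefix_length = 8, the low 8 bits of the hash (or of the first
# character's code point) select one of 256 shard directories.
PATHS = {"blob": ("data/blobs/{key:02x}/", 8)}

print(resolve_path("blob", "some-object-key", use_fnv=True))
# e.g. 'data/blobs/3a/' -- the exact shard depends on the FNV-1a hash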
def _generate_aids(unique_id: str, entity_id: str) -> Iterator[int]:
    """Generate accessory aid."""
    if unique_id:
        # Use fnv1a_32 of the unique id, as fnv1a_32 has fewer
        # collisions than adler32.
        yield fnv1a_32(unique_id.encode("utf-8"))

    # If there is no unique id we use fnv1a_32 of the entity id, as it
    # is unlikely to collide.
    yield fnv1a_32(entity_id.encode("utf-8"))

    # If called again, resort to random allocations. Given the size of
    # the range it is unlikely we'll encounter duplicates, but try a few
    # times regardless.
    for _ in range(5):
        yield random.randrange(AID_MIN, AID_MAX)
def hex_path(comic_path):
    """Translate an integer into an efficient filesystem path."""
    fnv = fnv1a_32(bytes(str(comic_path), "utf-8"))
    hex_str = "{0:0{1}x}".format(fnv, HEX_FILL)
    parts = []
    for i in range(0, len(hex_str), PATH_STEP):
        parts.append(hex_str[i:i + PATH_STEP])
    return Path("/".join(parts))
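# Hedged worked example for hex_path, assuming HEX_FILL = 8 and
# PATH_STEP = 2 (this sketch's assumptions, not values taken from the
# project). fnv1a_32(b'foo') is the library's documented value
# 0xa9f37ed7, so the 8-digit hex string 'a9f37ed7' splits into
# two-character path segments:
#
#   hex_path('foo')  ->  Path('a9/f3/7e/d7')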
def do_n2i_load(db_name, table_fmt, table_num, fields, key_fmt, where_cond=""):
    conn = MySQLdb.connect(host=host, user=user, passwd=passwd, db=db_name,
                           port=port, unix_socket=unix_socket)
    begin = datetime.datetime.today()
    rows_affected = 0
    with conn:
        fields = ",".join(str(e) for e in fields)
        cur = conn.cursor()
        for table_index in range(table_num):
            table = table_fmt % (table_index,)
            tmpf = file_path + "tmp" + table
            remove_file_if_exists(tmpf)
            sql = (select_into_fmt % (fields, add_quote(tmpf), table)) + where_cond
            rows_affected += cur.execute(sql)
        for redis_index in range(redis_num):
            remove_file_if_exists(file_path + db_name + str(redis_index))
        for table_index in range(table_num):
            table = table_fmt % (table_index,)
            tmpf = file_path + "tmp" + table
            with open(tmpf, "r") as f:
                for line in f.readlines():
                    elts = (line.split("\n")[0]).split("\t")
                    # fnv1a_32 expects bytes, so encode the key column.
                    shard = fnv1a_32(elts[0].encode()) % redis_num
                    redisf_name = file_path + db_name + str(shard)
                    with open(redisf_name, "ab+") as redis_file:
                        insert_item = gen_insert_item(key_fmt, elts)
                        if len(insert_item) != 0:
                            redis_file.write(insert_item)
        for redis_index in range(redis_num):
            redisf_name = file_path + db_name + str(redis_index)
            cmd = ("cat " + redisf_name + " | redis-cli" +
                   " -h " + redis_conf[redis_index][0] +
                   " -p " + redis_conf[redis_index][1] +
                   " -n " + redis_conf[redis_index][2] + " --pipe")
            fp = os.popen(cmd)
    end = datetime.datetime.today()
    delta = end - begin
    print("db:", db_name, "table_fmt:", table_fmt, "table_num:", table_num,
          "fields:", fields, "key_fmt:", key_fmt)
    print("begin:", begin, "end:", end, "delta:",
          delta.seconds + delta.days * 86400)
    print("rows_affected:", rows_affected)
def lookup(self, word):
    print("**** Looking:", word)
    # The two base hashes do not depend on h, so compute them once.
    murmur = mmh3.hash(word)
    fnv = fnv1a_32(word.encode('utf8'))
    for h in range(self.k):
        hIndex = (murmur + h * fnv) % self.m
        if self.bit_array[hIndex] == 0:
            return False
    return True
def add(self, word):
    print("Adding:", word, "to 1 in", self.k, "indexes")
    if self.addedWords < self.n:
        self.addedWords += 1
        murmur = mmh3.hash(word)
        fnv = fnv1a_32(word.encode('utf8'))
        for h in range(self.k):
            hIndex = (murmur + h * fnv) % self.m
            print("\tIn index:", hIndex)
            self.bit_array[hIndex] = 1
    else:
        print("Cannot add any more words")
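# Hedged usage sketch wiring the add/lookup methods above into a
# minimal Bloom filter class. The class name, constructor, and sizing
# formulas are this sketch's assumptions; only m, k, n, bit_array and
# addedWords are implied by the methods themselves.
import math

import mmh3
from fnvhash import fnv1a_32


class BloomFilter:
    add = add          # the functions defined above become methods
    lookup = lookup

    def __init__(self, n, fp_rate=0.01):
        self.n = n
        # Standard Bloom filter sizing: m bits, k hash functions.
        self.m = math.ceil(-n * math.log(fp_rate) / math.log(2) ** 2)
        self.k = max(1, round(self.m / n * math.log(2)))
        self.bit_array = [0] * self.m
        self.addedWords = 0


bf = BloomFilter(n=100)
bf.add("hello")
print(bf.lookup("hello"))   # True
print(bf.lookup("absent"))  # False with high probability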
def get_hdr_order(request_split):
    ret = []
    for reqline in request_split[1:]:
        hdr = reqline.split(":")[0]
        hdr_lower = hdr.lower()
        hdr_coded = format(fnv1a_32(hdr.encode()), "x")
        if hdr_lower in HDRL:
            if get_hdr_case(hdr):
                hdr_coded = HDRL[hdr_lower]
            else:
                hdr_coded = "!" + HDRL[hdr_lower]
        ret.append(hdr_coded)
    return ",".join(ret)
def hash_response_list(self, response_list):
    """Return a dictionary mapping each hash to a response in a
    response list.
    """
    response_dict = {}
    for response in response_list:
        sentences = []
        for sentence, _ in response:
            sentences.append(sentence)
        response_str = ' '.join(sentences)
        response_hash = str(fnv1a_32(response_str.encode('utf-8')))
        response_dict[response_hash] = response
    return response_dict
def check_hdr_order(pkt):
    ret = []
    for i in pkt[1:]:
        t = i.split(":")[0]
        t_low = t.lower()
        k = format(fnv1a_32(t.encode()), "x")
        if t_low in HDRL:
            if check_hdr_case(t):
                k = HDRL[t_low]
            else:
                k = "!" + HDRL[t_low]
        ret.append(k)
    return ",".join(ret)
def _word_to_feature(self, word: str) -> int:
    """Converts a word to an identifier.

    Parameters
    ----------
    word : str
        The word to convert.

    Returns
    -------
    An identifier (int) which is either computed by a specified
    vocabulary or by using a hashing vectorizer.
    """
    return fnv1a_32(
        unidecode.unidecode(word.lower()).encode('utf-8')
    ) % self.word_vocab_size
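# Hedged demo of the hashing trick above; the vocabulary size 2**18 is
# an arbitrary choice for this sketch, not the project's setting.
import unidecode
from fnvhash import fnv1a_32

word_vocab_size = 2 ** 18
for w in ("Éclair", "eclair", "ECLAIR"):
    feature = fnv1a_32(
        unidecode.unidecode(w.lower()).encode('utf-8')) % word_vocab_size
    print(w, "->", feature)
# All three print the same feature id: lower() folds case and unidecode
# strips the accent before hashing.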
def hash(key):
    """Print hash for a key"""
    # convert to ASCII
    key = str(key)
    hashed_key = catbus_string_hash(key)
    click.echo('%d 0x%08x' % (hashed_key, hashed_key & 0xffffffff))

    from fnvhash import fnv1a_32
    # fnvhash's fnv1a_32 expects bytes, so encode the ASCII key.
    hashed_key = fnv1a_32(key.encode('ascii'))
    click.echo('FNV1A: %d 0x%08x' % (hashed_key, hashed_key & 0xffffffff))
def _generate_aids(unique_id: str, entity_id: str) -> Iterator[int]:
    """Generate accessory aid."""
    # Backward compatibility: previously HA did *only* adler32 on the
    # entity id. Not stable if the entity ID changes, and not robust
    # against collisions.
    yield adler32(entity_id.encode("utf-8"))

    if unique_id:
        # Use fnv1a_32 of the unique id, as fnv1a_32 has fewer
        # collisions than adler32.
        yield fnv1a_32(unique_id.encode("utf-8"))

    # If there is no unique id we use fnv1a_32 of the entity id, as it
    # is unlikely to collide.
    yield fnv1a_32(entity_id.encode("utf-8"))

    # If called again, resort to random allocations. Given the size of
    # the range it is unlikely we'll encounter duplicates, but try a few
    # times regardless.
    for _ in range(5):
        yield random.randrange(AID_MIN, AID_MAX)
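# Hedged sketch of how the aid candidates above might be consumed: take
# the first candidate that is in range and not already allocated. The
# helper name get_aid, the allocated set, and the AID_MIN/AID_MAX
# values below are assumptions of this sketch, not project API.
AID_MIN = 2
AID_MAX = 2 ** 63


def get_aid(unique_id: str, entity_id: str, allocated: set[int]) -> int:
    for aid in _generate_aids(unique_id, entity_id):
        if AID_MIN <= aid <= AID_MAX and aid not in allocated:
            allocated.add(aid)
            return aid
    raise ValueError("Unable to generate a unique accessory id")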
def hashme(s, hashType):
    # Algorithms provided by hashlib share one code path.
    hashlib_algos = (
        "md4", "md5", "sha1", "sha224", "sha256", "sha384", "sha512",
        "ripemd160", "whirlpool",
    )
    if hashType in hashlib_algos:
        return "0x" + hashlib.new(hashType, s).hexdigest()
    # Integer-returning hashes share another.
    simple_algos = {
        "crc8": crc8,
        "crc16": crc16,
        "crc32": crc32,
        "crc64": crc64,
        "djb2": djb2,
        "sdbm": sdbm,
        "loselose": loselose,
        "fnv1_32": fnvhash.fnv1_32,
        "fnv1a_32": fnvhash.fnv1a_32,
        "fnv1_64": fnvhash.fnv1_64,
        "fnv1a_64": fnvhash.fnv1a_64,
    }
    if hashType in simple_algos:
        return hex(simple_algos[hashType](s))
    if hashType == "murmur3":
        # this might also take a different seed
        return hex(mmh3.hash(s, signed=False))
def get_alias(self, string: str) -> int:
    try:
        alias = self.aliases_by_string[string]
    except KeyError:
        encoded_string, consumed = utf8codec.encode(string)
        assert consumed == len(string)
        alias = fnv1a_32(encoded_string)
        try:
            conflicting_string = self.strings_by_alias[alias]
        except KeyError:
            self.strings_by_alias[alias] = string
            self.aliases_by_string[string] = alias
            self.is_dirty = True
        else:
            raise StringTable.HashCollisionError(
                "The fnv1a hash {} of the new string '{}' "
                "collides with that of the existing string "
                "{}".format(alias, string, conflicting_string))
    return alias
def check_content_type(hdr):
    val = hdr.split(":")[1]
    if val[0] == " ":
        val = val[1:]
    hdr_coded = HDRL["content-type"] + ":"
    ret = []
    if "," in val:
        if ", " in val:
            vals = val.split(", ")
        else:
            vals = val.split(",")
        for itv in vals:
            if ";" in itv:
                if "boundary=" in itv:
                    # Hash only up to and including "boundary=" so the
                    # random boundary token does not change the result;
                    # index into val to match the val[...] slice.
                    bnd_ind = val.index("boundary=")
                    bnd_offset = len("boundary=")
                    val_bnd = val[:bnd_ind + bnd_offset]
                    return hdr_coded + format(fnv1a_32(val_bnd.encode()), "x")
                ret.append(format(fnv1a_32(itv.encode()), "x"))
            else:
                if itv not in CONTENTTYPE:
                    print("Unknown Content-Type value - " + hdr, file=sys.stderr)
                    k = format(fnv1a_32(itv.encode()), "x")
                else:
                    k = CONTENTTYPE[itv]
                ret.append(k)
    else:
        if ";" in val:
            if "boundary=" not in val:
                return hdr_coded + format(fnv1a_32(val.encode()), "x")
            bnd_ind = val.index("boundary=")
            bnd_offset = len("boundary=")
            val_bnd = val[:bnd_ind + bnd_offset]
            return hdr_coded + format(fnv1a_32(val_bnd.encode()), "x")
        if val not in CONTENTTYPE:
            print("Unknown Content-Type value - " + hdr, file=sys.stderr)
            k = format(fnv1a_32(val.encode()), "x")
        else:
            k = CONTENTTYPE[val]
        ret.append(k)
    return hdr_coded + ",".join(ret)
def fnv(self, string):
    # fnv1a_32 expects bytes, so encode the string first.
    hashed = fnv1a_32(string.encode('utf-8'))
    # Keep the index within the length of the table.
    index = hashed % len(self._buckets)
    return index
def check_accept_language_value(hdr):
    val = hdr.split(":")[1]
    name = HDRL["accept-language"]
    return name + ":" + format(fnv1a_32(val.encode()), "x")
def get_signature(_key, data):
    return hex(fnv1a_32(_key + data))[2:]
def hash_query(query_json, model_id):
    """Create an FNV-1a 32-bit hash from the query json and model_id."""
    unique_string = model_id + ':' + sorted_json_string(query_json)
    return fnv1a_32(unique_string.encode('utf-8'))
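# Hedged sketch of the sorted_json_string helper referenced above (and
# in _get_query_hash below): any deterministic serialization works, so
# that semantically equal queries hash identically. This json.dumps
# stand-in is an assumption, not the project's implementation.
import json


def sorted_json_string(obj):
    """Return a canonical string for a JSON-serializable object."""
    return json.dumps(obj, sort_keys=True, separators=(',', ':'))

# Key order no longer affects the hash:
# sorted_json_string({'a': 1, 'b': 2}) == sorted_json_string({'b': 2, 'a': 1})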
def fnv(elem):
    # Hash the element itself (elem must be bytes; encode str inputs
    # before calling).
    return fnv1a_32(elem) % numOfBits
def hash(hash_index, value, bits) -> int:
    return fnvhash.fnv1a_32((str(value) + str(hash_index)).encode()) % bits
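# Demo of the salted hash above: appending hash_index to the value
# derives several different hash functions from one FNV-1a
# implementation, the usual way to pick multiple Bloom filter bit
# positions. The parameters here are arbitrary.
bits = 1024
for i in range(3):
    print(hash(i, "example-key", bits))  # three positions in [0, 1024)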
def _get_query_hash(query_json):
    """Create an FNV-1a 32-bit hash from the query json."""
    return fnv1a_32(sorted_json_string(query_json).encode('utf-8'))
# Return the ngrams for the given word
def word2ngrams(word, min_n=3, max_n=3):
    word = '<' + word + '>'
    ngrams = [word]
    if len(word) > min_n + 1:
        for n in range(min_n, max_n + 1):
            for pos in range(0, len(word) - n + 1):
                ngrams.append(word[pos:pos + n])
    return ngrams


print(fnv1a_32(b'foo'))
print(fnv1a_32(b'ibis'))
print(fnv1a_32(b'prevedello'))

dt1 = np.dtype(('i4', [('bytes', 'u1', 4)]))
print(dt1)
# View the word's bytes as unsigned integers (np.int8('ibis') would raise).
print(np.frombuffer(b'ibis', dtype=np.uint8))

# # Get vocabulary
# tokenizer = Tokenizer()
# tokenizer.fit_on_texts(sentences)
# V = len(tokenizer.word_index) + 1
# vocab = tokenizer.word_index.keys()
def hash_shared_attrs_bytes(shared_attrs_bytes: bytes) -> int:
    """Return the hash of json encoded shared attributes."""
    return cast(int, fnv1a_32(shared_attrs_bytes))
def hash_shared_data_bytes(shared_data_bytes: bytes) -> int:
    """Return the hash of json encoded shared data."""
    return cast(int, fnv1a_32(shared_data_bytes))
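# Hedged usage sketch for the two helpers above: hash the already
# JSON-encoded payload, e.g. to deduplicate identical attribute blobs.
# The attrs dict and encoding choice below are illustrative only.
import json

attrs = {"unit_of_measurement": "°C", "friendly_name": "Kitchen"}
shared_attrs_bytes = json.dumps(attrs, sort_keys=True).encode("utf-8")
print(hash_shared_attrs_bytes(shared_attrs_bytes))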