def test_magic_str_unicode_bytes(self, p):
    """Check the magic string-conversion protocol on both Python majors."""
    if not utils.is_py2():
        # Python 3: __str__ must yield text, __bytes__ a byte string.
        assert utils.is_text(p.__str__())
        assert utils.is_byte_string(p.__bytes__())
    else:
        # Python 2: __str__ must yield bytes, __unicode__ text.
        assert utils.is_byte_string(p.__str__())
        assert utils.is_text(p.__unicode__())
    # The builtin conversions must agree on the rendered value.
    assert str(p) == "0"
    assert bytes(p) == b"0"
def wrap_urlopen(url, data=None, timeout=None):
    """Wrap urlopen() to record the response when communicating with a real CCU.

    Performs the real HTTP request, then persists the session-id-normalized
    request data, the HTTP status and the response body below
    ``resources_path`` so later test runs can replay the exchange offline.

    Returns a file-like object mimicking the urlopen() result: the response
    body wrapped in StringIO, with a ``getcode()`` attribute reporting the
    HTTP status code.
    """
    assert utils.is_byte_string(data)

    try:
        obj = urlopen(url, data=data, timeout=timeout)
        response = obj.read()
        http_status = obj.getcode()
    except HTTPError as e:
        # Record HTTP errors too: keep the status code and use the reason
        # phrase as the response body.
        response = e.reason.encode("utf-8")
        http_status = e.code
    assert utils.is_byte_string(response)

    if not os.path.exists(resources_path):
        os.makedirs(resources_path)

    # FIXME: The ccu is performing wrong encoding at least for output of
    # executed rega scripts. But maybe this is a generic problem. Let's see
    # and only fix the known issues for the moment.
    if b"ReGa.runScript" in data or b"Interface.getParamsetDescription" in data:
        response = pmatic.api.AbstractAPI._replace_wrong_encoded_json(
            response.decode("utf-8")).encode("utf-8")

    # Fake the session id to a fixed one for offline testing. This is needed
    # to make the recorded data change less frequently.
    fake_data = fake_session_id(data, data)
    fake_response = fake_session_id(data, response)

    # Ensure normalized sorting of keys.
    # For hashing we need a constant sorted representation of the data.
    # CCU API has always JSON, but pushover notify has urlencoded data.
    if "pushover.net" not in url:
        fake_data = json.dumps(json.loads(fake_data.decode("utf-8")),
                               sort_keys=True).encode("utf-8")

    # When json can not be parsed, write the original response to the file
    try:
        fake_response = json.dumps(json.loads(fake_response.decode("utf-8")),
                                   sort_keys=True).encode("utf-8")
    except ValueError:
        pass

    rid = request_id(fake_data)
    # Use context managers so each recorded file is flushed and closed
    # deterministically (the previous open(...).write(...) one-liners
    # leaked open file handles).
    with open(response_file_path(rid), "wb") as f:
        f.write(fake_response)
    with open(status_file_path(rid), "wb") as f:
        f.write(str(http_status).encode("utf-8"))
    with open(data_file_path(rid), "wb") as f:
        f.write(fake_data)

    # NOTE(review): StringIO is fed a byte string here — presumably an
    # aliased py2/py3-compat StringIO/BytesIO; confirm against the imports.
    obj = StringIO(response)
    obj.getcode = lambda: http_status
    return obj
def normalize_spec(d):
    """Return a normalized copy of the spec dict *d*.

    Keys are lower-cased and decoded to text; byte-string values (including
    list elements, which are converted in place) are decoded via the
    Decodeutf8 helper and some values are coerced to canonical types.  The
    keys "rf_address" and "rx_mode" are dropped from the result.
    """
    normalized = {}
    for raw_key in d.keys():
        value = d.get(raw_key)

        # Decode byte-string payloads to text first.
        if isinstance(value, list):
            for pos, entry in enumerate(value):
                value[pos] = Decodeutf8(entry)
        elif utils.is_byte_string(value):
            value = Decodeutf8(value)

        key = Decodeutf8(raw_key.lower())

        # Coerce known keys to their canonical value types.
        if key in ["aes_active", "roaming"]:
            value = value == 1
        elif key == "updatable":
            value = "%d" % value
        elif key in ["link_source_roles", "link_target_roles"]:
            value = value.split()
        elif key in ["rf_address", "rx_mode"]:
            continue  # intentionally not carried over

        normalized[key] = value
    return normalized
def normalize_spec(d):
    """Normalize the spec dict *d* in place and return it.

    Keys are decoded to lower-case text, byte-string values (including list
    elements) are decoded to text and some values are coerced to canonical
    types.  The keys "rf_address" and "rx_mode" are dropped.
    """
    # Iterate over a snapshot of the keys: the loop pops and re-inserts
    # entries, and mutating a dict while iterating its live key view raises
    # "RuntimeError: dictionary changed size during iteration" on Python 3.
    # (On Python 2, d.keys() returned an independent list, masking the bug.)
    for key in list(d.keys()):
        val = d.pop(key)
        if isinstance(val, list):
            for index, item in enumerate(val):
                val[index] = item.decode("utf-8")
        elif utils.is_byte_string(val):
            val = val.decode("utf-8")

        # NOTE(review): assumes incoming keys are byte strings
        # (bytes.lower() followed by .decode()) — confirm against callers.
        new_key = key.lower().decode("utf-8")

        if new_key in [ "aes_active", "roaming" ]:
            val = val == 1
        elif new_key == "updatable":
            val = "%d" % val
        elif new_key in [ "link_source_roles", "link_target_roles" ]:
            val = val.split()
        elif new_key in [ "rf_address", "rx_mode" ]:
            continue

        d[new_key] = val
    return d
def test_is_byte_string():
    """Verify utils.is_byte_string() on both Python major versions."""
    # A bytes literal is a byte string on every version.
    assert utils.is_byte_string(bytes(b"X"))
    if sys.version_info[0] != 3:
        # Python 2: str(...) produces a byte string, while the plain
        # literal is rejected — presumably unicode_literals is in effect
        # in this module, making "X" text; confirm against the imports.
        assert utils.is_byte_string(str("X"))
        assert not utils.is_byte_string("X")
    else:
        # Python 3: plain literals are text, b"..." literals are bytes.
        assert not utils.is_byte_string("X")
        assert utils.is_byte_string(b"X")
        assert utils.is_byte_string(bytes(b"X"))