def test_unflatten_with_list(self):
    """Dictionary with lists"""
    flat = {
        'a': 1,
        'b_0': 1,
        'b_1': 2,
        'c_a': 'a',
        'c_b_0': 1,
        'c_b_1': 2,
        'c_b_2': 3,
    }
    self.assertEqual(
        unflatten_list(flat),
        {'a': 1, 'b': [1, 2], 'c': {'a': 'a', 'b': [1, 2, 3]}})

    # A single indexed key still produces a one-element list.
    self.assertEqual(
        unflatten_list({'a': 1, 'b_0': 5}),
        {'a': 1, 'b': [5]})

    # Same thing with a custom separator.
    self.assertEqual(
        unflatten_list({'a': 1, 'b:0': 5}, ':'),
        {'a': 1, 'b': [5]})
def transform_py(**kwargs):
    """Download a CSV from S3, unflatten every row into nested JSON, and
    upload the result as newline-delimited JSON records back to S3.

    Expects ``s3_conn_id`` and ``s3_bucket`` in *kwargs* plus a
    ``templates_dict`` holding ``s3_key`` (source) and ``transformed_key``
    (destination).
    """
    conn_id = kwargs.get('s3_conn_id', None)
    templates = kwargs.get('templates_dict')
    source_key = templates.get('s3_key', None)
    dest_key = templates.get('transformed_key', None)
    bucket = kwargs.get('s3_bucket', None)

    hook = S3Hook(conn_id)
    # Stage the source object on local disk before parsing.
    key_obj = hook.get_key(source_key, bucket_name=bucket)
    key_obj.get_contents_to_filename('temp.csv')

    frame = pd.read_csv('temp.csv')
    rows = json.loads(frame.to_json(orient='records'))
    # Release the DataFrame early; only the plain records are needed.
    del frame

    lines = [json.dumps(unflatten_list(row)) for row in rows]
    hook.load_string(string_data='\n'.join(lines),
                     key=dest_key,
                     bucket_name=bucket,
                     replace=True)
def test_unflatten_with_list_custom_separator(self):
    """Complex dictionary with lists"""
    flat = {
        'a:b': 'str0',
        'c:0:d:0:e': 'str1',
        'c:1:d:0:e': 'str4',
        'c:1:f': 'str5',
        'c:0:f': 'str2',
        'c:1:g': 'str6',
        'c:0:g': 'str3',
        'h:d:0:e': 'str7',
        'h:i:0:f': 'str8',
        'h:i:0:g': 'str9',
    }
    # Numeric path segments become list indices; the rest become dict keys.
    self.assertEqual(
        unflatten_list(flat, ':'),
        {
            'a': {'b': 'str0'},
            'c': [
                {'d': [{'e': 'str1'}], 'f': 'str2', 'g': 'str3'},
                {'d': [{'e': 'str4'}], 'f': 'str5', 'g': 'str6'},
            ],
            'h': {
                'd': [{'e': 'str7'}],
                'i': [{'f': 'str8', 'g': 'str9'}],
            },
        })
def convert_data_to_dict(data, int_conversions=None, str_args=None, bool_args=None):
    """Translate excel-style flattened rows into nested dictionaries.

    The first row of *data* is a header whose cells are space-separated
    property paths; a segment starting with ``@`` marks a list item and is
    expanded with a running per-path counter.  Remaining rows are grouped by
    ``read_data_blocks`` and each block is yielded as one nested dict via
    ``unflatten_list``.

    :param data: list of rows (each row a list of cells); row 0 is the header.
    :param int_conversions: property names whose cells become ``int`` (empty -> None).
    :param str_args: property names whose empty cells become ``""``.
    :param bool_args: property names whose cells become ``cell == 'True'``
        (empty cells are skipped entirely).
    :return: generator of nested dictionaries, one per data block.
    """
    # Mutable default arguments ({}) replaced with the None sentinel.
    int_conversions = int_conversions if int_conversions is not None else {}
    str_args = str_args if str_args is not None else {}
    bool_args = bool_args if bool_args is not None else {}

    header = [cell.split() if cell else None for cell in data[0]]
    for block in read_data_blocks(data[1:]):
        ret = {}
        counter = Counter()
        for block_row in block:
            converted_row = {}
            # zip stops at the shorter of header/row; the unused range()
            # index from the original is dropped.
            for prop_path, val in zip(header, block_row):
                if not prop_path:
                    continue
                # Hoisted: the original re-joined prop_path up to 3 times.
                prop_name = ' '.join(prop_path)
                if prop_name in int_conversions:
                    val = int(val) if val else None
                elif prop_name in str_args:
                    val = val if val else ""
                elif prop_name in bool_args:
                    if val == '':
                        continue
                    val = val == 'True'
                flattened_path = []
                for item in prop_path:
                    if "@" in item:
                        # "@"-segments denote list entries: strip the marker
                        # and append a running index for this full path.
                        flattened_path.append(item[1:])
                        key = "_".join(prop_path)
                        flattened_path.append(str(counter[key]))
                        counter.update({key: 1})
                    else:
                        flattened_path.append(item)
                converted_row["_".join(flattened_path)] = val
            # Drop empty-string values.  The original used len(v) > 0, which
            # raised TypeError on the int/bool/None values produced by the
            # conversions above; non-strings are now kept unconditionally.
            converted_row = {k: v for k, v in converted_row.items()
                             if not isinstance(v, str) or v}
            ret.update(converted_row)
        yield unflatten_list(ret)
def test_unflatten_with_list_issue15(self):
    """https://github.com/amirziai/flatten/issues/15"""
    original = {
        "Required": {
            "a": "1",
            "b": ["1", "2", "3"],
            "c": {"d": {"e": [[{"s1": 1}, {"s2": 2}],
                              [{"s3": 1}, {"s4": 2}]]}},
            "f": ["1", "2"],
        },
        "Optional": {"x": "1", "y": ["1", "2", "3"]},
    }
    # Flattening then unflattening must round-trip the structure.
    self.assertEqual(unflatten_list(flatten(original)), original)
def flatten_json(self, data):
    """Unflatten ``__``-separated keys in *data* into nested structures
    and post-process them with ``self.addlist``.

    :param data: flat mapping with ``__``-joined key paths.
    :return: the nested structure produced by ``addlist``.
    :raises Exception: re-raised as ``Exception("SQL Error")`` on any
        failure (message preserved for existing callers).
    """
    logger.info("start flatten data")
    try:
        return self.addlist(unflatten_list(data, separator='__'))
    except Exception as e:
        logger.error(str(e))
        # Chain the original exception so the real cause is not lost
        # (the message "SQL Error" is kept for backward compatibility,
        # even though the failure may not be SQL-related).
        raise Exception("SQL Error") from e
def Json_Normalize(json_path, norm_path):
    """Normalize the JSON at *json_path* (per *norm_path*) and unflatten
    each normalized record on the ``%%%`` separator.

    :param json_path: path to the source JSON, passed to ``Normalize``.
    :param norm_path: normalization spec path, passed to ``Normalize``.
    :return: JSON string (indented, non-ASCII preserved) of the records.
    """
    # Comprehension replaces the original `for i in range(0, len(...))`
    # index loop; dead commented-out `return result` removed.
    result = [unflatten_list(record, separator='%%%')
              for record in Normalize(json_path, norm_path)]
    return json.dumps(result, indent=True, ensure_ascii=False)
def _row_to_item(row) -> dict:
    """Convert a database row into a nested item dict.

    Drops None-valued columns, renames ``uid`` to ``id`` (a ``uid``
    column is assumed present — KeyError otherwise, as before), then
    unflattens the ``.``-separated column names into nested dicts.
    """
    item = {name: row[name] for name in row.keys() if row[name] is not None}
    item['id'] = item.pop('uid')
    return unflatten_list(item, '.')
def test_unflatten_with_list_issue31(self):
    """https://github.com/amirziai/flatten/issues/31"""
    nested = {"testdict": {"seconddict": [["firstvalue", "secondvalue"],
                                          ["thirdvalue", "fourthvalue"]]}}
    # Round-trip through flatten/unflatten_list must be lossless.
    self.assertEqual(unflatten_list(flatten(nested)), nested)
def test_unflatten_with_list_issue15(self):
    """https://github.com/amirziai/flatten/issues/15"""
    nested = {
        "Required": {"a": "1",
                     "b": ["1", "2", "3"],
                     "c": {"d": {"e": [[{"s1": 1}, {"s2": 2}],
                                       [{"s3": 1}, {"s4": 2}]]}},
                     "f": ["1", "2"]},
        "Optional": {"x": "1", "y": ["1", "2", "3"]},
    }
    flat = flatten(nested)
    # Unflattening the flattened dict must restore the original.
    self.assertEqual(unflatten_list(flat), nested)
def test_unflatten_with_list_issue31(self):
    """https://github.com/amirziai/flatten/issues/31"""
    original = {
        "testdict": {
            "seconddict": [
                ["firstvalue", "secondvalue"],
                ["thirdvalue", "fourthvalue"],
            ]
        }
    }
    flat = flatten(original)
    restored = unflatten_list(flat)
    self.assertEqual(restored, original)
def test_unflatten_with_list_deep(self):
    """Deeply nested lists/dicts survive a flatten/unflatten round-trip."""
    original = {'a': [{'b': [{'c': [{'a': 5,
                                     'b': {'a': [1, 2, 3]},
                                     'c': {'x': 3}}]}]}]}
    self.assertEqual(unflatten_list(flatten(original)), original)
def convert_data_to_dict(data: list,
                         str_args: tuple = tuple(),
                         int_conversions: tuple = tuple(),
                         bool_args: tuple = tuple()
                         ) -> list:
    """
    Translate an excel-style flattened table into nested dictionaries.

    The first row of *data* is the header of flattened key names; every
    following row becomes one record.  Cells whose header appears in
    *str_args* / *int_conversions* / *bool_args* are coerced to
    ``str`` / ``int`` / ``bool`` respectively; other cells pass through
    unchanged.  Falsy cells (empty string, 0, None, False) are skipped,
    matching the original behaviour.

    :param data: list of lists, where each inner list is one excel row.
    :param str_args: header names to coerce to ``str``.
    :param int_conversions: header names to coerce to ``int``.
    :param bool_args: header names to coerce to ``bool``.
    :return: list of nested dictionaries (one per data row).
    """
    header, *rows = data
    records = []
    for row in rows:
        record = {}
        for idx, cell in enumerate(row):
            if not cell:
                continue
            name = header[idx]
            if name in str_args:
                record[name] = str(cell)
            elif name in int_conversions:
                record[name] = int(cell)
            elif name in bool_args:
                record[name] = bool(cell)
            else:
                record[name] = cell
        records.append(record)
    return [unflatten_list(record) for record in records]
import redis

# Connect to Redis.  decode_responses=True makes hgetall() return str
# keys/values instead of bytes; unflatten_list() needs str keys to split
# them on its separator (the original returned bytes and would fail).
r = redis.Redis(host='yourhost', password='******', decode_responses=True)

# Parse the JSON file path out of the command line arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--json', nargs=1, help="JSON file",
                    type=argparse.FileType('r'))
arguments = parser.parse_args()

# Flatten the loaded JSON document.
flat_json = flatten(json.load(arguments.json[0]))

# Write it to Redis as a hash.  hset(mapping=...) replaces the
# deprecated hmset() (removed in newer redis-py releases).
my_hash_key = 'pyjson'
r.hset(my_hash_key, mapping=flat_json)
print('Wrote to key: ' + my_hash_key)

# Fetch it back from Redis.
flat_json_from_redis = r.hgetall(my_hash_key)
print('Read flat version from key: ' + my_hash_key)
print(flat_json_from_redis)

# Take the flat version and convert it back to a nested dict.
unflattened_json = unflatten_list(flat_json_from_redis)
print('Unflattened to:')
print(unflattened_json)
def GenerateTemplate(SCM: dict, extensions: dict, legacy: bool = False):
    """
    Build a DynamicComponents-AI2 JSON template from a parsed .scm project.

    :param SCM: parsed App Inventor .scm dictionary; must contain
        "Properties" (with "AppName") and "authURL".
    :param extensions: mapping of extension class name -> full internal
        name, e.g. {"HelloWorld": "io.foo.HelloWorld"}.
    :param legacy: accepted for interface compatibility; currently unused.
    :return: the template dictionary with "keys" and "components" filled.
    """
    # Template skeleton that will be filled in below.
    template = {
        # Use app name as template name.
        "name": SCM["Properties"]["AppName"],
        # Current metadata version; needs to be 1 until a new type of
        # metadata releases.
        "metadata-version": 1,
        # Extension version that this template is generated for.
        "extension_version": 5,
        # Template author name.
        "author": "<your name>",
        # List of AI2 distributions that this template will work on.
        "platforms": SCM["authURL"],
        # Extensions used in this template along with their class names.
        "extensions": extensions,
        # Template parameters; collected automatically from the SCM.
        "keys": [],
        # Components to be created; generated automatically below.
        "components": []
    }

    # Modified flat JSON, keyed by "/"-joined paths.
    flatten_json = {}

    for key, value in flatten(SCM["Properties"], "/").items():
        k = str(key)
        val = value

        # Skip Uuid/$Version keys; DynamicComponents-AI2 extension's JSON
        # templates don't need them.
        if k.endswith("/Uuid") or k.endswith("/$Version"):
            continue

        # Rename path segments to match the template structure:
        #   $Components --> components
        k = k.replace("/$Components/", "/components/")
        #   $Name --> id, $Type --> type
        if k[-5:] in ["$Name", "$Type"]:
            k = k.replace("/$Name", "/id").replace("/$Type", "/type")
        # Everything else is a property; move it under "properties":
        #   components/Button/Text --> components/Button/properties/Text
        else:
            path = k.split("/")
            path.insert(-1, "properties")
            k = "/".join(path)

        # Collect template parameters: single-brace placeholders such as
        # {text}, {age}, {color} (doubled braces are treated as escapes).
        for parameter in re.findall(r'(?<=(?<!\{)\{)[^{}]*(?=\}(?!\}))',
                                    str(value) + " " + k):
            if parameter not in template["keys"]:
                template["keys"].append(parameter)

        # Try to convert the value automatically, so "True"/"False" become
        # bools and so on.  Narrowed from a bare "except:" so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        try:
            val = ast.literal_eval(value)
        except Exception:
            pass

        # Convert "&H..." color literals into packed color values.
        if str(val).startswith("&H"):
            val = str(val)[2:]
            # 6 hex digits mean RRGGBB with an implicit FF alpha; pad so the
            # slices below always see AARRGGBB.  (The original sliced
            # [2:4]/[4:6]/[6:] regardless of length, which crashed on
            # 6-digit colors with int('', 16).)
            if len(val) == 6:
                val = "FF" + val
            alpha = int(val[0:2], 16)
            val = BuildColor(int(val[2:4], 16),
                             int(val[4:6], 16),
                             int(val[6:8], 16),
                             alpha)

        # If the component type is a listed extension, use its full internal
        # name — it is an external package that doesn't exist in the App
        # Inventor sources.
        if k.endswith("/type") and (val in extensions):
            val = extensions[val]

        # Record the transformed key/value pair.
        flatten_json[k] = val

    # Unflatten the modified dictionary and keep only the component tree.
    template["components"] = unflatten_list(flatten_json, "/")["$Components"]

    # Remove DynamicComponent instances from the template; not needed.
    for component in template["components"].copy():
        if component["type"] == "DynamicComponents":
            if component in template["components"]:
                template["components"].remove(component)

    return template
def test_unflatten_with_list_deep(self):
    """Deep list/dict nesting round-trips through flatten/unflatten_list."""
    nested = {
        'a': [{
            'b': [{
                'c': [{
                    'a': 5,
                    'b': {'a': [1, 2, 3]},
                    'c': {'x': 3},
                }],
            }],
        }],
    }
    flat = flatten(nested)
    self.assertEqual(unflatten_list(flat), nested)
def test_unflatten_with_list_nested(self):
    """Lists of lists of dicts round-trip through flatten/unflatten_list."""
    original = {"a": [[{"b": 1}], [{"d": 1}]]}
    restored = unflatten_list(flatten(original))
    self.assertEqual(restored, original)
def start_assay(self, set_path):
    """Start a mouse-joystick assay from the set csv file at *set_path*.

    Sends the controller clock and trial blocks from the csv to the
    joystick controller, opens the trials.csv output file, starts the
    assay, and schedules periodic polling for unread data.  Returns
    early (printing a message) if an assay is already running, the csv
    file is missing, or its header does not match the expected fields.
    """
    # Guard: only one assay may run at a time.
    if self._assay_running:
        print('Assay already running.')
        return
    print('Starting assay...')
    print('Setting time.')
    # Sync the controller clock to the host's current unix time.
    self.mouse_joystick_controller.set_time(int(time.time()))
    print('Clearing any previous set from controller.')
    self.mouse_joystick_controller.clear_set()
    print('Importing new set csv file.')
    self._set_path = pathlib.Path(set_path).expanduser()
    if self._set_path.exists():
        print('Set csv file found. {0}'.format(self._set_path))
    else:
        print('Set csv file does not exist! {0}'.format(self._set_path))
        return
    print('Sending new set to controller.')
    with open(self._set_path) as set_csvfile:
        setreader = csv.DictReader(set_csvfile)
        # Validate the header once, on the first data row.
        checked_header = False
        for block_flat in setreader:
            if not checked_header:
                if set(block_flat.keys()) == set(self._block_fieldnames):
                    print('Set csv file header is correct.')
                else:
                    print('Set csv file header does not match this: {0}'.
                          format(self._block_fieldnames))
                    return
                checked_header = True
            # Rebuild nested structure from the flattened csv column names.
            block_unflattened = flatten_json.unflatten_list(
                block_flat, separator=self._SEPARATOR)
            # csv values are strings; convert scalars and list elements to int.
            block = {}
            for key, value in block_unflattened.items():
                if isinstance(value, str):
                    block[key] = int(value)
                elif isinstance(value, list):
                    block[key] = [int(element) for element in value]
            block_added = self.mouse_joystick_controller.add_block_to_set(
                block['repeat_trial_count'],
                block['pull_torque'],
                block['lickport_reward_duration'],
                block['reach_position'])
            # The controller echoes the block back; verify the round-trip.
            if block_added == block:
                print('Added block to set. {0}'.format(block_added))
            else:
                raise RuntimeError(
                    'Block added does not equal block in csv file!')
    print('Setting up trial csv output file.')
    # Each assay gets its own timestamped output directory.
    self._assay_path = self._base_path / self._get_date_time_str()
    self._assay_path.mkdir(parents=True, exist_ok=True)
    trials_filename = 'trials.csv'
    trials_path = self._assay_path / trials_filename
    # NOTE(review): file is held open for the assay's duration; presumably
    # closed by self._cleanup() — confirm.
    self._trials_file = open(trials_path, 'w')
    self._trials_writer = csv.DictWriter(
        self._trials_file, fieldnames=self._trials_fieldnames)
    self._trials_writer.writeheader()
    print('Trials.csv file created. {0}'.format(trials_path))
    self._assay_running = self.mouse_joystick_controller.start_assay()
    if self._assay_running:
        # Poll the controller for unread trial data on a timer.
        self._check_for_unread_data_timer = Timer(
            self._CHECK_FOR_UNREAD_DATA_PERIOD,
            self._check_for_unread_data)
        self._check_for_unread_data_timer.start()
        print('Assay running!')
    else:
        print('Assay not running!')
        self._cleanup()
def delinearize(flat_dict, separator="."):
    """
    Return a nested dictionary built from the provided
    flattened/linearized dictionary.

    :param flat_dict: flat mapping whose keys are *separator*-joined
        paths.  (Renamed from ``dict``, which shadowed the builtin;
        positional callers are unaffected.)
    :param separator: key-path separator, defaults to ``"."``.
    :return: nested dictionary (lists rebuilt where path segments are
        integer indices).
    """
    return flatten_json.unflatten_list(flat_dict, separator)