def load_from_config(find_dict, prefix=None, path=default_config_path, allow_sub_keys=False):
    if not prefix:
        prefix = tuple()
    config = load_config(path=path)
    if not config:
        return {}
    found_config_values = {}
    flat_find_dict = flatten_dict.flatten(find_dict, keep_empty_types=(dict,))
    flat_config = flatten_dict.flatten(config, keep_empty_types=(dict,))
    for find_key, _ in flat_find_dict.items():
        for flat_key, flat_value in flat_config.items():
            prefixed_key = prefix + find_key
            intersection = tuple(
                v for i, v in enumerate(prefixed_key)
                if i < len(flat_key) and v == flat_key[i]
            )
            difference = tuple(v for v in flat_key if v not in intersection)
            # HACK: only append differences at the current depth of the
            # config, i.e. don't allow sub-dict appends unless
            # allow_sub_keys is set.
            if prefixed_key == intersection and (allow_sub_keys or not difference):
                sub_key = find_key + difference
                found_config_values[sub_key] = flat_value
    return flatten_dict.unflatten(found_config_values)
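# Usage sketch for load_from_config (hedged; `load_config` and
# `default_config_path` come from the surrounding module and are not shown).
# With a config file like
#     {'service': {'db': {'host': 'localhost', 'port': 5432}}}
# an exact-depth lookup returns only the requested leaves:
#     load_from_config({'db': {'host': None}}, prefix=('service',))
#     # -> {'db': {'host': 'localhost'}}
# while allow_sub_keys=True also pulls in keys nested below the match:
#     load_from_config({'db': {}}, prefix=('service',), allow_sub_keys=True)
#     # -> {'db': {'host': 'localhost', 'port': 5432}}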
def summary(
        cls,
        resources_current: typing.Mapping,
        resources_desired: typing.Mapping,
        plan: "Plan",
) -> typing.Dict[str, "Plan.Outcome"]:
    resources = cls._resources(resources_current, resources_desired)
    resources_current_flat = flatten_dict.flatten(
        resources_current, reducer=lambda *x: x[-1])
    resources_desired_flat = flatten_dict.flatten(
        resources_desired, reducer=lambda *x: x[-1])
    summary_unsorted: typing.Dict[str, typing.Dict] = {
        "folder": {},
        "dashboard": {},
    }
    for resource in resources:
        if resource not in plan:
            continue
        if resource not in resources_desired_flat:
            summary_unsorted[resources_current_flat[resource]._kind].update(
                {resource: Plan.Outcome.REMOVE})
            continue
        if resource not in resources_current_flat:
            summary_unsorted[resources_desired_flat[resource]._kind].update(
                {resource: Plan.Outcome.CREATE})
            continue
        summary_unsorted[resources_desired_flat[resource]._kind].update(
            {resource: Plan.Outcome.UPDATE})
    summary_sorted: typing.Dict[str, Plan.Outcome] = collections.OrderedDict()
    for kind in ("folder", "dashboard"):
        summary_sorted.update(**summary_unsorted[kind])
    return summary_sorted
def _get_feed_dict(self, iteration, batch):
    """Construct a TensorFlow feed dictionary from a sample batch."""
    batch_flat = flatten(batch)
    placeholders_flat = flatten(self._placeholders)
    samp_feed_dict = {
        placeholders_flat[key]: batch_flat[key]
        for key in placeholders_flat.keys()
        if key in batch_flat.keys()
    }
    samp_feed_dict[self._placeholders['rewards']] = np.zeros(
        batch['rewards'].shape)
    # TODO: Allow for different ratio of data
    goal_feed_dict = self._get_goal_feed_dict(
        batch_size=batch_flat[next(iter(batch_flat))].shape[0])
    feed_dict = {
        key: np.concatenate([samp_feed_dict[key], goal_feed_dict[key]],
                            axis=0)
        for key in samp_feed_dict
    }
    if iteration is not None:
        feed_dict[self._placeholders['iteration']] = iteration
    feed_dict[self._placeholders['reward']['running_ext_rew_std']] = (
        self._running_ext_rew_std)
    if self._rnd_int_rew_coeff:
        feed_dict[self._placeholders['reward']['running_int_rew_std']] = (
            self._running_int_rew_std)
    return feed_dict
def __init__(self, configfile: str, fix_values, fix_missing, write=False):
    """
    Creates a config object from a json file

    :param configfile: the config filename (json)
    :param fix_values: a function to resolve values of the wrong type
    :param fix_missing: a function to resolve missing values
    :param write: writes changes (fix_values and fix_missing function
        results) to the file
    """
    parsed = load_config(configfile)
    flat = flatten(parsed, 'path')
    alloweds = flatten(internalconf.CONFIG_TYPES, 'path')
    buffer = dict()
    for key, _type in alloweds.items():
        if key not in flat.keys() or key is None:
            value = fix_missing(key)
        elif type(flat[key]) is not _type:
            value = fix_values(key)
        else:
            value = flat[key]
        buffer[key] = value
    if write:
        with open(config_path(configfile), 'w') as f:
            logging.info("Writing changes to the config file...")
            f.write(json.dumps(unflatten(buffer, 'path'), indent=4))
    # On Windows, flatten uses \\ for paths instead of /. Fix that.
    self._dict = {k.replace('\\', '/'): v for k, v in buffer.items()}
def _get_feed_dict(self, iteration, batch):
    batch_flat = flatten(batch)
    placeholders_flat = flatten(self._placeholders)

    # if np.random.rand() < 1e-4 and 'pixels' in batch['observations']:
    #     import os
    #     from skimage import io
    #     random_idx = np.random.randint(
    #         batch['observations']['pixels'].shape[0])
    #     image_save_dir = os.path.join(os.getcwd(), 'pixels')
    #     image_save_path = os.path.join(
    #         image_save_dir, f'observation_{iteration}_batch.png')
    #     if not os.path.exists(image_save_dir):
    #         os.makedirs(image_save_dir)
    #     io.imsave(image_save_path,
    #               batch['observations']['pixels'][random_idx].copy())

    feed_dict = {
        placeholders_flat[key]: batch_flat[key]
        for key in placeholders_flat.keys()
        if key in batch_flat.keys()
    }
    if iteration is not None:
        feed_dict[self._placeholders['iteration']] = iteration
    ext_rew_std_ph = self._placeholders['reward'][
        f'running_ext_rew_std_{self._goal_index}']
    feed_dict[ext_rew_std_ph] = self._running_ext_rew_stds[self._goal_index]
    if self._rnd_int_rew_coeffs[self._goal_index]:
        int_rew_std_ph = self._placeholders['reward'][
            f'running_int_rew_std_{self._goal_index}']
        feed_dict[int_rew_std_ph] = self._running_int_rew_stds[
            self._goal_index]
    return feed_dict
def _get_feed_dict(self, iteration, batch):
    """Construct a TensorFlow feed dictionary from a sample batch."""
    batch_flat = flatten(batch)
    placeholders_flat = flatten(self._placeholders)
    feed_dict = {
        placeholders_flat[key]: batch_flat[key]
        for key in placeholders_flat.keys()
        if key in batch_flat.keys()
    }
    if self._goal_classifier:
        # The goal angle is encoded as (sin, cos) in the last two
        # observation columns; recover it here so `goals` is defined for
        # both branches below.
        goal_sin = batch['observations'][:, -2].reshape((-1, 1))
        goal_cos = batch['observations'][:, -1].reshape((-1, 1))
        goals = np.arctan2(goal_sin, goal_cos)
        if 'images' in batch.keys():
            images = batch['images']
        else:
            images = batch['observations'][:, :32 * 32 * 3].reshape(
                (-1, 32, 32, 3))
        feed_dict[self._placeholders['rewards']] = self._classify_as_goals(
            images, goals)
    else:
        feed_dict[self._placeholders['rewards']] = batch['rewards']
    if iteration is not None:
        feed_dict[self._placeholders['iteration']] = iteration
    return feed_dict
def _diff_dicts(old_dict, new_dict):
    old_default = None
    new_default = None

    if isinstance(new_dict, dict):
        new = flatten(new_dict, reducer=_dot)
    else:
        new = defaultdict(lambda: "not a dict")
        new_default = "unable to parse"

    if isinstance(old_dict, dict):
        old = flatten(old_dict, reducer=_dot)
    else:
        old = defaultdict(lambda: "not a dict")
        old_default = "unable to parse"

    res = defaultdict(dict)
    xpaths = set(old.keys())
    xpaths.update(set(new.keys()))
    for xpath in xpaths:
        old_val = old.get(xpath, old_default)
        new_val = new.get(xpath, new_default)
        val_diff = _diff_vals(old_val, new_val)
        if val_diff:
            res[xpath] = val_diff
    return dict(res)
def format_yaml(yaml, **kwargs):
    """Formats a yaml template.

    Example usage:
        format_yaml('{"abc": ${x.y}}', x={'y': 123})
        # output should be '{"abc": 123}'
    """
    template = _YamlTemplateOnlyFillWorkflow(yaml)
    try:
        # Check that the variables visible to the workflow_template module
        # are consistent with the placeholders in the string.
        format_workflow = template.substitute(
            flatten(kwargs or {}, reducer='dot'))
    except KeyError as e:
        raise ValueError(f'Unknown placeholder: {e.args[0]}') from e
    template = _YamlTemplateFillAll(format_workflow)
    try:
        # Check that the remaining placeholders are valid and format them
        # with {} so the next step (the JSON format check) can proceed.
        return template.substitute(flatten(kwargs or {}, reducer='dot'))
    except ValueError as e:
        raise ValueError(f'Wrong placeholder: {str(e)}. '
                         f'Origin yaml: {format_workflow}')
def add_path(self, path):
    path = path.copy()
    path_flat = flatten(path)
    path_length = path_flat[next(iter(path_flat.keys()))].shape[0]
    path.update({
        'episode_index_forwards': np.arange(
            path_length,
            dtype=self.fields['episode_index_forwards'].dtype
        )[..., None],
        'episode_index_backwards': np.arange(
            path_length,
            dtype=self.fields['episode_index_backwards'].dtype
        )[::-1, None],
        'relabeled': np.array([False] * path_length)[:, None],
    })
    self.add_samples(path)

    path = self._environment.relabel_path(path.copy())
    path_flat = flatten(path)
    path_length = path_flat[next(iter(path_flat.keys()))].shape[0]
    path.update({
        'episode_index_forwards': np.arange(
            path_length,
            dtype=self.fields['episode_index_forwards'].dtype
        )[..., None],
        'episode_index_backwards': np.arange(
            path_length,
            dtype=self.fields['episode_index_backwards'].dtype
        )[::-1, None],
        'relabeled': np.array([True] * path_length)[:, None],
    })
    self.add_samples(path)
def plan(
        cls,
        resources_current: typing.Mapping[str, ResourceGroup],
        resources_desired: typing.Mapping[str, ResourceGroup],
) -> "Plan":
    resources = cls._resources(resources_current, resources_desired)
    resources_current_flat = flatten_dict.flatten(
        resources_current, reducer=lambda *x: x[-1])
    resources_desired_flat = flatten_dict.flatten(
        resources_desired, reducer=lambda *x: x[-1])
    plan_dict = {}
    for resource in resources:
        try:
            current = resources_current_flat[resource].serialized
        except KeyError:
            current = {}
        try:
            desired = resources_desired_flat[resource].serialized
        except KeyError:
            desired = {}
        diff = list(
            dictdiffer.diff(current, desired, expand=True,
                            dot_notation=False))
        if diff:
            plan_dict.update({resource: cls._normalize(diff)})
    plan = cls(plan_dict)
    return plan
def compare_dict_list(lhs, rhs, exclude=[], order=None):
    """Compare two lists of dicts; intended to be used with pytest."""
    exc = []
    for x in exclude:
        if isinstance(x, str):
            exc.append((x,))
        else:
            exc.append(x)
    exclude = exc
    rhs = sorted(rhs, key=lambda x: x[order]) if order else rhs
    lhs = sorted(lhs, key=lambda x: x[order]) if order else lhs
    frhs = [flatten(x) for x in rhs]
    flhs = [flatten(x) for x in lhs]
    print()
    for ld, rd in zip(flhs, frhs):
        for k, v in ld.items():
            if k in exclude:
                continue
            assert v == rd[k], f"{k}: {v} != {rd[k]}"
    return True
def normalize_dict_as_bag_of_words(master_db):
    '''
    The goal of this function is to return a bag-of-words representation.

    parameters:
    -----------
    master_db: list of dictionaries containing any number of columns
        (nested dicts, integers, text, etc.).

    returns:
    --------
    master_db: list of dictionaries containing the renamed features and
        their repetition values, with their information context maintained.
    '''
    if verbose:
        print("Normalizing master_db..")
    master_db = [flatten(report, reducer='underscore')
                 for report in master_db]
    for pos, report in enumerate(master_db):
        print("\r\t> Progress\t:{:.2%}".format(pos / len(master_db)),
              end='', flush=True)
        for key in report:
            if not any(value in key for value in essential_keys):
                report[key] = normalized_value(report[key])
    print()
    master_db = [flatten(report, reducer='underscore')
                 for report in master_db]
    return master_db
def _df(self):
    """Return df."""
    data = []
    for idx in range(len(self)):
        _data = self.__getitem__(idx, remove_internal_columns=False)
        assert isinstance(_data, dict)
        # Expand the contents of key `self.orient`
        if self.orient in _data.keys():
            if isinstance(_data[self.orient], list):
                assert len(_data[self.orient]) == 1
                if isinstance(_data[self.orient][0], dict):
                    value = flatten(next(iter(_data[self.orient])),
                                    reducer='dot')
                    _data = self._merger.merge(_data, value)
                else:
                    _data[self.orient] = _data[self.orient][0]
            elif isinstance(_data[self.orient], dict):
                assert len(_data[self.orient]) == 1
                key = next(iter(_data[self.orient].keys()))
                value = flatten(next(iter(_data[self.orient].values())),
                                reducer='dot')
                _data[self.orient] = key
                _data = self._merger.merge(_data, value)
            else:
                pass
        data.append(_data)
    df = self._df_from_dicts(data)
    return df
def create_configuration_file_defaults(config_name, location_path,
                                       config_type, defaults):
    if not config_name:
        logger.warning('Cannot create config from None config')
        return None
    if config_type not in VALID_CONFIGURATION_TYPES:
        logger.warning(
            'Cannot load invalid config type "{}"'.format(config_type))
        return None
    if not location_path:
        location_path = pathlib.Path('~')
    config_file_path = location_path.joinpath('{}.{}'.format(
        config_name, config_type))
    if '~' in str(config_file_path):
        config_file_path = config_file_path.expanduser()
    if config_file_path.is_file():
        if config_type == CONFIGURATION_TYPE_JSON:
            with open(config_file_path) as config_file:
                configs = json.load(config_file)
        else:
            logger.warning(
                'Unable to load config file {} of unknown type {}'.format(
                    config_file_path, config_type))
            return None
    else:
        configs = {}
    expected_configs_keys = set(flatten_dict.flatten(defaults).keys())
    existing_configs_keys = set(flatten_dict.flatten(configs).keys())
    missing_keys = expected_configs_keys - existing_configs_keys
    if missing_keys:
        # Fill any keys missing from the file with their default values.
        flat_configs = flatten_dict.flatten(configs)
        flat_defaults = flatten_dict.flatten(defaults)
        for key in missing_keys:
            flat_configs[key] = flat_defaults[key]
        configs = flatten_dict.unflatten(flat_configs)
    if config_file_path.parent.is_dir():
        config_file_path.touch()
    return configs
def test_integration():
    for _ in range(100):
        for device in ("cpu", "cuda"):
            obs_space = create_random_space()
            act_space = create_random_space()
            buf = ReplayBuffer(obs_space, act_space, int(1e5), 1, device)
            print(buf.log_hyperparams())
            print("OBSSPEC", obs_space)
            print("ACTSPEC", act_space)
            step = {
                "obs": torchify(obs_space.sample(), device),
                "act": torchify(act_space.sample(), device),
                "rew": torchify(1.0, device),
                "next_obs": torchify(obs_space.sample(), device),
                "done": torchify(0, device),
            }
            buf.add(step)
            step2 = buf.sample()
            step = flatten(step)
            step2 = flatten(step2)
            assert step.keys() == step2.keys()
            for k in step:
                assert torch.all(step[k].cpu() == step2[k].cpu())
            print(buf.log_epoch())
def undo(self) -> None:
    section: TableauSection = self.get_section()
    cell = self.get_cell()
    # self.redo_command: str = "remove_one_column"
    prev = flatten(cell)
    content = flatten(self.content)
    to_update = unflatten({k: prev[k] for k in content})
    section.update_cell(self.y, self.x, to_update, self.cursor_avant)
def r_flatten(json_dict):
    f_dict = flatten(json_dict, reducer=comma_reducer)
    # Repeatedly expand list values into enumerated '#0', '#1', ... sub-dicts
    # and re-flatten until no list values remain.
    while any(type(v) == list for v in f_dict.values()):
        for k, v in f_dict.items():
            if type(v) == list:
                f_dict[k] = dict(('#' + str(p), e) for p, e in enumerate(v))
        f_dict = flatten(f_dict, reducer=comma_reducer)
    return f_dict
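# Usage sketch for r_flatten (hedged: assumes `comma_reducer` joins key
# parts with commas, e.g. 'a,b'). Lists at any depth become enumerated
# sub-keys:
#     r_flatten({'a': {'b': [1, {'c': 2}]}})
#     # -> {'a,b,#0': 1, 'a,b,#1,c': 2}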
def test_log_tanh():
    obs_spec = Box(low=np.zeros(10, dtype=np.float32),
                   high=np.ones(10, dtype=np.float32))
    act_spec = Box(low=np.zeros(3, dtype=np.float32),
                   high=np.ones(3, dtype=np.float32))
    actor = TanhGaussianActor(obs_spec, act_spec, [60, 50])
    obs = torch.rand((100, 10))
    actor.action(obs)
    print(flatten(actor.log_hyperparams()).keys())
    print(flatten(actor.log_epoch()).keys())
def _update_config(current_config_json, value={}):
    changes = False
    current_config = current_config_json.copy()
    flatten_config = flatten(current_config)
    for k, v in flatten(value).items():
        if k in flatten_config:
            flatten_config[k] = v
    updated_config = unflatten(flatten_config)
    if flatten(current_config_json) != flatten_config:
        changes = True
    return updated_config, changes
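# Usage sketch for the _update_config above, assuming
# `from flatten_dict import flatten, unflatten`. Only keys that already
# exist in the config are updated; unknown keys in `value` are ignored:
#     _update_config({'a': {'b': 1, 'c': 2}}, {'a': {'b': 10}, 'd': 3})
#     # -> ({'a': {'b': 10, 'c': 2}}, True)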
def REPLACE_FULL_OBSERVATION(original_batch,
                             resampled_batch,
                             where_resampled,
                             environment):
    batch_flat = flatten(original_batch)
    resampled_batch_flat = flatten(resampled_batch)
    goal_keys = [key for key in batch_flat.keys() if key[0] == 'goals']
    for key in goal_keys:
        assert (batch_flat[key][where_resampled].shape
                == resampled_batch_flat[key].shape)
        batch_flat[key][where_resampled] = resampled_batch_flat[key]
    return unflatten(batch_flat)
def merge(self, other):
    """
    Merges this search space with another. Useful only if
    ``self.reuse_parameters`` is ``True``.

    If the ``other`` space instance has a layer which this one doesn't,
    such layers get added to this space instance. If both ``other`` and
    ``self`` contain the same layers, their parameters get averaged and
    written to this space's parameters.
    """
    new_count, modified_count = 0, 0
    flat_index = flatten(self.layer_index)
    for key in flatten(other.layer_index):
        if key in flat_index:
            l1 = self.get_layer(*key)
            l2 = other.get_layer(*key)
            assert l1.weight.shape == l2.weight.shape
            l1.weight.data.add_(l2.weight.data)
            l1.weight.data.div_(2)
            if self.bias:
                l1.bias.data.add_(l2.bias.data)
                l1.bias.data.div_(2)
            self.layers[self.get_index(*key)] = l1
            modified_count += 1
            usage_count = other._get_value(other.usage_count, *key)
            usage_count += self._get_value(self.usage_count, *key)
            creation_time = other._get_value(other.layer_created, *key)
            creation_time = min(creation_time,
                                self._get_value(self.layer_created, *key))
            last_used = other._get_value(other.last_used, *key)
            last_used = max(last_used,
                            self._get_value(self.last_used, *key))
        else:
            layer = other.get_layer(*key)
            self._set_value(self.layer_index, len(self.layers), *key)
            self.layers.append(layer)
            usage_count = other._get_value(other.usage_count, *key)
            creation_time = other._get_value(other.layer_created, *key)
            last_used = other._get_value(other.last_used, *key)
            new_count += 1
        self._set_value(self.usage_count, usage_count, *key)
        self._set_value(self.layer_created, creation_time, *key)
        self._set_value(self.last_used, last_used, *key)
    self.log_info(
        f'Space was merged with another. {new_count} new layers have been '
        f'added; {modified_count} modified.')
def missing_fields(input_dict, required_fields, verbose=False, throw=False):
    missing = {}
    flat_input_fields = flatten_dict.flatten(input_dict)
    flat_required_fields = flatten_dict.flatten(required_fields)
    for k in flat_required_fields:
        # Not present or not set
        if not present_in(k, flat_input_fields) or not flat_input_fields[k]:
            missing[k] = None
    return flatten_dict.unflatten(missing)
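# Usage sketch for missing_fields (hedged: assumes `present_in(key, d)`
# tests membership of the flattened key). Required fields that are absent
# or falsy come back as None leaves:
#     missing_fields({'a': {'b': 1}}, {'a': {'b': 1, 'c': 1}, 'd': 1})
#     # -> {'a': {'c': None}, 'd': None}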
def generate_oci_config(overwrite_with_empty=False, **kwargs):
    config = default_oci_config
    if kwargs:
        flat = flatten_dict.flatten(config)
        other_flat = flatten_dict.flatten(kwargs)
        for k, v in other_flat.items():
            if not v and overwrite_with_empty:
                flat[k] = v
            if v:
                flat[k] = v
        config = flatten_dict.unflatten(flat)
    return config
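# Usage sketch (hedged; `default_oci_config` is module state not shown
# here). Truthy kwargs always overwrite the default value; falsy ones do
# so only when explicitly requested:
#     generate_oci_config(region='us-phoenix-1')   # overwrites 'region'
#     generate_oci_config(region='', overwrite_with_empty=True)  # blanks it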
def _update_config(config, update):
    config = flatten(config)
    update = flatten(update)
    for opt, val2 in update.items():
        if isinstance(val2, list) and opt in config:
            val1 = config[opt]
            assert isinstance(val1, list)
            config[opt] = val1 + val2  # Append update for lists
        else:
            config[opt] = val2
    return unflatten(config)
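# Usage sketch for this _update_config variant, assuming
# `from flatten_dict import flatten, unflatten`. List-valued options are
# concatenated; everything else is overwritten:
#     _update_config({'m': {'layers': [64, 64], 'lr': 1e-3}},
#                    {'m': {'layers': [32], 'lr': 3e-4}})
#     # -> {'m': {'layers': [64, 64, 32], 'lr': 0.0003}}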
def _resources(
        cls,
        resources_current: typing.Mapping[str, ResourceGroup],
        resources_desired: typing.Mapping[str, ResourceGroup],
) -> typing.Set[str]:
    resources_current_flat = flatten_dict.flatten(
        resources_current, reducer=lambda *x: x[-1])
    resources_desired_flat = flatten_dict.flatten(
        resources_desired, reducer=lambda *x: x[-1])
    resources = set(resources_current_flat.keys()).union(
        set(resources_desired_flat.keys()))
    return resources
def _relabel_batch(self, batch, indices, her_strategy):
    batch_size = indices.size
    batch['resampled_distances'] = np.full((batch_size, 1), np.inf)
    batch['resampled'] = np.zeros((batch_size, 1), dtype='bool')
    if her_strategy:
        her_strategy_type = self._her_strategy['type']
        goal_resampling_probability = self._her_strategy[
            'resampling_probability']
        to_resample_mask = (
            np.random.rand(batch_size) < goal_resampling_probability)
        where_resampled = np.flatnonzero(to_resample_mask)
        to_resample_indices = indices[where_resampled]
        episode_first_distances = -1 * batch['episode_index_forwards'][
            where_resampled]
        episode_last_distances = batch['episode_index_backwards'][
            where_resampled]
        resampled_indices, resampled_distances = self._resample_indices(
            to_resample_indices,
            episode_first_distances,
            episode_last_distances,
            her_strategy_type)
        resampled_batch_flat = flatten(
            super(HindsightExperienceReplayPool, self).batch_by_indices(
                indices=resampled_indices, field_name_filter=None))
        batch_flat = flatten(batch)
        goal_keys = [key for key in batch_flat.keys() if key[0] == 'goals']
        for key in goal_keys:
            assert (batch_flat[key][where_resampled].shape
                    == resampled_batch_flat[key].shape)
            batch_flat[key][where_resampled] = resampled_batch_flat[key]
        if self._reward_function:
            batch_flat[('rewards', )][where_resampled] = (
                self._reward_function(resampled_batch_flat))
        if self._terminal_function:
            batch_flat[('terminals', )][where_resampled] = (
                self._terminal_function(resampled_batch_flat))
        batch = unflatten(batch_flat)
        batch['resampled_distances'][where_resampled] = resampled_distances
        batch['resampled'][where_resampled] = True
    return batch
def is_equal_to_expected(self, expected_result):
    wiremock_url = expected_result["mock_url"]
    body_type = expected_result["body_type"]
    expected_body_string = expected_result["body"]
    result_array = requests.get(wiremock_url).json()["requests"]
    if len(result_array) > 0:
        result_body_string = result_array[0]["request"]["body"]
        result_body = self.body_type_handlers[body_type](result_body_string)
        expected_body = self.body_type_handlers[body_type](
            expected_body_string)
        return self.is_equal_objects(
            flatten_dict.flatten(result_body, enumerate_types=(list,)),
            flatten_dict.flatten(expected_body, enumerate_types=(list,)))
    else:
        return expected_body_string == "TO_BE_IGNORED"
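# A minimal sketch of why enumerate_types=(list,) is used above: it makes
# flatten index into lists, so the two bodies compare element-wise:
#     flatten_dict.flatten({'a': [{'b': 1}, 2]}, enumerate_types=(list,))
#     # -> {('a', 0, 'b'): 1, ('a', 1): 2}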
def _get_feed_dict(self, iteration, batch):
    """Construct a TensorFlow feed dictionary from a sample batch."""
    batch_flat = flatten(batch)
    placeholders_flat = flatten(self._placeholders)
    feed_dict = {
        placeholders_flat[key]: batch_flat[key]
        for key in placeholders_flat.keys()
        if key in batch_flat.keys()
    }
    if iteration is not None:
        feed_dict[self._placeholders['iteration']] = iteration
    return feed_dict
def dump_experiment_result(args: Namespace, config: Config, output_dir: str,
                           result: Dict[str, Any]):
    final_config_dict = asdict(config)
    flattened_experiment = flatten(final_config_dict, reducer="path")
    result["train_metrics"] = unpack_numpy_values(result["train_metrics"])
    result["val_metrics"] = unpack_numpy_values(result["val_metrics"])
    result["num_params"] = result["num_params"].item()
    flattened_result = flatten(result, reducer="path")
    flattened_experiment.update(flattened_result)
    flattened_experiment["run_id"] = args.run_id
    flattened_experiment["dir"] = output_dir
    with open(os.path.join(output_dir, "experiment_result.json"),
              "w") as json_file:
        json.dump(flattened_experiment, json_file)
        json_file.write("\n")
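# A quick sketch of the reducer="path" behavior relied on above: nested
# keys are joined with '/', keeping the flattened JSON keys readable:
#     flatten({'model': {'hidden': 128}}, reducer='path')
#     # -> {'model/hidden': 128}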
def REPLACE_FLAT_OBSERVATION(original_batch,
                             resampled_batch,
                             where_resampled,
                             environment):
    batch_flat = flatten(original_batch)
    resampled_batch_flat = flatten(resampled_batch)
    observation_keys = [
        key for key in batch_flat.keys()
        if key[0] == 'observations' or key[0] == 'next_observations'
    ]
    for key in observation_keys:
        # The second half of each flat observation holds the goal; overwrite
        # it for the resampled rows. Indexing rows and columns in a single
        # step keeps this an in-place update (chained fancy indexing would
        # write to a copy).
        state_size = int(batch_flat[key].shape[1] / 2)
        batch_flat[key][where_resampled, state_size:] = (
            resampled_batch_flat[key][:, state_size:])
    return unflatten(batch_flat)
def test_Basics(self):
    for i in self.TESTS['Basics']:
        assert flatten(i['input']) == i['answer'], i['input']
def test_Extra(self):
    for i in self.TESTS['Extra']:
        assert flatten(i['input']) == i['answer'], i['input']