def remap_item(
    item: Dict[str, Any],
    mappings: List[Union[
        # tuple items:
        # 1. source path
        # 2. destination path
        # 3. default value (optional)
        # 4. formatter (optional)
        Tuple[List[str], List[str]],
        Tuple[List[str], List[str], Any],
        Tuple[List[str], List[str], Any, Callable[[Any], Any]],
    ]],
) -> Dict[str, Any]:
    """Build a new dict by relocating values from `item` along each mapping.

    Each mapping is (source_path, destination_path[, default[, formatter]]).
    The value found in `item` at source_path (or `default` when absent) is
    passed through `formatter` and written at destination_path in the result.
    """
    result: Dict[str, Any] = {}
    for mapping in mappings:
        source, destination = mapping[0], mapping[1]
        # Slice off the optional tail instead of len() checks; mypy cannot
        # follow variable-length tuples either way (python/mypy#1178).
        tail = mapping[2:]
        default = tail[0] if len(tail) >= 1 else None
        formatter = tail[1] if len(tail) >= 2 else identity  # type: ignore
        value = get_in(item, source, default)
        result = set_in(result, destination, formatter(value))
    return result
def _get_fields(self, extensions):
    """Augment each data-extension entry with field-derived schema info.

    Queries the DataExtensionField SOAP object and, for every field found,
    records primary keys under 'key_properties' and adds a per-field JSON
    schema under 'schema.properties' of the owning extension.
    Returns an augmented copy of `extensions`.
    """
    augmented = extensions.copy()
    fields = request('DataExtensionField',
                     FuelSDK.ET_DataExtension_Column,
                     self.auth_stub)

    for raw_field in fields:
        extension_id = raw_field.DataExtension.CustomerKey
        field_dict = sudsobj_to_dict(raw_field)
        field_name = field_dict['Name']

        # Primary-key fields become the stream's key_properties.
        if field_dict.get('IsPrimaryKey'):
            augmented = _merge_in(
                augmented, [extension_id, 'key_properties'], field_name)

        field_schema = {
            'type': [
                'null',
                _convert_extension_datatype(str(field_dict.get('FieldType'))),
            ],
            'description': str(field_dict.get('Description')),
        }
        augmented = set_in(
            augmented,
            [extension_id, 'schema', 'properties', field_name],
            field_schema)

    return augmented
def split_dict_doppelgangers_by_keys(input_dict, keys):
    """Split `input_dict` into one copy per element of the list at `keys`.

    The value at the nested path `keys` must be a list; the result contains
    one "doppelganger" dict per list element, identical to `input_dict`
    except that the list at `keys` is replaced by that single element.

    Raises AssertionError when the value at `keys` is not a list.
    """
    values = funcy.get_in(input_dict, keys)
    assert isinstance(values, list)
    # Fixes the `paris` typo and replaces the zip(repeat(...)) + map(lambda)
    # construction with the equivalent list comprehension.
    return [funcy.set_in(input_dict, keys, value) for value in values]
def _get_fields(self, extensions):
    """Augment each data-extension entry with schema and metadata per field.

    Queries the DataExtensionField SOAP object and, for every field found:
    records primary keys under 'key_properties', adds a per-field JSON
    schema under 'schema.properties', and appends discovery metadata for
    fields not already defaulted into the schema.
    Returns an augmented copy of `extensions`.
    """
    augmented = extensions.copy()
    fields = request(
        'DataExtensionField', FuelSDK.ET_DataExtension_Column, self.auth_stub)

    for raw_field in fields:
        extension_id = raw_field.DataExtension.CustomerKey
        field_dict = sudsobj_to_dict(raw_field)
        field_name = field_dict['Name']

        # Primary-key fields become the stream's key_properties.
        if field_dict.get('IsPrimaryKey'):
            augmented = _merge_in(
                augmented, [extension_id, 'key_properties'], field_name)

        field_schema = {
            'type': [
                'null',
                _convert_extension_datatype(str(field_dict.get('FieldType'))),
            ],
            'description': str(field_dict.get('Description')),
        }
        augmented = set_in(
            augmented,
            [extension_id, 'schema', 'properties', field_name],
            field_schema)

        # These fields are defaulted into the schema, do not add to metadata again.
        if field_name not in {'_CustomObjectKey', 'CategoryID'}:
            augmented[extension_id]['metadata'].append({
                'breadcrumb': ('properties', field_name),
                'metadata': {'inclusion': 'available'},
            })

    return augmented
from funcy import autocurry, ljuxt, rcompose as pipe, merge, lmapcat, lmap, get_in, set_in, identity, flatten, pluck set_with = autocurry(lambda path, fn, o: set_in( o, path.split('.') if isinstance(path, str) else path, fn(o))) map = autocurry(lmap) flatmap = autocurry(lmapcat) # pipe = rcompose def get(str): path = str.split('.') return lambda x: get_in(x, path) spread = lambda fn: lambda arr: fn(*arr) max_with = lambda fn: lambda arr: max(arr, key=fn) min_with = lambda fn: lambda arr: min(arr, key=fn) juxt = spread(ljuxt) def arr2d_from_dict_values(cols=[]): def inner(idx_dict): return ([v[c] for c in cols] for k, v in idx_dict.items()) return inner import csv def csv_from_arr2d(out_path="output.csv", cols=[]):
def replace1(old, new, path):
    """Return `path` with its first component equal to `old` replaced by `new`.

    Raises ValueError when `old` is not a component of `path`
    (propagated from list.index).
    """
    parts = list(Path(path).parts)
    # `parts` is a fresh local list, so plain index assignment suffices;
    # the original F.set_in call (and its apologetic NOTE about the set_in
    # implementation) only added an immutable-update detour.
    parts[parts.index(old)] = new
    return str(Path(*parts))
def update(self, updates):
    """Apply dotted-key updates to the stored data and persist the result.

    `updates` maps dotted paths ('a.b.c') to new values; each value is set
    into a functional copy of the current data, then the final result is
    written back in one shot.
    """
    data = self.get().data
    for dotted_key, value in updates.items():
        data = F.set_in(data, dotted_key.split('.'), value)
    self._write(data)