Example #1
 def raw_lines(self, node=None, style=None):
     if node is None:
         self.initialize()
         style = ChainMap({"fontstyle": "n"})
         node = self.root
     for e in node.iterchildren():
         if e.tag in ["html", "body", "pdf2xml"]:
             yield from self.raw_lines(e, style)
             continue
         if e.tag in ["b", "i"]:
             sstart, send = self.__class__.HTML_MARKUP[e.tag]
             yield sstart
             yield from self._texts(e,
                                    style.new_child({"fontstyle": e.tag}))
             yield send
             continue
         if e.tag in ["a"]:
             sstart, send = self.__class__.HTML_MARKUP[e.tag]
             yield sstart
             yield from self._texts(e, style)
             yield send
             continue
         if e.tag == "page":
             yield from self._proc_page(e, style)
             yield page_symbol
             continue
         #if e.tag == "text":
         #    yield from self._proc_text(e, style)
         #    continue
         yield e
         yield from self.raw_lines(e, style)
Example #2
 def __init__(self, **kwargs):
     # get dict fieldname:Field received as parameter
     parameters = {
         field: Field(value=Configuration.typed(field, value),
                      type=Configuration.ALL_FIELDS[field].type)
         for field, value in kwargs.items()
     }
     # take parameters in priority, else the ALL_FIELDS dict
     prioritized_fields = ChainMap(parameters, Configuration.ALL_FIELDS)
     # save values as fields
     for varname, field in prioritized_fields.items():
         value, ftype = field
         setattr(self, '_' + varname, ftype(value))
     # access and setter definition
     def field_access(instance, field):
         return getattr(instance, '_' + field)
     def field_setter(instance, value, field):
         ftype = Configuration.ALL_FIELDS[field].type
         if field in self.UNMUTABLE_FIELDS:
             LOGGER.warning('CONFIG: The immutable field ' + field
                            + ' has been modified to ' + str(value))
         elif field in self.GENERATED_FIELDS:
             LOGGER.warning('CONFIG: The generated field ' + field
                            + ' has been modified to ' + str(value))
         setattr(instance, '_' + field, ftype(value))
         # regenerate generated data
     # add values as properties, optionally with a setter if the field is mutable
     for varname in Configuration.ALL_FIELDS:
         new_prop = property(partial(field_access, field=varname))
         setattr(Configuration, varname, new_prop)
         if varname in Configuration.MUTABLE_FIELDS:
             setter = new_prop.setter(partial(field_setter, field=varname))
             setattr(Configuration, varname, setter)
     # add other data
     self.postprocess_data()
Example #3
def load_settings(paths, default_config, oldsettings,
                  setting_callbacks, setting_types, refresh_only):
    """ Load settings from the main config file. """
    auto_settings, manual_settings\
             = read_config(paths['config_file'], default_config)
    settings = ChainMap(auto_settings, manual_settings)
    def revert_setting(key):
        if key in oldsettings:
            reverted_value = oldsettings[key]
        else:
            reverted_value = default_config[key]
        if key in auto_settings:
            auto_settings[key] = reverted_value
        elif key in manual_settings:
            manual_settings[key] = reverted_value
    error = False
    # Make sure the settings are valid
    for key, value in settings.items():
        if refresh_only and oldsettings.get(key) == value:
            continue
        # First make a simple check to see if the value is the right type
        if not valid_setting(key, value, setting_types):
            print('Invalid type for setting: "{}"'.format(key))
            error = True
            revert_setting(key)
        # Then do a live update and see if things blow up
        try:
            update_runtime_setting(key, value, setting_callbacks)
        except SettingsError as e:
            print(str(e))
            error = True
            revert_setting(key)
    error_text = 'Errors while reading the config. Check terminal output.'
    return settings, error_text if error else None
Example #4
def store_selections(view, selections, replace=False, **settings):
	"""
	Save selections to storage.
	"""
	id = get_id(settings)

	if not replace:
		# Add old selections to selections, so as not to remove them
		selections = list(chain(selections, view.get_regions(id)))

	# Filter "settings" to only have "flags", "scope" and "icon"
	filtered = {"flags": parse_flags(**settings)}

	# Chain settings with preset to provide fallback
	settings = ChainMap(settings, presets[settings.get("preset", "")])

	if "scope" in settings:
		filtered["scope"] = settings["scope"]

	if "icon" in settings:
		filtered["icon"] = settings["icon"]

	# Done! Finish up!
	#
	# Wrap the call in a timeout to appease adzenith who was kind enough
	# to open my first bug report at github.com/Veedrac/Sublime-Extras/issues/1.
	# This should make sure that you can switch panes with the mouse.
	sublime.set_timeout(lambda: view.add_regions(id, selections, **filtered), 0)
Example #5
def more():
    values = ChainMap()
    values['x'] = 1
    print(values)
    values = values.new_child()
    values['x'] = 2
    print(values)
    values = values.new_child()
    values['x'] = 3
    print(values, values['x'], values.parents['x'], values.parents.parents['x'])
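For reference, a minimal sketch of the shadowing behaviour the calls above demonstrate, assuming the standard library ChainMap: each new_child() pushes a fresh dict to the front of the chain, and .parents drops the front map again.

from collections import ChainMap

vals = ChainMap()
vals['x'] = 1
vals = vals.new_child()
vals['x'] = 2
vals = vals.new_child()
vals['x'] = 3
assert vals['x'] == 3                  # the front map wins
assert vals.parents['x'] == 2          # .parents skips the front map
assert vals.parents.parents['x'] == 1  # and so on up the chain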
Example #6
 def fromDocAndFlags(document, flags):
     """
     @param document: input document
     @param flags: additional API parameters
     @return: S2ApiInput corresponding to given document and flags
     """
     docDict = {k: v for k, v in document._asdict().items() if v}
     data = ChainMap(docDict, flags)
     inputDict = {fldName : data.get(fldName, None) for fldName in S2ApiInput._fields}
     return S2ApiInput(**inputDict)
Example #7
 def simple_cm():
     from collections import ChainMap
     c = ChainMap()
     c['one'] = 1
     c['two'] = 2
 
     cc = c.new_child()
     cc['one'] = 'one'
 
     return c, cc
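As a quick check of what the function above returns (standard ChainMap semantics, not part of the original snippet): the child map shadows the parent without modifying it.

c, cc = simple_cm()
assert cc['one'] == 'one'      # child map shadows the parent
assert cc['two'] == 2          # falls through to the parent map
assert c['one'] == 1           # the parent chain is unchanged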
Example #8
 def get_raw_results(self):
     columns = []
     response_cols = []
     for p, responses in self._stored_responses.items():
         for i, response in responses.items():
             response = ChainMap(
                 response, {'participant_id': p, 'active_item_id': i})
             if not columns:
                 columns.extend(response.keys())
             response_cols.append([response[c] for c in columns])
     return columns, response_cols
Example #9
 def test_bool(self):
     from collections import ChainMap
     c = ChainMap()
     assert not(bool(c))
 
     c['one'] = 1
     c['two'] = 2
     assert bool(c)
 
     cc = c.new_child()
     cc['one'] = 'one'
     assert cc
Example #10
File: context.py Project: benzrf/parthial
    def __contains__(self, k):
        """Check whether a variable has been assigned to.

        This is **not** the same kind of element-of as described in the
        class documentation.

        Args:
            k (str): The name of the variable to check.

        Returns:
            bool: Whether or not the variable has been assigned to.
        """
        chain = ChainMap(self.scopes, self.globals)
        return chain.__contains__(k)
Example #11
    def instantiate_related_objects(self, related_model, related_objects, meta_attrs):

        new_objects = []
        for obj_attrs in related_objects:
            new_obj = related_model()

            input_attrs = ChainMap(obj_attrs, meta_attrs)
            for field_name, field_value in input_attrs.items():
                f = self.interpolate(field_name, meta_attrs)
                setattr(new_obj, field_name, f(field_value))

            new_objects.append(new_obj)

        return new_objects
Example #12
File: context.py Project: benzrf/parthial
    def __getitem__(self, k):
        """Look up a variable.

        Args:
            k (str): The name of the variable to look up.

        Returns:
            LispVal: The value assigned to the variable.

        Raises:
            KeyError: If the variable has not been assigned to.
        """
        chain = ChainMap(self.scopes, self.globals)
        return chain.__getitem__(k)
Example #13
File: cib.py Project: NEAT-project/neat
    def expand_rows(self, apply_extended=True):
        """Generate CIB rows by expanding all CIBs pointing to current CIB """
        paths = self.resolve_graph()

        # for storing expanded rows
        rows = []

        for path in paths:
            expanded_properties = (self.cib[uid].expand() for uid in path)
            for pas in itertools.product(*expanded_properties):
                chain = ChainMap(*pas)

                # For debugging purposes, add the path list to the chain.
                # Store as string to preserve path order (NEAT properties are not ordered).
                dbg_path = '<<'.join(uid for uid in path)

                # insert at position 0 to override any existing entries
                # chain.maps.insert(0, PropertyArray(NEATProperty(('cib_uids', dbg_path))))

                # convert back to normal PropertyArrays
                row = PropertyArray(*(p for p in chain.values()))
                row.meta['cib_uids'] = dbg_path
                rows.append(row)

        if not apply_extended:
            return rows

        if not self.cib.extenders:
            # no extender CIB nodes loaded
            return rows

        # TODO optimize
        extended_rows = rows.copy()
        for entry in rows:
            # TODO take priorities into account
            # iterate extender cib_nodes
            for uid, xs in self.cib.extenders.items():
                for pa in xs.expand():
                    if xs.match_entry(entry):
                        entry_copy = copy.deepcopy(entry)
                        chain = ChainMap(pa, entry_copy)
                        new_pa = PropertyArray(*(p for p in chain.values()))
                        try:
                            del new_pa['uid']
                        except KeyError:
                            pass
                        extended_rows.append(new_pa)

        return extended_rows
Example #14
 def __init__(self, schema):
     self.count = -1
     self.lookup = ChainMap()
     self.type_of_var = {}
     self.schema = schema
     self.constants = set()
     self.backrefs = build_links(schema)
Example #15
    def __init__(self, functions, name="module"):
        self.vars = ChainMap()
        self.vars.update({fn.instructions[0][1]: fn for fn in functions})

        # List of Python modules to search for external decls
        external_libs = [ 'math', 'os' ]
        self.external_libs = [ __import__(name) for name in external_libs]
Example #16
 def __init__(self):
     self.symtab = ChainMap({
         'int': types.IntType,
         'float': types.FloatType,
         'string': types.StringType,
         'bool': types.BoolType,
     })
Example #17
 def test_fromkeys(self):
     from collections import ChainMap
     keys = 'a b c'.split()
     c = ChainMap.fromkeys(keys)
     assert len(c) == 3
     assert c['a'] is None
     assert c['b'] is None
     assert c['c'] is None
Example #18
    def __init__(self, *, settings=None, device=None):
        self.settings = settings or TextViewSettings()
        self._metrics = Qt.QFontMetricsF(self.settings.q_font)
        self.device = device
        self.reset()
        self._base_attrs = {}
        self._cur_lc_attrs = {}
        self._cur_attrs = {}

        self._attrs = ChainMap(self._cur_attrs, self._cur_lc_attrs, self._base_attrs)
Example #19
class Module(object):
    def __init__(self, name, fullname, parent=None, reader=None):
        self.name = name
        self.fullname = fullname
        self.parent = parent
        self.reader = reader
        self.files = OrderedDict()
        self.reset()

    @property
    def package_name(self):
        return self.name

    def fulladdress(self, name):
        return "{}.{}".format(self.fullname, name)

    def read_file(self, name, file):
        self.files[name] = self.reader.read_file(file, parent=self)

    def normalize(self):
        self.reset()
        for file in self.files.values():
            file.normalize()
            self.new_child(file.members)

    def reset(self):
        self.members = ChainMap()

    def new_child(self, item):
        self.members = self.members.new_child(item)

    def __getitem__(self, name):
        return self.members[name]

    def get(self, name, default=None):
        return self.members.get(name, default)

    def __contains__(self, name):
        return name in self.members

    @property
    def world(self):
        return self.parent
Example #20
File: rpc.py Project: aio-libs/aiozmq
 def __init__(self, loop, *, error_table=None, translation_table=None):
     super().__init__(loop, translation_table=translation_table)
     self.calls = {}
     self.prefix = self.REQ_PREFIX.pack(os.getpid() % 0x10000,
                                        random.randrange(0x10000))
     self.counter = 0
     if error_table is None:
         self.error_table = _default_error_table
     else:
         self.error_table = ChainMap(error_table, _default_error_table)
Example #21
File: kb.py Project: smattin/george
 def __init__(self):
     """ mock knowledge base, use triple store ? """ 
     self.base = ChainMap(self.who
                         ,self.what
                         ,self.where
                         ,self.when
                         ,self.why
                         ,self.defaults
                         ,self.uq
                         )
Example #22
class ScopeStack(object):
    def __init__(self):
        self.data = ChainMap({})
        self.special_forms = {}

    def __contains__(self, key):
        return key in self.data

    def set(self, key, value, special_form=False):
        if not special_form:
            self.data[key] = value
        else:
            self.special_forms[key] = value

        if not self.validate():
            raise RuntimeError("Special form overwritten")

    def unset(self, key):
        del self.data[key]

    def is_empty(self):
        return len(self.data) == 0

    def new_scope(self):
        self.data = self.data.new_child()

    def drop_scope(self):
        self.data = self.data.parents

    def get_scope_identifiers(self, root=False):
        first_level = self.data.maps[0] if len(self.data.maps) > 0 else {}
        merged_stmts = list(first_level.values())

        if root:
            merged_stmts = list(self.special_forms.values()) + merged_stmts

        return sorted(merged_stmts, key=lambda x: x.value)

    def validate(self):
        normal_scope = set(self.data.keys())
        special_form_scope = set(self.special_forms.keys())
        return len(normal_scope.intersection(special_form_scope)) == 0
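A short usage sketch of the class above (assuming ChainMap is imported in its module): new_scope() and drop_scope() simply ride on ChainMap.new_child() and ChainMap.parents.

scopes = ScopeStack()
scopes.set('x', 1)
scopes.new_scope()              # push a child map
scopes.set('x', 2)              # shadows the outer binding
assert scopes.data['x'] == 2
scopes.drop_scope()             # back to the enclosing scope
assert scopes.data['x'] == 1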
Example #23
 def __new__(self, name, bases, attrs):
     meta = ChainMap({})
     used = set()
     for b in bases:
         if hasattr(b, "meta"):
             meta = meta.new_child(b.meta)
             for cls in b.mro():
                 used.add(cls)
         else:
             for cls in reversed(b.mro()):
                 if cls in used:
                     continue
                 used.add(cls)
                 if not hasattr(cls, "Meta"):
                     continue
                 meta = meta.new_child(cls.Meta.__dict__)
     if "Meta" in attrs:
         meta = meta.new_child(attrs["Meta"].__dict__)
     attrs.update({"meta": meta})
     return super().__new__(self, name, bases, attrs)
Example #24
File: scan.py Project: vooum/ScanCommander
    def __init__(self,method='mcmc'):
        # self.variable_list={}

        self.scalar_list={}
        self.element_list={}
        self.follower_list={}
        self.dependent_list={}
        # self.matrix_list={}
        # self.free_parameter_list={}

        self.variable_list=ChainMap(
            self.scalar_list,self.element_list,self.follower_list,self.dependent_list
        )
        self.free_parameter_list=ChainMap(
            self.scalar_list,self.element_list
        )

        self.block_list={}

        self.method=method.lower()
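Example #25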
def test_chain_map():
    from collections import ChainMap

    ChainMap()
    cm = ChainMap({"a": 1, "b": 2})

    assert cm.maps is not None

    assert cm.new_child().maps is not None
    assert cm.new_child({"c": 3}).maps is not None

    assert cm.parents.maps is not None

    cm["d"] = 4
    del cm["a"]
    assert "a" not in cm
    assert cm["b"] == 2
    assert cm["d"] == 4
    assert list(iter(cm)) == ["b", "d"]
    assert len(cm) == 2
Example #26
class SymbolTable(object):
    '''
    Class representing a symbol table.  It should provide functionality
    for adding and looking up nodes associated with identifiers.
    '''
    def __init__(self):
        self.table = ChainMap()
        self.current_scope = self.table
        self.root_scope = self.table
        self.type_objects = {
            int: gonetype.int_type,
            float: gonetype.float_type,
            str: gonetype.string_type,
            bool: gonetype.bool_type,
            'int': gonetype.int_type,
            'float': gonetype.float_type,
            'string': gonetype.string_type,
            'bool': gonetype.bool_type
        }

    def add(self, symbol, data):
        self.current_scope[symbol] = data

    def get(self, symbol):
        if symbol in self.current_scope:
            return self.current_scope[symbol]
        return None

    def push_scope(self):
        self.current_scope = self.table.new_child()
        return self.current_scope

    def pop_scope(self):
        self.current_scope = self.current_scope.parents
        return self.current_scope

    def pprint(self):
        print("{}top".format("-" * 10))
        for symbol in self.table:
            print("{}: {}".format(symbol, self.table.get(symbol)))
        print("-" * 10)
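A brief usage sketch (it assumes the SymbolTable class above and the gonetype module it refers to): push_scope() stacks a ChainMap child, so inner definitions shadow outer ones until pop_scope().

symtab = SymbolTable()
symtab.add('x', 'outer')
symtab.push_scope()               # new_child(): enter a nested scope
symtab.add('x', 'inner')
assert symtab.get('x') == 'inner'
symtab.pop_scope()                # parents: back to the enclosing scope
assert symtab.get('x') == 'outer'
Example #27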
def combine_map():
    a = {'x': 1, 'z': 3 }
    b = {'y': 2, 'z': 4 }
    c = ChainMap(a,b)
    print(c['x']) # Outputs 1 (from a)
    print(c['y']) # Outputs 2 (from b)
    print(c['z']) # Outputs 3 (from a)

    print(len(c))
    print(list(c.keys()))
    print(list(c.values()))

    c['z'] = 10
    c['w'] = 40
    del c['x']
    print(a)
    # del c['y']

    values = ChainMap()
    values['x'] = 1
    # Add a new mapping
    values = values.new_child()
    values['x'] = 2
    # Add a new mapping
    values = values.new_child()
    values['x'] = 3
    print(values)
    print(values['x'])
    # Discard last mapping
    values = values.parents
    print(values['x'])
    # Discard last mapping
    values = values.parents
    print(values['x'])
    print(values)
Example #28
def prepare(func=None, param_types=dict()):
    if func is None:
        from __main__ import main as func
    param_types = ChainMap(param_types, getattr(func, "param_types", dict()))
    
    try:
        sig = signature(func)
    except (ValueError, TypeError):
        return (func, None, dict(), param_types)
    
    keyword_kinds = (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
    keywords = OrderedDict((param.name, param) for
        param in sig.parameters.values() if param.kind in keyword_kinds)
    
    # Explicit set() construction to work around Python 2's keys() lists
    missing = set(param_types.keys()).difference(sig.parameters.keys())
    if missing:
        missing = ", ".join(map(repr, missing))
        msg = "{}() missing parameters: {}".format(func.__name__, missing)
        raise TypeError(msg)
    
    return (func, sig, keywords, param_types)
Example #29
def ch1_20():
    '''
    1.20 Merge multiple mappings into a single mapping
    Solved with collections.ChainMap
    '''

    print("\nch1_20:")

    from collections import ChainMap

    a={'x':1,'z':3}
    b={'y':2,'z':4}

    c = ChainMap(a,b)  # uses the original dicts
    print(c['x'],c['y'],c['z'])

    print(list(c.keys()),list(c.values()))

    # alternative: build a new merged dict instead
    merged = dict(b)
    merged.update(a)
    print(merged['x'],merged['y'],merged['z'])
Example #30
 def __init__(self):
     self.table = ChainMap()
     self.current_scope = self.table
     self.root_scope = self.table
     self.type_objects = {
         int: gonetype.int_type,
         float: gonetype.float_type,
         str: gonetype.string_type,
         bool: gonetype.bool_type,
         'int': gonetype.int_type,
         'float': gonetype.float_type,
         'string': gonetype.string_type,
         'bool': gonetype.bool_type
     }
Example #31
    def __init__(self, source, target, model, connection_dict):
        """Initialize Connection object from model and overrides.

        Initialize the self.params and self.nest_params attributes, and all the
        other attributes as well :)

        Args:
            source (Layer): source Layer object
            target (Layer): target Layer object
            model (ConnectionModel): ConnectionModel object. Provide base
                'params' and 'nest_params' parameter dictionaries.
            connection_dict (dict): Dictionary defining the connection. The
                dictionary should have the form described in the class
                docstring. In particular, it may contain the following keys:
                    params (dict): "non-nest" parameter dictionary. Combined in
                        a ChainMap with `model.params`. All recognized
                        parameters are listed in global variable
                        `NON_NEST_CONNECTION_PARAMS`.
                    nest_params (dict): Parameters that may be passed to the
                        NEST kernel. Combined in a ChainMap with
                        model.nest_params. No parameter listed in global
                        variable `NON_NEST_CONNECTION_PARAMS` should be present
                        in this variable.
        """
        ##
        # Check the params and nest_params dictionaries and ChainMap them with
        # the ConnectionModel params and nest_params
        params = connection_dict.get('params', {})
        nest_params = connection_dict.get('nest_params', {})
        assert all([key in NON_NEST_CONNECTION_PARAMS for key in
                    params.keys()]), \
               (f'Unrecognized parameter in connection: {connection_dict}.'
                f'\nRecognized parameters: {NON_NEST_CONNECTION_PARAMS.keys()}')
        assert not any([key in NON_NEST_CONNECTION_PARAMS for key in
                        nest_params.keys()]), \
               (f'Reserved nest parameter in connection: {connection_dict}'
                f'\nNon-nest reserved parameters: {NON_NEST_CONNECTION_PARAMS.keys()}')
        self.params = dict(ChainMap(params, model.params))
        self.nest_params = dict(ChainMap(nest_params, model.nest_params))
        super().__init__(model.name, self.params)
        ##
        # Define the source/target population attributes
        self.model = model
        self.source = source
        self.source_population = connection_dict.get('source_population', None)
        self.target = target
        self.target_population = connection_dict.get('target_population', None)
        # By default, we consider the driver to be the source
        self.driver = 'source'
        self.driver_layer = self.source
        self.driver_population = self.source_population
        ##
        # Synapse model is retrieved either from nest_params or UnitConns.
        # The synapse model used in NEST might have a different name since we
        # need to change the default parameters of a synapse to specify the
        # weight recorder
        self._synapse_model = None
        self._nest_synapse_model = None
        ##
        # Synapse label is set in the defaults of the nest synapse model in
        # `create_nest_synapse_model` and can be used to efficiently query the
        # connections of a given projection.
        self._synapse_label = None
        # Initialize the recorders
        self.recorders = [
            ConnectionRecorder(recorder_name, recorder_params) for
            recorder_name, recorder_params in self.params['recorders'].items()
        ]
        assert len(self.recorders) < 2  # Only a single recorder type so far...
        self.check()
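Example #32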
for word in words:
    counts_dict[word[0]] = counts_dict.get(word[0], 0) + 1

print(counts_dict)
# yet another way
counts_dict = {}
counts_dict = defaultdict(int)
for word in words:
    counts_dict[word[0]] = counts_dict[word[0]] + 1

print(counts_dict)

# using chainmap

d = {'a': 1, 'b': 2}
d1 = {'a': 11, 'b': 22}
d2 = {'a': 33, 'c': 44}

print("chain map is")
print(ChainMap(d, d1, d2))

# List comprehension and generator expressions.

a = (el for el in range(4))  # creates a generator

print(a)

# Output: <generator object <genexpr> at 0x100b449d0>

# python popitem
Example #33
# Chain Maps
from collections import ChainMap

default_connection = {'host': 'localhost', 'port': 4567}
connection = {'port': 5678}

conn = ChainMap(connection, default_connection)

conn['port']
conn['host']
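With the standard library ChainMap these lookups resolve left to right; as a quick check (not part of the original snippet):

assert conn['port'] == 5678          # found in `connection`, which is searched first
assert conn['host'] == 'localhost'   # falls back to `default_connection`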
Example #34
 def __init__(self, app_state: collections.ChainMap) -> None:
     self.id = uuid.uuid4()
     self._state = app_state.new_child()
Example #35
#	FileName : Chapter1_20.py
'''
Merging multiple dicts or mappings
Key points:
collections.ChainMap: merges multiple dicts without creating new data, so additions, deletions and updates go through to the source dicts
dict.update: merges multiple dicts into a new dict whose data is stored independently
'''
from collections import ChainMap
if __name__ == '__main__':
    a = {'x': 1, 'z': 3}
    b = {'y': 2, 'z': 4}
    c = ChainMap(a, b)
    print(a)
    print(b)
    print(c)
    print(len(c))
    print(list(c.keys()))
    print(list(c.values()))

    c['z'] = 10
    c['w'] = 40
    del c['x']
    b['y'] = 20
    print(a)
    print(b)
    print(c)

    values = ChainMap()
    values['x'] = 1
    values = values.new_child()
    values['x'] = 2
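The docstring above also mentions dict.update; a minimal sketch of that alternative (a2 and b2 are hypothetical dicts for illustration), which copies the data rather than viewing the source dicts:

a2 = {'x': 1, 'z': 3}
b2 = {'y': 2, 'z': 4}
merged = dict(b2)
merged.update(a2)        # values from a2 win, like ChainMap(a2, b2)
a2['x'] = 100
print(merged['x'])       # still 1 - the merged dict stores its own copy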
Example #36
 def _cat(maps: list):
     # make callbacks' return to a single map
     maps = [m for m in maps if m is not None]
     return dict(ChainMap(*maps))
Example #37
 def get_document(self):
     hdr = self._get_doc_header()
     bdy = self._get_doc_body()
     return [ChainMap(*[hdr, dct]) for dct in bdy]
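Example #38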
import json
from collections import ChainMap

file_name = 'south-park.json'
final_file = 'lines-by-character.json'

with open(file_name, "r") as read_file:
    data = json.load(read_file)

all_characters = list(set([line['character'] for line in data]))  # code smell
all_characters.sort()

lines_by_character = []
for character in all_characters:
    character_lines = []
    print(f'Current character: {character}')
    for line in data:
        if line['character'] == character:
            character_lines.append(line['line'])
    lines_by_character.append({character: character_lines})
    print(f'{character} done.')

# print(lines_by_character[0])

data = dict(ChainMap(*lines_by_character))
json_string = json.dumps(data, indent=4)

with open(final_file, "w") as outfile:
    outfile.write(json_string)
Example #39
 def create_alias_replacement(
     self, imported_symbols: Dict[TypeAnnotationNode, TypeAnnotationNode]
 ) -> AliasReplacementVisitor:
     return AliasReplacementVisitor(
         ChainMap(imported_symbols, self.__project_specific_aliases,
                  self.__aliases))
Example #40
class TILookup:
    """Threat Intel observable lookup from providers."""
    def __init__(
        self,
        primary_providers: Optional[List[TIProvider]] = None,
        secondary_providers: Optional[List[TIProvider]] = None,
        providers: Optional[List[str]] = None,
    ):
        """
        Initialize TILookup instance.

        Parameters
        ----------
        primary_providers : Optional[List[TIProvider]], optional
            Primary TI Providers, by default None
        secondary_providers : Optional[List[TIProvider]], optional
            Secondary TI Providers, by default None
        providers: Optional[List[str]], optional
            List of provider names to load, by default all available
            providers are loaded. To see the list of available providers
            call `TILookup.list_available_providers()`.
            Note: if primary_providers or secondary_providers is specified,
            this will override the providers list.

        """
        self._providers: Dict[str, TIProvider] = {}
        self._secondary_providers: Dict[str, TIProvider] = {}
        self._providers_to_load = providers

        if primary_providers:
            for prov in primary_providers:
                self.add_provider(prov, primary=True)
        if secondary_providers:
            for prov in secondary_providers:
                self.add_provider(prov, primary=False)
        if not (primary_providers or secondary_providers):
            self._load_providers()

        self._all_providers = ChainMap(self._secondary_providers,
                                       self._providers)

    @property
    def loaded_providers(self) -> Dict[str, TIProvider]:
        """
        Return dictionary of loaded providers.

        Returns
        -------
        Dict[str, TIProvider]
            Dictionary of loaded provider names and instances.

        """
        return self._all_providers  # type: ignore

    @property
    def provider_status(self) -> Iterable[str]:
        """
        Return loaded provider status.

        Returns
        -------
        Iterable[str]
            List of providers and descriptions.

        """
        prim = [
            f"{prov_name} - {prov.description} (primary)"
            for prov_name, prov in self._providers.items()
        ]
        sec = [
            f"{prov_name} - {prov.description} (secondary)"
            for prov_name, prov in self._secondary_providers.items()
        ]
        return prim + sec

    @property
    def configured_providers(self) -> List[str]:
        """
        Return a list of available providers that have configuration details present.

        Returns
        -------
        List[str]
            List of TI Provider classes.

        """
        prim_conf = list(self._providers.keys())
        sec_conf = list(self._secondary_providers.keys())

        return prim_conf + sec_conf

    @property
    def available_providers(self) -> List[str]:
        """
        Return a list of builtin providers.

        Returns
        -------
        List[str]
            List of TI Provider classes.

        """
        return self._get_available_providers()

    @classmethod
    def _get_available_providers(cls):
        providers = []
        for provider_name in dir(tiproviders):
            provider_class = getattr(tiproviders, provider_name, None)
            if not (provider_class and isclass(provider_class)):
                continue
            # if it is a class - we only want to show concrete classes
            # that are sub-classes of TIProvider
            if issubclass(provider_class, tiproviders.TIProvider) and not bool(
                    getattr(provider_class, "__abstractmethods__", False)):
                providers.append(provider_class.__name__)
        return providers

    @classmethod
    def list_available_providers(
            cls,
            show_query_types=False,
            as_list: bool = False) -> Optional[List[str]]:  # type: ignore
        """
        Print a list of builtin providers with optional usage.

        Parameters
        ----------
        show_query_types : bool, optional
            Show query types supported by providers, by default False
        as_list : bool, optional
            Return list of providers instead of printing to stdout.
            Note: if you specify `show_query_types` this will be printed
            irrespective of this parameter setting.

        Returns
        -------
        Optional[List[str]]
            A list of provider names (if `as_list=True`)

        """
        providers = []
        for provider_name in cls._get_available_providers():
            provider_class = getattr(tiproviders, provider_name, None)
            if not as_list:
                print(provider_name)
            providers.append(provider_name)
            if show_query_types:
                provider_class.usage()
        if as_list:
            return providers
        return None

    def provider_usage(self):
        """Print usage of loaded providers."""
        print("Primary providers")
        print("-----------------")
        if self._providers:
            for prov_name, prov in self._providers.items():
                print(f"\nProvider class: {prov_name}")
                prov.usage()
        else:
            print("none")
        print("\nSecondary providers")
        print("-------------------")
        if self._secondary_providers:
            for prov_name, prov in self._secondary_providers.items():
                print(f"\nProvider class: {prov_name}")
                prov.usage()
        else:
            print("none")

    @classmethod
    def reload_provider_settings(cls):
        """Reload provider settings from config."""
        reload_settings()
        print(
            "Settings reloaded. Use reload_providers to update settings",
            "for loaded providers.",
        )

    def reload_providers(self):
        """
        Reload providers based on current settings in config.

        """
        self.reload_provider_settings()
        self._load_providers()

    def _load_providers(self):
        """Load provider classes based on config."""
        prov_settings = get_provider_settings()

        for provider_entry, settings in prov_settings.items():
            # Allow overriding provider name to use another class
            provider_name = settings.provider or provider_entry
            if self._providers_to_load and provider_name not in self._providers_to_load:
                continue
            provider_class: TIProvider = getattr(sys.modules[__name__],
                                                 provider_name, None)
            if not provider_class:
                warnings.warn(
                    f"Could not find provider class for {provider_name} " +
                    f"in config section {provider_entry}")
                continue

            # instantiate class sending args from settings to init
            try:
                provider_instance = provider_class(**(settings.args))
            except MsticpyConfigException as mp_ex:
                # If the TI Provider didn't load, raise an exception
                raise MsticpyUserConfigError(
                    f"Could not load TI Provider {provider_name}",
                    *mp_ex.args,
                    "To avoid loading this provider please use the 'providers' parameter"
                    + " to TILookup() to specify which providers to load.",
                    title="TIProvider configuration error",
                    help_uri=
                    "https://msticpy.readthedocs.io/en/latest/data_acquisition/"
                    + "TIProviders.html#configuration-file",
                ) from mp_ex

            # set the description from settings, if one is provided, otherwise
            # use class docstring.
            provider_instance.description = (settings.description
                                             or provider_instance.__doc__)

            self.add_provider(provider=provider_instance,
                              name=provider_name,
                              primary=settings.primary)

    def add_provider(self,
                     provider: TIProvider,
                     name: str = None,
                     primary: bool = True):
        """
        Add a TI provider to the current collection.

        Parameters
        ----------
        provider : TIProvider
            Provider instance
        name : str, optional
            The name to use for the provider (overrides the class name
            of `provider`)
        primary : bool, optional
            If True, add the provider as a primary provider; if False, add it
            as a secondary provider. By default True.

        """
        if not name:
            name = provider.__class__.__name__
        if primary:
            self._providers[name] = provider
        else:
            self._secondary_providers[name] = provider

    # pylint: disable=too-many-arguments
    def lookup_ioc(
        self,
        observable: str = None,
        ioc_type: str = None,
        ioc_query_type: str = None,
        providers: List[str] = None,
        prov_scope: str = "primary",
        **kwargs,
    ) -> Tuple[bool, List[Tuple[str, LookupResult]]]:
        """
        Lookup single IoC in active providers.

        Parameters
        ----------
        observable : str
            IoC observable
            (`ioc` is also an alias for observable)
        ioc_type : str, optional
            One of IoCExtract.IoCType, by default None
            If none, the IoC type will be inferred
        ioc_query_type: str, optional
            The ioc query type (e.g. rep, info, malware)
        providers: List[str]
            Explicit list of providers to use
        prov_scope : str, optional
            Use "primary", "secondary" or "all" providers, by default "primary"
        kwargs :
            Additional arguments passed to the underlying provider(s)

        Returns
        -------
        Tuple[bool, List[Tuple[str, LookupResult]]]
            The result returned as a tuple(bool, list):
            bool indicates whether a TI record was found in any provider
            list has an entry for each provider result

        """
        if not observable and "ioc" in kwargs:
            observable = kwargs["ioc"]
        if not observable:
            raise ValueError("observable or ioc parameter must be supplied.")

        result_list: List[Tuple[str, LookupResult]] = []
        selected_providers = self._select_providers(providers, prov_scope)
        if not selected_providers:
            raise MsticpyUserConfigError(
                _NO_PROVIDERS_MSSG,
                title="No Threat Intel Provider configuration found.",
                help_uri=_TI_HELP_URI,
            )

        ioc_type = ioc_type or TIProvider.resolve_ioc_type(observable)
        for prov_name, provider in selected_providers.items():
            provider_result: LookupResult = provider.lookup_ioc(
                ioc=observable,
                ioc_type=ioc_type,
                query_type=ioc_query_type,
                **kwargs)
            result_list.append((prov_name, provider_result))
        overall_result = any(res.result for _, res in result_list)
        return overall_result, result_list

    def lookup_iocs(
        self,
        data: Union[pd.DataFrame, Mapping[str, str], Iterable[str]],
        obs_col: str = None,
        ioc_type_col: str = None,
        ioc_query_type: str = None,
        providers: List[str] = None,
        prov_scope: str = "primary",
        **kwargs,
    ) -> pd.DataFrame:
        """
        Lookup a collection of IoCs.

        Parameters
        ----------
        data : Union[pd.DataFrame, Mapping[str, str], Iterable[str]]
            Data input in one of three formats:
            1. Pandas dataframe (you must supply the column name in
            `obs_col` parameter)
            2. Mapping (e.g. a dict) of [observable, IoCType]
            3. Iterable of observables - IoCTypes will be inferred
        obs_col : str, optional
            DataFrame column to use for observables, by default None
        ioc_type_col : str, optional
            DataFrame column to use for IoCTypes, by default None
        ioc_query_type: str, optional
            The ioc query type (e.g. rep, info, malware)
        providers: List[str]
            Explicit list of providers to use
        prov_scope : str, optional
            Use "primary", "secondary" or "all" providers, by default "primary"
        kwargs :
            Additional arguments passed to the underlying provider(s)

        Returns
        -------
        pd.DataFrame
            DataFrame of results

        """
        result_list: List[pd.DataFrame] = []
        selected_providers = self._select_providers(providers, prov_scope)
        if not selected_providers:
            raise MsticpyUserConfigError(
                _NO_PROVIDERS_MSSG,
                title="No Threat Intel Provider configuration found.",
                help_uri=_TI_HELP_URI,
            )

        for prov_name, provider in selected_providers.items():
            provider_result = provider.lookup_iocs(
                data=data,
                obs_col=obs_col,
                ioc_type_col=ioc_type_col,
                query_type=ioc_query_type,
                **kwargs,
            )
            if provider_result is None or provider_result.empty:
                continue
            if not kwargs.get("show_not_supported", False):
                provider_result = provider_result[
                    provider_result["Status"] !=
                    TILookupStatus.not_supported.value]
            if not kwargs.get("show_bad_ioc", False):
                provider_result = provider_result[
                    provider_result["Status"] !=
                    TILookupStatus.bad_format.value]
            provider_result["Provider"] = prov_name
            result_list.append(provider_result)

        if not result_list:
            print("No IoC matches")
        return pd.concat(result_list, sort=False)

    @staticmethod
    def result_to_df(
        ioc_lookup: Tuple[bool, List[Tuple[str,
                                           LookupResult]]]) -> pd.DataFrame:
        """
        Return DataFrame representation of IoC Lookup response.

        Parameters
        ----------
        ioc_lookup : Tuple[bool, List[Tuple[str, LookupResult]]]
            Output from `lookup_ioc`

        Returns
        -------
        pd.DataFrame
            The response as a DataFrame with a row for each
            provider response.

        """
        return (pd.DataFrame({
            r_item[0]: pd.Series(attr.asdict(r_item[1]))
            for r_item in ioc_lookup[1]
        }).T.rename(columns=LookupResult.column_map()).drop("SafeIoc", axis=1))

    def _select_providers(
            self,
            providers: List[str] = None,
            prov_scope: str = "primary") -> Dict[str, TIProvider]:
        """
        Return required subset of providers.

        Parameters
        ----------
        providers : List[str], optional
            Explicit list of provider names, by default None
        prov_scope : str, optional
            Provider scope, by default "primary"
            Other values are "all" and "secondary"

        Returns
        -------
        Dict[str, TIProvider]
            Dictionary of provider names and instances.

        """
        if providers:
            selected_providers = {
                prov_name: prov
                for prov_name, prov in self._all_providers.items()
                if prov_name in providers
            }
        else:
            if prov_scope == "all":
                selected_providers = self._all_providers  # type: ignore
            elif prov_scope == "primary":
                selected_providers = self._providers
            else:
                selected_providers = self._secondary_providers
        return selected_providers

    @staticmethod
    def browse_results(data: pd.DataFrame,
                       severities: Optional[List[str]] = None,
                       **kwargs):
        """
        Return TI Results list browser.

        Parameters
        ----------
        data : pd.DataFrame
            TI Results data from TIProviders
        severities : Optional[List[str]], optional
            A list of the severity classes to show.
            By default these are ['warning', 'high'].
            Pass ['information', 'warning', 'high'] to see all
            results.

        Other Parameters
        ----------------
        kwargs :
            passed to SelectItem constructor.

        Returns
        -------
        SelectItem
            SelectItem browser for TI Data.

        """
        return browse_results(data=data, severities=severities, **kwargs)
Example #41
def combine_spaces(spaces: List[List[Dict[str, Any]]]) -> List[Dict[str, Any]]:
    return [dict(ChainMap(*combination)) for combination in product(*spaces)]
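A hypothetical usage sketch (the parameter names are made up for illustration, and it assumes the ChainMap/product/typing imports from the snippet's module): product() picks one dict from each sub-space and ChainMap flattens each combination, with earlier maps winning on key clashes.

lr_space = [{'lr': 0.1}, {'lr': 0.01}]
bs_space = [{'batch_size': 32}, {'batch_size': 64}]
grid = combine_spaces([lr_space, bs_space])
assert len(grid) == 4
assert {'lr': 0.1, 'batch_size': 32} in grid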
Example #42
    def __init__(
        self,
        access_key_id=None,
        account_name=None,
        cleanup=None,
        config=None,
        description=None,
        distro_name=None,
        early_exit=None,
        history_log=None,
        image_id=None,
        inject=None,
        instance_type=None,
        log_level=None,
        no_default_test_dirs=None,
        cloud_config=None,
        region=None,
        results_dir=None,
        running_instance_id=None,
        secret_access_key=None,
        security_group_id=None,
        ssh_key_name=None,
        ssh_private_key_file=None,
        ssh_user=None,
        subnet_id=None,
        test_dirs=None,
        test_files=None,
        timeout=None,
        collect_vm_info=None,
        enable_secure_boot=None,
        enable_uefi=None,
        log_callback=None,
        prefix_name=None,
        retry_count=None
    ):
        """Initialize EC2 cloud framework class."""
        super(EC2Cloud, self).__init__(
            'ec2',
            cleanup,
            config,
            description,
            distro_name,
            early_exit,
            history_log,
            image_id,
            inject,
            instance_type,
            log_level,
            no_default_test_dirs,
            cloud_config,
            region,
            results_dir,
            running_instance_id,
            test_dirs,
            test_files,
            timeout,
            collect_vm_info,
            ssh_private_key_file,
            ssh_user,
            subnet_id,
            enable_secure_boot,
            enable_uefi,
            log_callback,
            prefix_name,
            retry_count
        )
        # Get command line values that are not None
        cmd_line_values = self._get_non_null_values(locals())

        self.zone = None
        self.account_name = account_name

        if not self.account_name:
            self.logger.debug(
                'No account provided. To use the EC2 config file an '
                'account name is required.'
            )

        if not self.region:
            raise EC2CloudException(
                'Region is required to connect to EC2.'
            )
        elif self.region[-1].isalpha():
            self.zone = self.region
            self.region = self.region[:-1]

        config_file = self.cloud_config or EC2_CONFIG_FILE

        ec2_config = {}
        try:
            ec2_config = ipa_utils.get_config_values(
                config_file,
                ''.join(['region-', self.region]),
                ''.join(['account-', self.account_name])
            )
            self.logger.debug(
                'Using EC2 config file: %s' % config_file
            )
        except Exception:
            self.logger.debug(
                'EC2 config file not found: %s' % config_file
            )

        self.ec2_config = defaultdict(
            lambda: None,
            ChainMap(cmd_line_values, ec2_config, self.ipa_config)
        )

        self.access_key_id = self.ec2_config['access_key_id']
        self.secret_access_key = self.ec2_config['secret_access_key']
        self.security_group_id = self.ec2_config['security_group_id']
        self.ssh_key_name = self.ec2_config['ssh_key_name']
        self.subnet_id = self.ec2_config['subnet_id']

        self.ssh_user = (
            cmd_line_values.get('ssh_user') or
            ec2_config.get('user') or
            self.ipa_config.get('ssh_user') or
            EC2_DEFAULT_USER
        )

        self.ssh_private_key_file = (
            cmd_line_values.get('ssh_private_key_file') or
            ec2_config.get('ssh_private_key') or
            self.ipa_config.get('ssh_private_key_file')
        )

        if not self.ssh_private_key_file:
            raise EC2CloudException(
                'SSH private key file is required to connect to instance.'
            )
        else:
            self.ssh_private_key_file = os.path.expanduser(
                self.ssh_private_key_file
            )
Example #43
 def installed(self):
     """Provides a view into all installed mods (including dependencies)."""
     return ChainMap(self.mods, self.dependencies)
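Since this returns a ChainMap rather than a merged copy, the view stays live; a small sketch of that behaviour (the mods/dependencies dicts below stand in for whatever the real attributes hold):

from collections import ChainMap

mods = {'foo': '1.0'}
dependencies = {'bar': '0.3'}
installed = ChainMap(mods, dependencies)
mods['baz'] = '2.1'               # later additions show up in the view
assert 'baz' in installed
assert installed['bar'] == '0.3'  # dependencies are reachable through the same view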
Example #44
def eval(x, env=None):
    """
    Evaluate an expression in the given execution environment.
    """

    # Create the default environment when the user does not pass the optional "env" argument
    if env is None:
        env = ChainMap({}, global_env)

    # Evaluate atomic types
    if isinstance(x, Symbol):
        return env[x]
    elif isinstance(x, (int, float, bool, str)):
        return x

    # Evaluate special forms and lists
    head, *args = x

    # Command (if <test> <then> <other>)
    # Ex: (if (even? x) (quotient x 2) x)
    if head == Symbol.IF:
        (condition, then, alternative) = args
        expression = (then if eval(condition, env) else alternative)
        return eval(expression, env)

    # Command (define <symbol> <expression>)
    # Ex: (define x (+ 40 2))
    elif head == Symbol.DEFINE:
        variable, value_or_expression = args
        new_thing = eval(value_or_expression, env)
        env[Symbol(variable)] = new_thing
        return None

    # Command (quote <expression>)
    # (quote (1 2 3))
    elif head == Symbol.QUOTE:
        result = []
        arguments = args[0]
        if isinstance(arguments, list):
            for x in args[0]:
                if isinstance(x, (int, float, bool, str)):
                    result.append(eval(x, env))
                else:
                    result.append(Symbol(x))
        else:
            return arguments
        return result

    # Command (let <expression> <expression>)
    # (let ((x 1) (y 2)) (+ x y))
    elif head == Symbol.LET:
        sub_env = ChainMap(
            {}, global_env)  # reuse the global functions without copying the whole env
        declarations, expr = args
        for declaration in declarations:
            eval([Symbol.DEFINE, declaration[0], declaration[1]], sub_env)

        result = eval(expr, sub_env)
        return result

    # Command (lambda <vars> <body>)
    # (lambda (x) (+ x 1))
    elif head == Symbol.LAMBDA:
        if len(args) == 1:
            print(args[0])
            parameters, expr = args[0]
        else:
            parameters, expr = args
        result = None
        print("parameters: ", parameters)
        if any(
                isinstance(parameter, (float, int, bool))
                for parameter in parameters):
            raise TypeError
        local_ctx = ChainMap({}, global_env)

        def new_fun(*arguments):
            arguments = list(arguments)
            for parameter_number in range(len(parameters)):
                if len(arguments) > 0:
                    local_ctx[parameters[parameter_number]] = arguments[
                        parameter_number]
                else:
                    local_ctx[parameters[parameter_number]] = arguments
            return eval(expr, local_ctx)

        return new_fun

    # List / function call
    # (sqrt 4)
    elif head == Symbol.ADD:
        x, y = args
        return eval(x, env) + eval(y, env)

    elif head == Symbol.SUB:
        x, y = args
        return eval(x, env) - eval(y, env)
    else:
        env_function = eval(head, env)
        arguments = (eval(arg, env) for arg in x[1:])
        return env_function(*arguments)
Example #45
    def from_dict(cls, d, href=None, root=None):
        id = d['id']
        geometry = d['geometry']
        bbox = d['bbox']
        properties = d['properties']
        stac_extensions = d.get('stac_extensions')
        collection_id = None
        if 'collection' in d.keys():
            collection_id = d['collection']

        datetime = properties.get('datetime')
        if datetime is None:
            raise STACError(
                'Item dict is missing a "datetime" property in the "properties" field'
            )
        datetime = dateutil.parser.parse(datetime)

        item = Item(id=id,
                    geometry=geometry,
                    bbox=bbox,
                    datetime=datetime,
                    properties=properties,
                    stac_extensions=stac_extensions,
                    collection=collection_id)

        for l in d['links']:
            item.add_link(Link.from_dict(l))

        for k, v in d['assets'].items():
            asset = Asset.from_dict(v)
            asset.set_owner(item)
            item.assets[k] = asset

        # Find the collection, merge properties if there are
        # common properties to merge.
        collection_to_merge = None
        if collection_id is not None and root is not None:
            collection_to_merge = root._resolved_objects.get_by_id(
                collection_id)
        else:
            collection_link = item.get_single_link('collection')
            if collection_link is not None:
                # If there's a relative collection link, and we have an href passed
                # in, we can resolve the collection from the link. If not,
                # we'll skip merging in collection properties.
                if collection_link.link_type == LinkType.RELATIVE and \
                   not collection_link.is_resolved():
                    if href is not None:
                        collection_link = collection_link.clone()
                        collection_link.target = make_absolute_href(
                            collection_link.target, href)
                    else:
                        collection_link = None

                if collection_link is not None:
                    collection_to_merge = collection_link.resolve_stac_object(
                        root=root).target
                    if item.collection_id is None:
                        item.collection_id = collection_to_merge.id

        if collection_to_merge is not None:
            if collection_to_merge.properties is not None:
                item.properties = dict(
                    ChainMap(item.properties, collection_to_merge.properties))

        return item
Example #46
def get_points_round(round_matches):
    matches_selected = round_matches.sort_values('gameweek')
    dicts = matches_selected.apply(
        lambda x: get_points_per_score(x['teamsData']), axis=1)
    return dict(ChainMap(*list(dicts)))
Example #47
d.append('singh')
print(d)
d.appendleft('asmika')
print(d)
d.pop()  #use popleft
print(d)
"""chainmap is a dictionary like class for creating a single view
of multiple mappings
"""
#Chainmap

from collections import ChainMap

a = {1: 'Hadoop', 2: 'BigData'}
b = {3: 'Java', 4: 'Python'}
temp = ChainMap(a, b)
print(temp)
"""Counter is a dictionary subclass for counting hashable objects
"""
#Counter
from collections import Counter

a = [1, 2, 1, 3, 4, 2, 5, 1, 3, 7, 3, 1, 5, 7, 2]
count = Counter(a)
print(count)
print(list(count.elements()))

print(count.most_common())
sub = {2: 1, 6: 1}
count.subtract(sub)  # subtract() updates the counter in place and returns None
print(count.most_common())
Example #48
print("\nFruit flavor:", frut.sabor)

print("\n", "----------" * 7, "\n")

# Import ChainMap from collections
from collections import ChainMap

# ChainMap: wraps several dictionaries into a single unit
print("\nChainMap\n")

# Exercise 10: combine several dictionaries
print("\n- Exercise 10")

# Create the dictionaries
di1 = {"nombre": "César", "deporte": "natación"}
di2 = {"juez": "Sonia", "edad": 32}
di3 = {"premio": "trofeo", "lugar": "primer puesto"}

# Show the dictionaries
print("\nDictionary 1:", di1)
print("\nDictionary 2:", di2)
print("\nDictionary 3:", di3)

# Use ChainMap
chain = ChainMap(di1, di3, di2)

# Show the result
print(
    "\nUsing ChainMap on the dictionaries above, starting with 1, then 3, and finally 2:\n",
    "\n", chain)
Example #49
#!/usr/bin/env python
# coding=utf-8
'''
Merge multiple dicts or mappings
Given several dicts or mappings, logically merge them into a single mapping and then perform operations on it, such as looking up values or checking whether certain keys exist.
'''

a = {'x': 1, 'z': 3}
b = {'y': 2, 'z': 4}

from collections import ChainMap
c = ChainMap(a, b)
print(c['x'])  # Outputs 1 (from a)
print(c['y'])  # Outputs 2 (from b)
print(c['z'])  # Outputs 3 (from a)

len(c)
#3
list(c.keys())
#['x', 'y', 'z']
list(c.values())
#[1, 2, 3]

c['z'] = 10
c['w'] = 40
del c['x']
print(a)
#{'w': 40, 'z': 10}

values = ChainMap()
values['x'] = 1
Example #50
mul6 = {'6*6': '24', '6*7': '2A', '6*8': '30', '6*9': '36', '6*A': '3C', '6*B': '42', '6*C': '48', '6*D': '4E',
        '6*E': '54', '6*F': '5A'}
mul7 = {'7*7': '31', '7*8': '38', '7*9': '3F', '7*A': '46', '7*B': '4D', '7*C': '54', '7*D': '5B', '7*E': '62',
        '7*F': '69'}
mul8 = {'8*8': '40', '8*9': '48', '8*A': '50', '8*B': '58', '8*C': '60', '8*D': '68', '8*E': '70', '8*F': '78'}
mul9 = {'9*9': '51', '9*A': '5A', '9*B': '63', '9*C': '6C', '9*D': '75', '9*E': '7E', '9*F': '87'}
mulA = {'A*A': '64', 'A*B': '6E', 'A*C': '78', 'A*D': '82', 'A*E': '8C', 'A*F': '96'}
mulB = {'B*B': '79', 'B*C': '84', 'B*D': '8F', 'B*E': '9A', 'B*F': 'A5'}
mulC = {'C*C': '90', 'C*D': '9C', 'C*E': 'A8', 'C*F': 'B4'}
mulD = {'D*D': 'A9', 'D*E': 'B6', 'D*F': 'C3'}
mulE = {'E*E': 'C4', 'E*F': 'D2'}
mulF = {'F*F': 'E1'}

# Combine all of the dictionaries with ChainMap
operations = ChainMap(plus0, plus1, plus2, plus3, plus4, plus5, plus6, plus7, plus8, plus9, plusA, plusB, plusC, plusD,
                      plusE, plusF, mul0, mul1, mul2, mul3, mul4, mul5, mul6, mul7, mul8, mul9, mulA, mulB, mulC, mulD,
                      mulE, mulF)


# Arithmetic operation on two hex digits
def hex_digit_calc(digit1: str, digit2: str, sign: str):
    try:
        return operations[digit1 + sign + digit2]
    except KeyError:
        try:
            return operations[digit2 + sign + digit1]
        except KeyError:
            return None


# Function that adds two numbers
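# --- Hypothetical usage of hex_digit_calc (assumes the plus*/mul* tables above) ---
print(hex_digit_calc('6', '7', '*'))  # 2A -- found directly in mul6
print(hex_digit_calc('7', '6', '*'))  # 2A -- found via the swapped-operand lookup
print(hex_digit_calc('G', '1', '*'))  # None -- no matching key in any table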
예제 #51
0
 def _fd(self):
     return dict(ChainMap(*[getattr(i, '_fd', {}) for i in self._args_diff]))
예제 #52
0
import argparse
import os
from collections import ChainMap

defaults = {"debug": False}

parser = argparse.ArgumentParser()
parser.add_argument('--debug')
args = parser.parse_args()
cli_args = {key: value for key, value in vars(args).items() if value}

config = ChainMap(cli_args, os.environ, defaults)

print(config.get('debug'))
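# --- Supplementary sketch (made-up values, not from the original snippet) ---
# The same lookup order, with a plain dict standing in for os.environ:
# command-line arguments win over the environment, which wins over defaults.
fake_cli = {}                      # nothing passed on the command line
fake_env = {'debug': '1'}          # pretend environment
fake_defaults = {'debug': False}
print(ChainMap(fake_cli, fake_env, fake_defaults).get('debug'))  # '1'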
예제 #53
0
import argparse
import base64
import os
from collections import ChainMap, Counter, OrderedDict, defaultdict

d = defaultdict(lambda: 'N/A')
print(d.get('key', 'test'))
print(d['key'])

# Dictionary that keeps keys in insertion order
print(OrderedDict({'a': 1, 'b': 2, 'd': 3, 'c': 4}))

# Look up keys following the defined priority order
defaults = {'color': 'red', 'user': '******'}
# Parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-c', '--color')
namespace = parser.parse_args()
command_line_args = {k: v for k, v in vars(namespace).items() if v}
chain_map = ChainMap(command_line_args, os.environ, defaults)
print(chain_map['user'])

# Counter
c = Counter()
for ch in 'programming':
    c[ch] = c[ch] + 1
print(c)

# ==================================================BASE64 encoding=================================================

b = base64.encodebytes(b'yoo, cool')
print(b)
print(base64.decodebytes(b))
print(base64.urlsafe_b64encode(b'/user?name=guest'))
예제 #54
0
    async def _process_command(self, command):
        '''Process a command from a client, and return the server response'''
        def get_db_entry():
            chan = self.circuit.channels_sid[command.sid]
            db_entry = self.context[chan.name]
            return chan, db_entry

        if command is ca.DISCONNECTED:
            raise DisconnectedCircuit()
        elif isinstance(command, ca.VersionRequest):
            return [ca.VersionResponse(ca.DEFAULT_PROTOCOL_VERSION)]
        elif isinstance(command, ca.SearchRequest):
            pv_name = command.name
            try:
                self.context[pv_name]
            except KeyError:
                if command.reply == ca.DO_REPLY:
                    return [
                        ca.NotFoundResponse(
                            version=ca.DEFAULT_PROTOCOL_VERSION,
                            cid=command.cid)
                    ]
            else:
                return [
                    ca.SearchResponse(self.context.port, None, command.cid,
                                      ca.DEFAULT_PROTOCOL_VERSION)
                ]
        elif isinstance(command, ca.CreateChanRequest):
            try:
                db_entry = self.context[command.name]
            except KeyError:
                self.log.debug('Client requested invalid channel name: %s',
                               command.name)
                return [ca.CreateChFailResponse(cid=command.cid)]

            access = db_entry.check_access(self.client_hostname,
                                           self.client_username)

            return [
                ca.AccessRightsResponse(cid=command.cid, access_rights=access),
                ca.CreateChanResponse(data_type=db_entry.data_type,
                                      data_count=db_entry.max_length,
                                      cid=command.cid,
                                      sid=self.circuit.new_channel_id()),
            ]
        elif isinstance(command, ca.HostNameRequest):
            self.client_hostname = command.name
        elif isinstance(command, ca.ClientNameRequest):
            self.client_username = command.name
        elif isinstance(command, (ca.ReadNotifyRequest, ca.ReadRequest)):
            chan, db_entry = get_db_entry()
            try:
                data_type = command.data_type
            except ValueError:
                raise ca.RemoteProtocolError('Invalid data type')

            # If we are in the middle of processing a Write[Notify]Request,
            # allow a bit of time for that to (maybe) finish. Some requests
            # may take a long time, so give up rather quickly to avoid
            # introducing too much latency.
            await self.write_event.wait(timeout=WRITE_LOCK_TIMEOUT)

            metadata, data = await db_entry.auth_read(
                self.client_hostname,
                self.client_username,
                data_type,
                user_address=self.circuit.address)

            old_version = self.circuit.protocol_version < 13
            if command.data_count > 0 or old_version:
                data = data[:command.data_count]

            # This is a pass-through if arr is None.
            data = apply_arr_filter(chan.channel_filter.arr, data)
            # If the timestamp filter is active, swap in the filtered timestamp.
            # The metadata must be copied because not all clients will have the
            # timestamp filter.
            if chan.channel_filter.ts and command.data_type in ca.time_types:
                time_type = type(metadata)
                now = ca.TimeStamp.from_unix_timestamp(time.time())
                metadata = time_type(
                    **ChainMap({'stamp': now},
                               dict((field, getattr(metadata, field))
                                    for field, _ in time_type._fields_)))
            notify = isinstance(command, ca.ReadNotifyRequest)
            data_count = db_entry.calculate_length(data)
            return [
                chan.read(data=data,
                          data_type=command.data_type,
                          data_count=data_count,
                          status=1,
                          ioid=command.ioid,
                          metadata=metadata,
                          notify=notify)
            ]
        elif isinstance(command, (ca.WriteRequest, ca.WriteNotifyRequest)):
            chan, db_entry = get_db_entry()
            client_waiting = isinstance(command, ca.WriteNotifyRequest)

            async def handle_write():
                '''Wait for an asynchronous caput to finish'''
                try:
                    write_status = await db_entry.auth_write(
                        self.client_hostname,
                        self.client_username,
                        command.data,
                        command.data_type,
                        command.metadata,
                        user_address=self.circuit.address)
                except Exception as ex:
                    self.log.exception('Invalid write request by %s (%s): %r',
                                       self.client_username,
                                       self.client_hostname, command)
                    cid = self.circuit.channels_sid[command.sid].cid
                    response_command = ca.ErrorResponse(
                        command,
                        cid,
                        status=ca.CAStatus.ECA_PUTFAIL,
                        error_message=('Python exception: {} {}'
                                       ''.format(type(ex).__name__, ex)))
                    await self.send(response_command)
                else:
                    if client_waiting:
                        if write_status is None:
                            # Errors can be passed back via exceptions;
                            # returning None for write_status is treated as
                            # a successful write.
                            write_status = True

                        response_command = chan.write(
                            ioid=command.ioid,
                            status=write_status,
                            data_count=db_entry.length)
                        await self.send(response_command)
                finally:
                    maybe_awaitable = self.write_event.set()
                    # The curio backend makes this an awaitable thing.
                    if maybe_awaitable is not None:
                        await maybe_awaitable

            self.write_event.clear()
            await self._start_write_task(handle_write)
        elif isinstance(command, ca.EventAddRequest):
            chan, db_entry = get_db_entry()
            # TODO no support for deprecated low/high/to
            sub = Subscription(mask=command.mask,
                               channel_filter=chan.channel_filter,
                               channel=chan,
                               circuit=self,
                               data_type=command.data_type,
                               data_count=command.data_count,
                               subscriptionid=command.subscriptionid,
                               db_entry=db_entry)
            sub_spec = SubscriptionSpec(db_entry=db_entry,
                                        data_type=command.data_type,
                                        mask=command.mask,
                                        channel_filter=chan.channel_filter)
            self.subscriptions[sub_spec].append(sub)
            self.context.subscriptions[sub_spec].append(sub)

            # If we are in the middle of processing a Write[Notify]Request,
            # allow a bit of time for that to (maybe) finish. Some requests
            # may take a long time, so give up rather quickly to avoid
            # introducing too much latency.
            if not self.write_event.is_set():
                await self.write_event.wait(timeout=WRITE_LOCK_TIMEOUT)

            await db_entry.subscribe(self.context.subscription_queue, sub_spec,
                                     sub)
        elif isinstance(command, ca.EventCancelRequest):
            chan, db_entry = get_db_entry()
            removed = await self._cull_subscriptions(
                db_entry,
                lambda sub: sub.subscriptionid == command.subscriptionid)
            if removed:
                _, removed_sub = removed[0]
                data_count = removed_sub.data_count
            else:
                data_count = db_entry.length
            return [
                chan.unsubscribe(command.subscriptionid,
                                 data_type=command.data_type,
                                 data_count=data_count)
            ]
        elif isinstance(command, ca.EventsOnRequest):
            # Immediately send most recent updates for all subscriptions.
            most_recent_updates = list(self.most_recent_updates.values())
            self.most_recent_updates.clear()
            if most_recent_updates:
                await self.send(*most_recent_updates)
            maybe_awaitable = self.events_on.set()
            # The curio backend makes this an awaitable thing.
            if maybe_awaitable is not None:
                await maybe_awaitable
            self.circuit.log.info("Client at %s:%d has turned events on.",
                                  *self.circuit.address)
        elif isinstance(command, ca.EventsOffRequest):
            # The client has signaled that it does not think it will be able to
            # catch up to the backlog. Clear all updates queued to be sent...
            self.unexpired_updates.clear()
            # ...and tell the Context that any future updates from ChannelData
            # should not be added to this circuit's queue until further notice.
            self.events_on.clear()
            self.circuit.log.info("Client at %s:%d has turned events off.",
                                  *self.circuit.address)
        elif isinstance(command, ca.ClearChannelRequest):
            chan, db_entry = get_db_entry()
            await self._cull_subscriptions(
                db_entry, lambda sub: sub.channel == command.sid)
            return [chan.clear()]
        elif isinstance(command, ca.EchoRequest):
            return [ca.EchoResponse()]
예제 #55
0
def _flatten(list_of_dicts):
    list_of_dicts = [d for d in list_of_dicts if d != []]
    return dict(ChainMap(*list_of_dicts))
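# Hypothetical usage of _flatten above: empty-list placeholders are dropped
# before the remaining dicts are merged (key order may vary across versions).
print(_flatten([{'a': 1}, [], {'b': 2}]))  # {'b': 2, 'a': 1}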
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @time   : 18-10-12 9:09 PM
# @author : Feng_Hui
# @email  : [email protected]
# how to use chainmap
# https://docs.python.org/3/library/collections.html#collections.ChainMap
from collections import ChainMap

dict1 = {'a': 1, 'b': 2}
dict2 = {'c': 3, 'd': 4}
chain_map = ChainMap(dict1, dict2)
print(chain_map, type(chain_map))
print(chain_map.get('a'))
print(chain_map.maps)  # mapping list
dict2['e'] = 5  # update dict2; the ChainMap sees the change because it keeps references
print(chain_map.maps)
print(chain_map.parents)  # a new ChainMap containing every map except the first
print(chain_map.new_child())  # a new ChainMap with a fresh map (default {}) placed in front
dict3 = {'f': 6}
print(chain_map.new_child(dict3))
예제 #57
0
File: scm.py  Project: mellanox-sonar/talks
 def __call__(self, *params):
     args = {name: val for name, val in zip(self.args, params)}
     env = ChainMap(args, self.env)
     return evaluate(self.body, env)
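# --- Supplementary sketch (hypothetical names, not from scm.py) ---
# The ChainMap above layers the call's argument bindings over the enclosing
# environment, so lookups fall back to outer scopes.
from collections import ChainMap

global_env = {'x': 10, 'y': 20}
call_args = {'x': 1}                       # the parameter shadows the global x
frame = ChainMap(call_args, global_env)
print(frame['x'], frame['y'])              # 1 20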
예제 #58
0
File: scm.py  Project: mellanox-sonar/talks
        body = lispify(self.body)
        return f'(lambda ({args}) {body})'


def begin(*args):
    if args:
        return args[-1]


builtin = ChainMap({
    '+': operator.add,
    '-': operator.sub,
    '*': operator.mul,
    '/': operator.truediv,
    '>': operator.gt,
    '<': operator.lt,
    '>=': operator.ge,
    '<=': operator.le,
    '=': operator.eq,
    '%': operator.mod,
    'begin': begin,
})


def evaluate(expr, env):
    # a
    if isinstance(expr, str):  # variable
        return env[expr]

    # 2.3
    if not isinstance(expr, list):  # constant literal
예제 #59
0
    def evaluate(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
    ):
        """
        VOC average precision (AP) evaluation. Inference is run over the test
        dataset and the results are evaluated by the COCO API.

        NOTE: This function will change training mode to False, please save states if needed.

        Args:
            model : model to evaluate.

        Returns:
            ap50_95 (float) : COCO style AP of IoU=50:95
            ap50 (float) : VOC 2007 metric AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = {}
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        nms_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                imgs = imgs.type(tensor_type)

                # skip the last iters since the batch size might not be enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

                outputs = postprocess(
                    outputs, self.num_classes, self.confthre, self.nmsthre
                )
                if is_time_record:
                    nms_end = time_synchronized()
                    nms_time += nms_end - infer_end

            data_list.update(self.convert_to_voc_format(outputs, info_imgs, ids))

        statistics = torch.cuda.FloatTensor([inference_time, nms_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = ChainMap(*data_list)
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results
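# --- Supplementary sketch (made-up data, not from the original evaluator) ---
# In the distributed branch above, gather() returns one result dict per rank
# and ChainMap flattens them into a single id -> prediction mapping.
from collections import ChainMap

rank_results = [{1: 'pred_a', 2: 'pred_b'}, {3: 'pred_c'}]
merged = dict(ChainMap(*rank_results))
print(sorted(merged))   # [1, 2, 3]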
예제 #60
0
            full_pos_dist[k] = pd.DataFrame(full_pos_dist[k])

    if debug_mode:
        import pdb
        pdb.set_trace()
        x = 1

    return full_pos_dist


all_distances, all_magnetometer = multiple_floors_train_predict(
    config, df, models_group_name, mode, holdout_df, test_floors,
    overwrite_models, test_type_mapping, only_public_test_preds,
    test_waypoint_times, debug_fn)
if debug_fn is None:
    all_distances = dict(ChainMap(*[o for o in all_distances if o]))

    record_time = str(datetime.datetime.now())[:19]
    Path(preds_folder).mkdir(parents=True, exist_ok=True)
    file_ext = models_group_name + ' - ' + mode + save_ext + (
        ' - full distances - ') + record_time + '.pickle'
    full_predictions_path = preds_folder / file_ext
    with open(full_predictions_path, 'wb') as handle:
        pickle.dump(all_distances, handle, protocol=pickle.HIGHEST_PROTOCOL)

    if mode == 'valid' and store_valid_magn_z:
        combined_magnetometer = pd.concat(all_magnetometer)
        combined_magnetometer.sort_values(
            ['site', 'floor', 'fn', 'center_sensor_time'], inplace=True)
        magn_ext = models_group_name + save_ext + (
            ' - magnetometer - ') + record_time + '.csv'