Example #1
from collections import Counter, deque, ChainMap, namedtuple

# liste1=[1,2,3,4,5,1,2,3,4,5,6,7,8,1,2,3,2,1]
# liste_frekvens=Counter(liste1)
# print(liste_frekvens[1])
# print(list(liste_frekvens.elements()))
# print(list(liste_frekvens.most_common()))
# trekk_fra={1:2,2:2}
# liste_frekvens.subtract(trekk_fra)
# print(liste_frekvens)

liste2 = ['a', 'b', 'c', 'a', 'b', 'a', 'c', 'a']
deq = deque(liste2)
print(deq)
deq.append('d')
deq.appendleft('e')
print(deq)
# deq.pop()
# deq.clear()
# print(deq.count('a'))

dict1 = {'a': 1, 'b': 2, 'c': 3}
dict2 = {'d': 4, 'e': 5, 'f': 6}

chain_map = ChainMap(dict1, dict2)
print(chain_map.maps)
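# .maps exposes the underlying dicts as a list, in lookup order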

student = namedtuple('student', 'fornavn, etternavn, alder')
s1 = student('Magnus', 'Warland', '20')
s2 = student('Theo', 'Warland', '8')
print(s2)
Example #2
import itertools
from collections import deque

dq = deque('abcdefghij')  # assumed sample deque; the original snippet's dq was defined earlier
print(list(itertools.islice(dq, 3, 9)))

dq2 = deque([], maxlen=3)
for i in range(6):
    dq2.append(i)
    print(dq2)

# ChainMap
from collections import ChainMap
defaults = {
    'theme': 'Default',
    'language': 'eng',
    'showIndex': True,
    'showFooter': True
}
cm = ChainMap(defaults)
cm2 = cm.new_child({'theme': 'bluesky'})
print(cm)
print(cm2)
print(cm2['theme'])  # 'bluesky', from the child map
cm2.pop('theme')     # pop() removes the key from the first map only
print(cm2['theme'])  # falls back to 'Default' from defaults

cm2.maps[0] = {'theme': 'desert', 'showIndex': False}
print(cm2)
print(cm2['showIndex'])  # False, from the replaced child map

# defaultdict
from collections import defaultdict
dd = defaultdict(int)
words = 'red blue green red yellow blue red green green red'.split()
for word in words:
    dd[word] += 1
print(dd)
Example #3
from collections import ChainMap

d1 = {"a": "aaa", "b": "bbbb", "f": "v1111"}
d2 = {"c": "ccc", "d": "dddd", "f": "v2222"}

for key, value in d1.items():
    print(key, value)

for key, value in d2.items():
    print(key, value)

print("************")
# for duplicate keys, values from d2 do not override those from d1
d3 = ChainMap(d1, d2)
for key, value in d3.items():
    print(key, value)

print(d3.maps)
print(d3.maps[0]["a"])
Example #4
    def delete(self, endpoint, session=None):
        headers = Api.base_headers
        if session:
            headers = ChainMap(session.auth, Api.base_headers)
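            # auth entries from the session take precedence over Api.base_headers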

        requests.delete(self.baseurl + endpoint, headers=headers)
Example #5
from collections import ChainMap

def config():
    check_sha = "0694156a5f80fd1647ed997470d3d03ed87f9d9f"
    return ChainMap(DEFAULT_CONFIG).new_child({"check_sha": check_sha})
Example #6
 def proxy_dict(self, orig_dict):
     self._proxy_dict = ChainMap(*orig_dict)
Example #7
 def __init__(self, data: Optional[dict] = None):
     if data is None:
         data = {}
     self.core = ChainMap(data)
Example #8
def scan_and_fly(xstart, xstop, xnum, ystart, ystop, ynum, dwell, *,
                 delta=None, shutter=True,
                 xmotor=hf_stage.x, ymotor=hf_stage.y,
                 xs=xs, ion=sclr1, align=False,
                 flying_zebra=flying_zebra, md=None):
    """

    Read IO from SIS3820.
    Zebra buffers x(t) points as a flyer.
    Xspress3 is our detector.
    The Aerotech stage has the x and y positioners.
    delta should be chosen so that it takes about 0.5 sec to reach the gate??
    ymotor  slow axis
    xmotor  fast axis
    """
    c2pitch_kill = EpicsSignal("XF:05IDA-OP:1{Mono:HDCM-Ax:P2}Cmd:Kill-Cmd")
    if md is None:
        md = {}

    # If delta is None, set delta based on time for acceleration
    if delta is None:
        # delta = 0.002  # old default value
        v = (xstop - xstart) / (xnum-1) / dwell  # compute "stage speed"
        t_acc = 1.0  # acceleration time, default 1.0 s
        delta = t_acc * v  # distance the stage will travel in t_acc
    
    yield from abs_set(ymotor, ystart, wait=True) # ready to move
    yield from abs_set(xmotor, xstart - delta, wait=True) # ready to move

    if shutter is True:
        yield from mv(shut_b, 'Open')

    if align:
        fly_ps = PeakStats(dcm.c2_pitch.name, i0.name)
        align_scan = scan([sclr1], dcm.c2_pitch, -19.320, -19.360, 41)
        align_scan = bp.subs_wrapper(align_scan, fly_ps)
        yield from align_scan
        yield from abs_set(dcm.c2_pitch, fly_ps.max[0], wait=True)
        #ttime.sleep(10)
        #yield from abs_set(c2pitch_kill, 1)

    md = ChainMap(md, {
        'plan_name': 'scan_and_fly',
        'detectors': [zebra.name, xs.name, ion.name],
        'dwell': dwell,
        'shape': (xnum, ynum),
        'scaninfo': {'type': 'XRF_fly',
                     'raster': False,
                     'fast_axis': flying_zebra._fast_axis},
        # 'scaninfo': {'type': 'E_tomo',
        #              'raster': False,
        #              'fast_axis': flying_zebra._fast_axis},
        'scan_params': [xstart, xstop, xnum, ystart, ystop, ynum, dwell],
        'scan_input': [xstart, xstop, xnum, ystart, ystop, ynum, dwell],
        'delta': delta
        }
    )

    if (xs.name == 'xs2'):
        md['scaninfo']['type'] = 'XRF_E_tomo_fly'

    @stage_decorator([xs])
    def fly_each_step(detectors, motor, step, firststep):
        "See http://nsls-ii.github.io/bluesky/plans.html#the-per-step-hook"
        # First, let 'scan' handle the normal y step, including a checkpoint.
        yield from one_1d_step(detectors, motor, step)

        # Now do the x steps.
        v = (xstop - xstart) / (xnum-1) / dwell  # compute "stage speed"
        yield from abs_set(xmotor, xstart - delta, wait=True) # ready to move
        yield from abs_set(xmotor.velocity, v, wait=True)  # set the "stage speed"

        yield from abs_set(xs.hdf5.num_capture, xnum, wait=True)
        yield from abs_set(xs.settings.num_images, xnum, wait=True)
        yield from abs_set(ion.nuse_all, xnum)
        # arm the Zebra (start caching x positions)


        yield from kickoff(flying_zebra, xstart=xstart, xstop=xstop, xnum=xnum, dwell=dwell, wait=True)
        yield from abs_set(ion.erase_start, 1) # arm SIS3820, note that there is a 1 sec delay in setting X into motion
                                               # so the first point *in each row* won't normalize...
        yield from bps.trigger(xs, group='row')
        #if firststep == True:
        #    ttime.sleep(0.)
        yield from bps.sleep(1.5)
        yield from abs_set(xmotor, xstop+1*delta, group='row')  # move in x
        yield from bps.wait(group='row')
        # yield from abs_set(xs.settings.acquire, 0)  # stop acquiring images
        yield from abs_set(ion.stop_all, 1)  # stop acquiring scaler
        yield from complete(flying_zebra)  # tell the Zebra we are done
        yield from collect(flying_zebra)  # extract data from Zebra
        if ('e_tomo' in xmotor.name):
            v_return = 4
            v_max = xmotor.velocity.high_limit
            if (v_return > v_max):
                xmotor.velocity.set(v_max)
            else:
                xmotor.velocity.set(v_return)
        else:
            yield from abs_set(xmotor.velocity, 1.0, wait=True)  # set the "stage speed"

    def at_scan(name, doc):
        scanrecord.current_scan.put(doc['uid'][:6])
        scanrecord.current_scan_id.put(str(doc['scan_id']))
        scanrecord.current_type.put(md['scaninfo']['type'])
        scanrecord.scanning.put(True)
        scanrecord.time_remaining.put((dwell*xnum + 3.8)/3600)

    def finalize_scan(name, doc):
        logscan_detailed('xrf_fly')
        scanrecord.scanning.put(False)
        scanrecord.time_remaining.put(0)

    #@subs_decorator([LiveTable([ymotor]), RowBasedLiveGrid((ynum, xnum), ion.name, row_key=ymotor.name), LiveZebraPlot()])
    #@subs_decorator([LiveTable([ymotor]), LiveGrid((ynum, xnum), sclr1.mca1.name)])
    @subs_decorator([LiveGrid((ynum, xnum+1),
                              xs.channel1.rois.roi01.value.name,
                              extent=(xstart, xstop, ystop, ystart))])
    @subs_decorator({'start': at_scan})
    @subs_decorator({'stop': finalize_scan})
    # monitor values from xs
    @monitor_during_decorator([xs.channel1.rois.roi01.value])
    #@monitor_during_decorator([xs], run=False)  # monitor values from xs
    @stage_decorator([flying_zebra])  # Below, 'scan' stage ymotor.
    @run_decorator(md=md)
    def plan():
        yield from bps.mov(xs.total_points, xnum)
        # added to "prime" the detector
        #yield from abs_set(xs.settings.trigger_mode, 'TTL Veto Only')

        yield from bps.mov(xs.external_trig, True)
        ystep = 0

        for step in np.linspace(ystart, ystop, ynum):
            yield from abs_set(scanrecord.time_remaining,
                               (ynum - ystep) * ( dwell * xnum + 3.8 ) / 3600.)
            ystep = ystep + 1
            # 'arm' the xs for outputting fly data
            yield from bps.mov(xs.fly_next, True)
#            print('h5 armed\t',time.time())
            if step == ystart:
                firststep = True
            else:
                firststep = False
            yield from fly_each_step([], ymotor, step, firststep)
#            print('return from step\t',time.time())
        yield from bps.mov(xs.external_trig, False,
                          ion.count_mode, 1)
        if shutter is True:
            yield from mv(shut_b, 'Close')

    return (yield from plan())
Example #9
    return {'train': acc_train_df, 'valid': valid_df, 'words': this_class}


recompute = False

print("setup train-val split ")

word_list = {}

if recompute:
    grouped_value_dict = result.groupby([
        'label'
    ]).progress_apply(lambda x: iterate_by_label(x.name, x, count)).values
    train_df = pd.concat([g['train'] for g in grouped_value_dict])
    valid_df = pd.concat([g['valid'] for g in grouped_value_dict])
    word_list = dict(ChainMap(*[g['words'] for g in grouped_value_dict]))
    train_df.to_pickle(save_dir + "/all_train_df_1000_txfr.pkl")
    valid_df.to_pickle(save_dir + "/all_valid_df_1000_txfr.pkl")
    with open(save_dir + '/word_list.pkl', 'wb') as f:
        pickle.dump(word_list, f)
else:
    train_df = pd.read_pickle(save_dir + "/all_train_df_1000_txfr.pkl")
    valid_df = pd.read_pickle(save_dir + "/all_valid_df_1000_txfr.pkl")
    with open(save_dir + '/word_list.pkl', 'rb') as f:
        word_list = pickle.load(f)

# ++++++++++++ Examine the size of valid_df, train_df and potential batches

print("valid_df = {}, train_df = {} ".format(len(valid_df['text']),
                                             len(train_df['text'])))
Example #10
keywords = "laranjal boulos -fake"
date = '2020-11-01'
until = '2020-11-28'


for tweet in tweepy.Cursor(api.search,
                           q = keywords,
                           since = date,
                           until = until,
                           #lang = "pt",
                           tweet_mode='extended').items(300000):

    # Write a row to the CSV file. I use encode UTF-8
    #csvWriter.writerow([tweet.created_at,tweet.user.screen_name,tweet.user.location, tweet.full_text.encode('utf-8')])
    entities = tweet.entities.get('user_mentions')
    entities = dict(ChainMap(*entities))

    person_dict = {'User': tweet.user.screen_name,
    #'Location': tweet.user.location,
    'Tweeter' : tweet.full_text.encode('ascii',errors='ignore'),
    'Mentions': entities.get('screen_name')
    }
    person_dict2 = {
    'Mentions': entities.get('screen_name'),
    'User': tweet.user.screen_name
    }
    
    csvWriter.writerow(person_dict)
    csvWriter2.writerow(person_dict2)

Example #11
 def __init__(self, body: [ExprStmt]):
     BlockStmt.__init__(self, body)
     from collections import ChainMap
     self.types = ChainMap()
Example #12
File: bench.py  Project: seungweonpark/taf
 def load(self, config):
     if not set(config).issubset(self._ATTRIBUTES):
         raise AttributeError
     self._config = ChainMap(config, self._ATTRIBUTES)
Example #13
 def _fd(self):
     return dict(ChainMap(*[getattr(i, '_fd', {})
                            for i in self._args_diff]))
Example #14
from collections import ChainMap
from typing import Dict

def gather_validators(type_: 'ModelOrDc') -> Dict[str, classmethod]:
    all_attributes = ChainMap(*[cls.__dict__ for cls in type_.__mro__])
    return {k: v for k, v in all_attributes.items() if hasattr(v, '__validator_config')}
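
A minimal sketch of what this MRO walk does, using a made-up class hierarchy: the ChainMap searches each class __dict__ in method-resolution order, so a subclass attribute shadows the one it inherits.

from collections import ChainMap

class Base:
    greeting = "hello"

class Child(Base):
    greeting = "hi"   # shadows Base.greeting
    extra = 1

attrs = ChainMap(*[cls.__dict__ for cls in Child.__mro__])
print(attrs["greeting"])  # 'hi' -- Child.__dict__ is searched first
print(attrs["extra"])     # 1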
Example #15
 def run_batch_from_func_name(self, func_name) -> Dict:
     results = []
     for fx_name, hook_result in self._internals.items():
         func = getattr(hook_result, func_name)
         results.append(func(include_forked_originals=False))
     return dict(ChainMap(*sum(results, [])))
Example #16

# ChainMap
# A ChainMap behaves like a dict, but lookups search its
# underlying dicts one by one, in order.
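# e.g. ChainMap({'k': 1}, {'k': 2})['k'] == 1 -- the first map wins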

from collections import ChainMap
import os, argparse

# structure parameter
defaults = {'color': 'red', 'user': '******'}

# structure cmd parameter
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-c', '--color')
namespace = parser.parse_args()
command_line_args = {k: v for k, v in vars(namespace).items() if v}

# combine ChainMap
combined = ChainMap(command_line_args, os.environ, defaults)
print('color = %s' % combined['color'])
print('user = %s' % combined['user'])

# Counter
from collections import Counter
c = Counter()
for ch in 'programming':
    c[ch] = c[ch] + 1
print(c)
Example #17
# example.py
#
# Example of combining dicts into a chainmap

a = {'x': 1, 'z': 3 }
b = {'y': 2, 'z': 4 }

# (a) Simple example of combining
from collections import ChainMap
c = ChainMap(a, b)  # order of the passed dicts matters!


print(c['x'])      # Outputs 1  (from a)
print(c['y'])      # Outputs 2  (from b)
print(c['z'])      # Outputs 3  (from a)

# Output some common values
print('len(c):', len(c))
print('c.keys():', list(c.keys()))
print('c.values():', list(c.values()))

# Modify some values
c['z'] = 10  # only the 'z' in a is affected; mutations always apply to the first map
c['w'] = 40
del c['x']
print("a:", a)


# Example of stacking mappings (like scopes)
values = ChainMap()
values['x'] = 1
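
The snippet cuts off here; a plausible continuation of the scope-stacking idea (following the well-known ChainMap recipe) pushes and pops "scopes" with new_child() and parents:

values = values.new_child()   # push a new scope
values['x'] = 2
values = values.new_child()
values['x'] = 3
print(values['x'])            # 3, from the innermost scope
values = values.parents       # pop the innermost scope
print(values['x'])            # 2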
Example #18
File: core.py  Project: groutr/h5netcdf
    def __init__(self, path, mode="a", invalid_netcdf=None, phony_dims=None, **kwargs):
        if h5py.__version__ >= LooseVersion("3.0.0"):
            self.decode_vlen_strings = kwargs.pop("decode_vlen_strings", None)
        try:
            if isinstance(path, str):
                if path.startswith(("http://", "https://", "hdf5://")):
                    if no_h5pyd:
                        raise ImportError(
                            "No module named 'h5pyd'. h5pyd is required for "
                            "opening urls: {}".format(path)
                        )
                    try:
                        with h5pyd.File(path, "r") as f:  # noqa
                            pass
                        self._preexisting_file = True
                    except IOError:
                        self._preexisting_file = False
                    self._h5file = h5pyd.File(path, mode, **kwargs)
                else:
                    self._preexisting_file = os.path.exists(path)
                    self._h5file = h5py.File(path, mode, **kwargs)
            else:  # file-like object
                if h5py.__version__ < LooseVersion("2.9.0"):
                    raise TypeError(
                        "h5py version ({}) must be greater than 2.9.0 to load "
                        "file-like objects.".format(h5py.__version__)
                    )
                else:
                    self._preexisting_file = mode in {"r", "r+", "a"}
                    self._h5file = h5py.File(path, mode, **kwargs)
        except Exception:
            self._closed = True
            raise
        else:
            self._closed = False

        self._mode = mode
        self._root = self
        self._h5path = "/"
        self.invalid_netcdf = invalid_netcdf
        # If invalid_netcdf is None, we'll disable writing _NCProperties only
        # if we actually use invalid NetCDF features.
        self._write_ncproperties = invalid_netcdf is not True

        # phony dimension handling
        self._phony_dims_mode = phony_dims
        if phony_dims is not None:
            self._phony_dim_count = 0
            if phony_dims not in ["sort", "access"]:
                raise ValueError(
                    "unknown value %r for phony_dims\n"
                    "Use phony_dims=%r for sorted naming, "
                    "phony_dims=%r for per access naming."
                    % (phony_dims, "sort", "access")
                )

        # string decoding
        if h5py.__version__ >= LooseVersion("3.0.0"):
            if "legacy" in self._cls_name:
                if self.decode_vlen_strings is not None:
                    msg = (
                        "'decode_vlen_strings' keyword argument is not allowed in h5netcdf "
                        "legacy API."
                    )
                    raise TypeError(msg)
                self.decode_vlen_strings = True
            else:
                if self.decode_vlen_strings is None:
                    msg = (
                        "String decoding changed with h5py >= 3.0. "
                        "See https://docs.h5py.org/en/latest/strings.html for more details. "
                        "Currently backwards compatibility with h5py < 3.0 is kept by "
                        "decoding vlen strings per default. This will change in future "
                        "versions for consistency with h5py >= 3.0. To silence this "
                        "warning set kwarg ``decode_vlen_strings=False``. Setting "
                        "``decode_vlen_strings=True`` forces vlen string decoding."
                    )
                    warnings.warn(msg, FutureWarning, stacklevel=0)
                    self.decode_vlen_strings = True

        # These maps keep track of dimensions in terms of size (might be
        # unlimited), current size (identical to size for limited dimensions),
        # their position, and look-up for HDF5 datasets corresponding to a
        # dimension.
        self._dim_sizes = ChainMap()
        self._current_dim_sizes = ChainMap()
        self._dim_order = ChainMap()
        self._all_h5groups = ChainMap(self._h5group)
        super(File, self).__init__(self, self._h5path)
        # initialize all groups to detect/create phony dimensions
        # mimics netcdf-c style naming
        if phony_dims == "sort":
            self._determine_phony_dimensions()
Example #19
def apply_blocks(molecule, blocks):
    """
    Generate a new :class:`~vermouth.molecule.Molecule` based on the residue
    names and other attributes of `molecule` from `blocks`.

    Parameters
    ----------
    molecule: vermouth.molecule.Molecule
        The molecule to process.
    blocks: dict[str, vermouth.molecule.Block]
        The blocks known.

    Returns
    -------
    vermouth.molecule.Molecule
        A new molecule with attributes from the old `molecule`, as well as all
        interactions described by `blocks`.
    """
    graph_out = Molecule(
        force_field=molecule.force_field,
        meta=molecule.meta.copy()
    )
    residue_graph = make_residue_graph(molecule)

    # nrexcl may not be defined, but if it is we probably want to keep it
    try:
        graph_out.nrexcl = molecule.nrexcl
    except AttributeError:
        graph_out.nrexcl = None

    old_to_new_idxs = {}
    at_idx = 0
    charge_group_offset = 0
    for res_idx in residue_graph:
        residue = residue_graph.nodes[res_idx]
        res_graph = residue['graph']
        resname = residue['resname']
        block = blocks[resname]
        atname_to_idx = {}

        if graph_out.nrexcl is None:
            if hasattr(block, 'nrexcl'):
                graph_out.nrexcl = block.nrexcl
        else:
            if (hasattr(block, 'nrexcl')
                    and block.nrexcl is not None
                    and block.nrexcl != graph_out.nrexcl):
                raise ValueError('Not all blocks share the same value for "nrexcl".')

        for block_idx in block:
            atname = block.nodes[block_idx]['atomname']
            atom = list(res_graph.find_atoms(atomname=atname))
            assert len(atom) == 1, (block.name, atname, atom)
            old_to_new_idxs[atom[0]] = at_idx
            atname_to_idx[atname] = at_idx
            attrs = molecule.nodes[atom[0]]
            graph_out.add_node(at_idx, **ChainMap(block.nodes[atname], attrs))
            graph_out.nodes[at_idx]['graph'] = molecule.subgraph(atom)
            graph_out.nodes[at_idx]['charge_group'] += charge_group_offset
            graph_out.nodes[at_idx]['resid'] = attrs['resid']
            at_idx += 1
        charge_group_offset = graph_out.nodes[at_idx - 1]['charge_group']
        for idx, jdx, data in block.edges(data=True):
            idx = atname_to_idx[idx]
            jdx = atname_to_idx[jdx]
            graph_out.add_edge(idx, jdx, **data)
        for inter_type, interactions in block.interactions.items():
            for interaction in interactions:
                atom_idxs = []
                for atom_name in interaction.atoms:
                    atom_index = graph_out.find_atoms(atomname=atom_name,
                                                      resname=residue['resname'],
                                                      resid=residue['resid'])
                    atom_index = list(atom_index)
                    if not atom_index:
                        msg = ('Could not find an atom named "{}" '
                               'with resname being "{}" '
                               'and resid being "{}".')
                        raise ValueError(msg.format(atom_name, residue['resname'], residue['resid']))
                    atom_idxs.extend(atom_index)
                interaction = interaction._replace(atoms=atom_idxs)
                graph_out.add_interaction(inter_type, *interaction)

    # This makes edges between residues. We need to do this, since they can't
    # come from the blocks and we need them to find the links locations.
    # TODO This should not be done here, but by do_mapping, which might *also*
    #      do it at the moment
    for res_idx, res_jdx in residue_graph.edges():
        for old_idx, old_jdx in product(residue_graph.nodes[res_idx]['graph'],
                                        residue_graph.nodes[res_jdx]['graph']):
            try:
                # Usually termini, PTMs, etc
                idx = old_to_new_idxs[old_idx]
                jdx = old_to_new_idxs[old_jdx]
            except KeyError:
                continue
            if molecule.has_edge(old_idx, old_jdx):
                graph_out.add_edge(idx, jdx)
    return graph_out
Example #21
from collections import ChainMap

from pyjexl.parser import (ArrayLiteral, BinaryExpression,
                           ConditionalExpression, Identifier, Literal,
                           ObjectLiteral, Transform, UnaryExpression,
                           FilterExpression)

from . import default_config, DefaultParser

_ops = ChainMap(default_config.binary_operators,
                default_config.unary_operators)


def test_literal():
    assert DefaultParser().parse('1') == Literal(1.0)


def test_binary_expression():
    assert DefaultParser().parse('1+2') == BinaryExpression(operator=_ops['+'],
                                                            left=Literal(1),
                                                            right=Literal(2))


def test_binary_expression_priority_right():
    assert DefaultParser().parse('2+3*4') == BinaryExpression(
        operator=_ops['+'],
        left=Literal(2),
        right=BinaryExpression(
            operator=_ops['*'],
            left=Literal(3),
            right=Literal(4),
        ),
    )
Example #22
File: request.py  Project: forkkit/snuba
 def body(self):
     return ChainMap(self.query.get_body(), *self.extensions.values())
Example #23
 def bind(self, **kwargs):
     return self.__class__(self.logger, ChainMap(kwargs, self.extra))
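
A self-contained sketch of this bind() pattern (class and attribute names are assumed, not from the original project): each bind() layers the new keyword arguments over the existing context without copying or mutating it.

from collections import ChainMap

class ContextLogger:
    def __init__(self, logger=None, extra=None):
        self.logger = logger
        self.extra = extra if extra is not None else {}

    def bind(self, **kwargs):
        # new kwargs shadow existing context keys on lookup
        return self.__class__(self.logger, ChainMap(kwargs, self.extra))

log = ContextLogger(extra={'app': 'demo'})
log2 = log.bind(request_id='abc123')
print(dict(log2.extra))  # {'app': 'demo', 'request_id': 'abc123'}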
Example #24
# Merging several dicts by copying their items into a new one:
d1 = {'a': 1, 'b': 2}
d2 = {'c': 3, 'd': 4}
d3 = {'e': 5, 'f': 6}

d = {}
d.update(d1)
d.update(d2)
d.update(d3)
print(d)
# {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}

# But in a way this is wasteful because we had to copy the data into a new dictionary.
# Instead we can use ChainMap:

from collections import ChainMap

d1 = {'a': 1, 'b': 2}
d2 = {'c': 3, 'd': 4}
d3 = {'e': 5, 'f': 6}

d = ChainMap(d1, d2, d3)
print(d)
# ChainMap({'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'f': 6})

print(isinstance(d, dict))
# False

# So, the result is not a dictionary, but it is a mapping type that we can use almost like a dictionary:

d['a']
# 1

d['c']
# 3

for k, v in d.items():
    print(k, v)
Example #25
 def combine(self):
     self._comb = ChainMap(self._st, self._cap)
     print(self._comb)
     return self._comb
Example #26
import bson
from collections import ChainMap
import bson.json_util as u
import json

person = ChainMap({"name": "foo"}, ChainMap({"age": 20}, {"_id": bson.ObjectId()}))

try:
    print(json.dumps(person))
except Exception as e:
    print("hmm", e)

print("----------------------------------------")

print(json.dumps(person, indent=2, ensure_ascii=False, default=str))
print("----------------------------------------")


def default(d):
    if hasattr(d, "keys"):
        return dict(d)
    else:
        return str(d)


print(json.dumps(person, indent=2, ensure_ascii=False, default=default))

print("----------------------------------------")
print(u.dumps(person, indent=2, ensure_ascii=False))
Example #27
from collections import ChainMap

number = int(input('Number of companies to analyze: '))

prod_list = []
profit_q1_list = []
profit_q2_list = []
profit_q3_list = []
profit_q4_list = []

for i in range(1, number + 1):
    prod_list.append(input(f'Name of company {i}: '))
    profit_q1_list.append(int(input(f'Q1 revenue for company {i}: ')))
    profit_q2_list.append(int(input(f'Q2 revenue for company {i}: ')))
    profit_q3_list.append(int(input(f'Q3 revenue for company {i}: ')))
    profit_q4_list.append(int(input(f'Q4 revenue for company {i}: ')))

prod_map = ChainMap()
for i in range(number):
    prod_map = prod_map.new_child({'name': prod_list[i],
                                   'q1': profit_q1_list[i], 'q2': profit_q2_list[i],
                                   'q3': profit_q3_list[i], 'q4': profit_q4_list[i]})

print(prod_map)

"""
Как использовать ChainMap в дальнейших рассчетах, не придумал.
Она не позволяет обратиться к конкретному элементу конкретного словаря; а их количество мы не знаем.
"""

year_profits = []
for i in range(number):
    year_profit = profit_q1_list[i] + profit_q2_list[i] + profit_q3_list[i] + profit_q4_list[i]
    year_profits.append(year_profit)
Example #28
def main(argv=None):
    """ Scan a binary file for certain open source libraries that may have CVEs """
    argv = argv or sys.argv

    # Reset logger level to info
    LOGGER.setLevel(logging.INFO)

    parser = argparse.ArgumentParser(
        prog="cve-bin-tool",
        description=textwrap.dedent("""
            The CVE Binary Tool scans for a number of common, vulnerable open source
            components (openssl, libpng, libxml2, expat and a few others) to let you know
            if a given directory or binary file includes common libraries with known
            vulnerabilities.
            """),
        epilog=textwrap.fill(
            f'Available checkers: {", ".join(VersionScanner.available_checkers())}'
        ) + "\n\nPlease disclose issues responsibly!",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    input_group = parser.add_argument_group("Input")
    input_group.add_argument("directory",
                             help="directory to scan",
                             nargs="?",
                             default=None)
    input_group.add_argument(
        "-e",
        "--exclude",
        action=StringToListAction,
        help="Comma separated Exclude directory path",
        default=None,
    )

    input_group.add_argument(
        "-i",
        "--input-file",
        action="store",
        default="",
        help="provide input filename",
    )
    input_group.add_argument("-C",
                             "--config",
                             action="store",
                             default="",
                             help="provide config file")

    output_group = parser.add_argument_group("Output")
    output_group.add_argument("-q",
                              "--quiet",
                              action="store_true",
                              help="suppress output")
    output_group.add_argument(
        "-l",
        "--log",
        help="log level (default: info)",
        dest="log_level",
        action="store",
        choices=["debug", "info", "warning", "error", "critical"],
    )
    output_group.add_argument(
        "-o",
        "--output-file",
        action="store",
        help="provide output filename (default: output to stdout)",
    )
    output_group.add_argument(
        "--html-theme",
        action="store",
        help="provide custom theme directory for HTML Report",
    )
    output_group.add_argument(
        "-f",
        "--format",
        action="store",
        choices=["csv", "json", "console", "html"],
        help="update output format (default: console)",
    )
    output_group.add_argument(
        "-c",
        "--cvss",
        action="store",
        help="minimum CVSS score (as integer in range 0 to 10) to report (default: 0)",
    )
    output_group.add_argument(
        "-S",
        "--severity",
        action="store",
        choices=["low", "medium", "high", "critical"],
        help="minimum CVE severity to report (default: low)",
    )
    parser.add_argument("-V", "--version", action="version", version=VERSION)
    parser.add_argument(
        "-u",
        "--update",
        action="store",
        choices=["now", "daily", "never", "latest"],
        help="update schedule for NVD database (default: daily)",
    )
    parser.add_argument(
        "-x",
        "--extract",
        action="store_true",
        help="autoextract compressed files",
    )
    parser.add_argument(
        "--disable-version-check",
        action="store_true",
        help="skips checking for a new version",
    )

    checker_group = parser.add_argument_group("Checkers")
    checker_group.add_argument(
        "-s",
        "--skips",
        dest="skips",
        action=StringToListAction,
        type=str,
        help="comma-separated list of checkers to disable",
    )
    checker_group.add_argument(
        "-r",
        "--runs",
        dest="runs",
        action=StringToListAction,
        type=str,
        help="comma-separated list of checkers to enable",
    )
    defaults = {
        "directory": "",
        "exclude": [],
        "input_file": "",
        "log_level": "info",
        "format": "console",
        "cvss": 0,
        "severity": "low",
        "update": "daily",
        "extract": True,
        "disable_version_check": False,
        "skips": "",
        "runs": "",
        "quiet": False,
        "output_file": "",
        "html_theme": "",
    }

    with ErrorHandler(mode=ErrorMode.NoTrace):
        raw_args = parser.parse_args(argv[1:])
        args = {key: value for key, value in vars(raw_args).items() if value}

    configs = {}
    if args.get("config"):
        conf = ConfigParser(args["config"])
        configs = conf.parse_config()

    args = ChainMap(args, configs, defaults)
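    # Lookup precedence is left to right: explicit command-line arguments
    # override config-file values, which override the built-in defaults.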

    # logging and error related settings
    if args["log_level"]:
        LOGGER.setLevel(args["log_level"].upper())

    if args["quiet"]:
        LOGGER.setLevel(logging.CRITICAL)

    if 0 < LOGGER.level <= 10:
        error_mode = ErrorMode.FullTrace
    elif LOGGER.level >= 50:
        error_mode = ErrorMode.NoTrace
    else:
        error_mode = ErrorMode.TruncTrace

    if platform.system() != "Linux":
        warning_nolinux = """
                          **********************************************
                          Warning: this utility was developed for Linux.
                          You may need to install additional utilities
                          to use it on other operating systems.
                          **********************************************
                          """
        LOGGER.warning(warning_nolinux)

    # Database update related settings
    # Connect to the database
    cvedb_orig = CVEDB(version_check=not args["disable_version_check"],
                       error_mode=error_mode)

    # if OLD_CACHE_DIR (from cvedb.py) exists, print warning
    if os.path.exists(OLD_CACHE_DIR):
        LOGGER.warning(
            f"Obsolete cache dir {OLD_CACHE_DIR} is no longer needed and can be removed."
        )

    # Clear data if -u now is set
    if args["update"] == "now":
        cvedb_orig.clear_cached_data()

    if args["update"] == "latest":
        cvedb_orig.refresh_cache_and_update_db()

    # update db if needed
    if args["update"] != "never":
        cvedb_orig.get_cvelist_if_stale()
    else:
        LOGGER.warning("Not verifying CVE DB cache")
        if not cvedb_orig.nvd_years():
            with ErrorHandler(mode=error_mode, logger=LOGGER):
                raise EmptyCache(cvedb_orig.cachedir)

    # CVE Database validation
    if not cvedb_orig.check_cve_entries():
        with ErrorHandler(mode=error_mode, logger=LOGGER):
            raise CVEDataMissing("No data in CVE Database")

    # Input validation
    if not args["directory"] and not args["input_file"]:
        parser.print_usage()
        with ErrorHandler(logger=LOGGER, mode=ErrorMode.NoTrace):
            raise InsufficientArgs(
                "Please specify a directory to scan or an input file")

    if args["directory"] and not os.path.exists(args["directory"]):
        parser.print_usage()
        with ErrorHandler(logger=LOGGER, mode=ErrorMode.NoTrace):
            raise FileNotFoundError("Directory/File doesn't exist")

    # Checkers related settings
    skips = args["skips"]
    if args["runs"]:
        runs = args["runs"]
        skips = list(
            map(
                lambda checker: checker.name,
                filter(
                    lambda checker: checker.name not in runs,
                    pkg_resources.iter_entry_points("cve_bin_tool.checker"),
                ),
            ))

    # CSVScanner related settings
    score = 0
    if args["severity"]:
        # Set minimum CVSS score based on severity
        cvss_score = {"low": 0, "medium": 4, "high": 7, "critical": 9}
        score = cvss_score[args["severity"]]
    if int(args["cvss"]) > 0:
        score = int(args["cvss"])

    with CVEScanner(score=score) as cve_scanner:
        triage_data: TriageData
        total_files: int = 0
        parsed_data: Dict[ProductInfo, TriageData] = {}

        if args["input_file"]:
            input_engine = InputEngine(args["input_file"],
                                       logger=LOGGER,
                                       error_mode=error_mode)
            parsed_data = input_engine.parse_input()
            if not args["directory"]:
                for product_info, triage_data in parsed_data.items():
                    LOGGER.warning(f"{product_info}, {triage_data}")
                    cve_scanner.get_cves(product_info, triage_data)
        if args["directory"]:
            version_scanner = VersionScanner(
                should_extract=args["extract"],
                exclude_folders=args["exclude"],
                error_mode=error_mode,
            )
            version_scanner.remove_skiplist(skips)
            version_scanner.print_checkers()
            for scan_info in version_scanner.recursive_scan(args["directory"]):
                if scan_info:
                    product_info, path = scan_info
                    LOGGER.debug(f"{product_info}: {path}")
                    triage_data = parsed_data.get(product_info,
                                                  {"default": {}})
                    # Ignore paths from triage_data if we are scanning directory
                    triage_data["paths"] = {path}
                    cve_scanner.get_cves(product_info, triage_data)
            total_files = version_scanner.total_scanned_files

        LOGGER.info("")
        LOGGER.info("Overall CVE summary: ")
        LOGGER.info(
            f"There are {cve_scanner.products_with_cve} products with known CVEs detected"
        )
        if cve_scanner.products_with_cve > 0:
            affected_string = ", ".join(
                map(
                    lambda product_version: "".join(str(product_version)),
                    cve_scanner.affected(),
                ))
            LOGGER.info(f"Known CVEs in {affected_string}:")

            # Create an OutputEngine object
            output = OutputEngine(
                all_cve_data=cve_scanner.all_cve_data,
                scanned_dir=args["directory"],
                filename=args["output_file"],
                themes_dir=args["html_theme"],
                products_with_cve=cve_scanner.products_with_cve,
                products_without_cve=cve_scanner.products_without_cve,
                total_files=total_files,
            )

            if not args["quiet"]:
                output.output_file(args["format"])

        # Use the number of products with known cves as error code
        # as requested by folk planning to automate use of this script.
        # If no cves found, then the program exits cleanly.
        return cve_scanner.products_with_cve
Example #29

# ChainMap():
from collections import ChainMap

## Define some dictionaries containing cars with their makes, models, and years:
print('\n## Define some dictionaries containing cars with their makes, models, and years:')

auto1 = {"marca1": "Ford", "modelo1": "Mustang", "año1": 1964}

auto2 = {"marca2": "Bentley", "modelo2": "Continental GT", "año2": 2003}

auto3 = {"marca3": "Ferrari", "modelo3": "458", "año3": 2009}

## Wrap all the dictionaries into a single unit:
bd_autos = ChainMap(auto1, auto2, auto3)

print('\n## Wrap all the dictionaries into a single unit:')
print(bd_autos)

## Accessing keys and values of the ChainMap:

### Accessing values by key name:
print('\n### Accessing values by key name:')
print(bd_autos['marca1'])

### Accessing the values using values():
print('\n### Accessing the values using values():')
print(bd_autos.values())

### Accessing the keys using keys():
print('\n### Accessing the keys using keys():')
print(bd_autos.keys())
Example #30
# For example, the user enters A2 and C4F; they are stored as ['A', '2'] and ['C', '4', 'F'] respectively.
# The sum of those numbers is ['C', 'F', '1'] and the product is ['7', 'C', '9', 'F', 'E'].

import sys
from collections import ChainMap
from collections import deque

dic1 = {}
dic2 = {}
dic3 = {}
for k, v in enumerate([str(i)
                       for i in range(10)] + ['A', 'B', 'C', 'D', 'E', 'F']):
    dic1[k] = v
    dic2[v] = k
    dic3[str(k)] = v
dct = ChainMap(dic1, dic2, dic3)
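# a lookup falls through all three dicts, so `in dct` matches int digits (dic1),
# hex characters (dic2), and decimal-string digits (dic3)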

fir = input('First number in hexadecimal: ')
sec = input('Second number in hexadecimal: ')

# validate the entered numbers
z = 0
for i in fir.upper():
    if i not in dct:
        z += 1
for j in sec.upper():
    if j not in dct:
        z += 2
if z != 0:
    if z == 1:
        print('The first number was entered incorrectly.')
    elif z == 2:
        print('The second number was entered incorrectly.')
    else:
        print('Both numbers were entered incorrectly.')