Example #1
    def _get_raw_data(self):
        """
        Parses log file
        :return: tuple(
                       [ipaddress, lease end time, ...],
                       time to parse leases file
                      )
        """
        try:
            with open(self.leases_path, 'rt') as dhcp_leases:
                time_start = time()
                part1 = filterfalse(find_lease, dhcp_leases)
                part2 = filterfalse(find_ends, dhcp_leases)
                raw_result = dict(zip(part1, part2))
                time_end = time()

                file_parse_time = round((time_end - time_start) * 1000)

        except Exception as e:
            self.error("Failed to parse leases file:", str(e))
            return None

        else:
            result = (raw_result, file_parse_time)
            return result
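Note that part1 and part2 above are two filterfalse iterators over the same open file handle, so zip() alternates between them, pairing each kept line from one filter with the next kept line from the other. A minimal sketch of that alternating-consumption pattern (the sample lines and predicates are illustrative, not the plugin's find_lease/find_ends):

from itertools import filterfalse

lines = iter([
    'lease 10.0.0.5 {',
    '  ends 2 2024/01/02 10:00:00;',
    'lease 10.0.0.9 {',
    '  ends 3 2024/01/03 11:00:00;',
])

# Both filters consume the shared iterator: zip() pulls one "lease" line,
# then one "ends" line, and pairs them up in a dict.
part1 = filterfalse(lambda s: 'lease' not in s, lines)
part2 = filterfalse(lambda s: 'ends' not in s, lines)
print(dict(zip(part1, part2)))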
Example #2
def getPitches(pitchNames=('C', 'E', 'G'), bassPitch='C3', maxPitch='C8'):
    '''
    Given a list of pitchNames, a bassPitch, and a maxPitch, returns a sorted list of
    pitches between the two limits (inclusive) which correspond to items in pitchNames.

    >>> from music21.figuredBass import segment
    >>> from music21 import pitch

    >>> pitches = segment.getPitches()
    >>> print(', '.join([p.nameWithOctave for p in pitches]))
    C3, E3, G3, C4, E4, G4, C5, E5, G5, C6, E6, G6, C7, E7, G7, C8

    >>> pitches = segment.getPitches(['G', 'B', 'D', 'F'], bassPitch=pitch.Pitch('B2'))
    >>> print(', '.join([p.nameWithOctave for p in pitches]))
    B2, D3, F3, G3, B3, D4, F4, G4, B4, D5, F5, G5, B5, D6, F6, G6, B6, D7, F7, G7, B7

    >>> pitches = segment.getPitches(['F##', 'A#', 'C#'], bassPitch=pitch.Pitch('A#3'))
    >>> print(', '.join([p.nameWithOctave for p in pitches]))
    A#3, C#4, F##4, A#4, C#5, F##5, A#5, C#6, F##6, A#6, C#7, F##7, A#7
    '''
    if isinstance(bassPitch, str):
        bassPitch = pitch.Pitch(bassPitch)
    if isinstance(maxPitch, str):
        maxPitch = pitch.Pitch(maxPitch)

    iter1 = itertools.product(pitchNames, range(maxPitch.octave + 1))
    iter2 = map(lambda x: pitch.Pitch(x[0] + str(x[1])), iter1)
    iter3 = itertools.filterfalse(lambda samplePitch: bassPitch > samplePitch, iter2)
    iter4 = itertools.filterfalse(lambda samplePitch: samplePitch > maxPitch, iter3)
    allPitches = list(iter4)
    allPitches.sort()
    return allPitches
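The two chained filterfalse calls above implement an inclusive range check: anything below bassPitch or above maxPitch is dropped. A minimal sketch of the same idea on plain integers (the bounds and values are illustrative, not music21 objects):

import itertools

low, high = 3, 8
kept = itertools.filterfalse(lambda v: v > high,
                             itertools.filterfalse(lambda v: v < low, range(12)))
print(list(kept))  # [3, 4, 5, 6, 7, 8]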
Example #3
    def write(self, new_cache):
        # if new_cache is empty goa might not be running, avoid wiping contacts
        if not new_cache:
            return

        # update contacts
        new_diffs = list(itertools.filterfalse(lambda x: x in self.cache, new_cache))

        for old_item in self.cache:
            old_num = ''.join(re.findall(r'\d+', old_item['number']))

            for new_item in list(new_diffs):  # iterate over a copy; matches are removed below
                new_num = ''.join(re.findall(r'\d+', new_item['number']))

                if old_item['name'] in ('', new_item['name']) and old_num == new_num:
                    self.cache[self.cache.index(old_item)].update(new_item)
                    new_diffs.remove(new_item)

        # remove old folks
        old_diffs = list(itertools.filterfalse(lambda x: x in new_cache, self.cache))

        for old_item in old_diffs:
            if old_item['origin'] != 'kdeconnect':
                self.cache.remove(old_item)

        # add new folks
        for new_item in new_diffs:
            self.cache.append(new_item)

        with open(self.cache_path, 'w') as cache_file:
            json.dump(new_cache, cache_file)
Example #4
    def assertFiles(self, obj, field_path, fn, compression=None, filter=lambda _: False):  # pylint: disable=invalid-name
        """Compare output file of a processor to the given correct file.

        :param obj: Data object which includes file that we want to
            compare.
        :type obj: :obj:`resolwe.flow.models.Data`

        :param field_path: Path to file name in Data object.
        :type field_path: :obj:`str`

        :param fn: File name (and relative path) of file to which we
            want to compare. Name/path is relative to ``tests/files``
            folder of a Django application.
        :type fn: :obj:`str`

        :param compression: If not None, files will be uncompressed with
            the appropriate compression library before comparison.
            Currently supported compression formats are "gzip" and
            "zip".
        :type compression: :obj:`str`

        :param filter: Function for filtering the contents of output files. It
            is used in :obj:`itertools.filterfalse` function and takes one
            parameter, a line of the output file. If it returns `True`, the
            line is excluded from comparison of the two files.
        :type filter: :obj:`function`

        """
        open_kwargs = {}
        if compression is None:
            open_fn = open
            # by default, open() will open files as text and return str
            # objects, but we need bytes objects
            open_kwargs['mode'] = 'rb'
        elif compression == 'gzip':
            open_fn = gzip.open
        elif compression == 'zip':
            open_fn = zipfile.ZipFile.open
        else:
            raise ValueError("Unsupported compression format.")

        field = dict_dot(obj.output, field_path)
        output = os.path.join(settings.FLOW_EXECUTOR['DATA_PATH'], str(obj.pk), field['file'])
        with open_fn(output, **open_kwargs) as output_file:
            output_contents = b"".join([line for line in filterfalse(filter, output_file)])
        output_hash = hashlib.sha256(output_contents).hexdigest()

        wanted = os.path.join(self.files_path, fn)

        if not os.path.isfile(wanted):
            shutil.copyfile(output, wanted)
            self.fail(msg="Output file {} missing so it was created.".format(fn))

        with open_fn(wanted, **open_kwargs) as wanted_file:
            wanted_contents = b"".join([line for line in filterfalse(filter, wanted_file)])
        wanted_hash = hashlib.sha256(wanted_contents).hexdigest()
        self.assertEqual(wanted_hash, output_hash,
                         msg="File contents hash mismatch: {} != {}".format(
                             wanted_hash, output_hash) + self._debug_info(obj))
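As described in the docstring, the filter callable receives one line at a time and returning True excludes that line from the comparison; with compression=None the files are opened in binary mode, so each line is a bytes object. A hedged usage sketch (the field path, file name, and the idea of skipping '#' comment lines are illustrative, not from the original test suite):

# Hypothetical call inside a test case: compare files while ignoring comment
# lines (e.g. embedded timestamps), which would otherwise change the hashes.
self.assertFiles(
    obj,
    'output_file',           # illustrative field_path
    'expected_output.txt',   # illustrative file under tests/files
    filter=lambda line: line.startswith(b'#'),
)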
Example #5
  def _initial_update(self, files):
    oldfiles = {f[0] for f in self._db.execute('select filename from pkginfo where pkgrepo = ?', (self.name,))}
    oldfiles.update(f[0] for f in self._db.execute('select filename from sigfiles where pkgrepo = ?', (self.name,)))

    for f in sorted(filterfalse(filterPkg, files - oldfiles), key=pkgsortkey):
      self.dispatch(f, 'add')

    for f in sorted(filterfalse(filterPkg, oldfiles - files), key=pkgsortkey):
      self.dispatch(f, 'remove')
Example #6
    def check_files(self, **kwargs):
        """Check files listed with the 'localfiles' fields."""
        # Get configuration options
        ignore = kwargs.get('ignore', [])
        filters = kwargs.get('filter', [])

        # Sets for recording entries:
        # - ok: has 'localfile' field, file exists
        # - other: hardcopies or online articles
        # - missing: no 'localfile' field
        # - broken: 'localfile' field exists, but is wrong
        sets = {k: set() for k in ['ok', 'other', 'missing', 'broken']}

        # Get the set of files in the current directory
        r = re.compile('(' + ')|('.join(ignore) + ')')
        files = filterfalse(os.path.isdir, iglob('**', recursive=True))
        files = sorted(filterfalse(r.search, files))

        # Iterate through database entries
        for e in self.db.entries:
            if e.has_file:
                if e.file_exists:
                    sets['ok'].add(e['ID'])
                    for fn in e.file_rel_path():
                        try:
                            files.remove(fn)
                        except ValueError:
                            if os.path.exists(fn):
                                # File exists, but has perhaps been filtered or
                                # is outside the tree.
                                continue
                            else:
                                raise
                else:
                    sets['broken'] |= {(e['ID'], lf) for lf in e['localfile']}
            else:
                # Apply user filters
                done = False
                for f in filters:
                    if f['field'] in e and f['value'] in e[f['field']]:
                        sets[f['sort']].add(e['ID'])
                        done = True
                        break
                if not done:
                    sets['missing'].add(e['ID'])

        # Output
        output_format = kwargs.get('format', 'plain')
        if output_format == 'plain':
            output = self._check_files_plain
        elif output_format == 'csv':
            output = self._check_files_csv
        output(sorted(sets['ok']), sorted(sets['other']),
               sorted(sets['missing']), sorted(sets['broken']),
               files)
Example #7
    def getPitches(self, bassPitch, notationString=None, maxPitch=None):
        '''
        Takes in a bassPitch, a notationString, and a maxPitch representing the highest
        possible pitch that can be returned. Returns a sorted list of pitches which
        correspond to the pitches of each specific pitch name found through getPitchNames
        that fall between the bassPitch and the maxPitch, inclusive of both.

        If maxPitch is None, then B5 is used instead.

        >>> from music21.figuredBass import realizerScale
        >>> fbScale = realizerScale.FiguredBassScale()
        
        Root position triad

        >>> [str(p) for p in fbScale.getPitches('C3') ]
        ['C3', 'E3', 'G3', 'C4', 'E4', 'G4', 'C5', 'E5', 'G5']

        First inversion triad

        >>> [str(p) for p in fbScale.getPitches('D3', '6') ]
        ['D3', 'F3', 'B3', 'D4', 'F4', 'B4', 'D5', 'F5', 'B5']
        
        Root position seventh chord, showing MaxPitch
        
        >>> fbScale.getPitches(pitch.Pitch('G3'), '7', 'F4')
        [<music21.pitch.Pitch G3>, <music21.pitch.Pitch B3>, 
         <music21.pitch.Pitch D4>, <music21.pitch.Pitch F4>]
        '''
        if maxPitch is None:
            maxPitch = pitch.Pitch('B5')
        
        bassPitch = convertToPitch(bassPitch)
        maxPitch = convertToPitch(maxPitch)
        pitchNames = self.getPitchNames(bassPitch, notationString)
        iter1 = itertools.product(pitchNames, range(maxPitch.octave + 1))
        if six.PY3:
            iter2 = map( # pylint: disable=bad-builtin
                        lambda x: pitch.Pitch(x[0] + str(x[1])), iter1) 
            iter3 = itertools.filterfalse( # @UndefinedVariable
                                          lambda samplePitch: bassPitch > samplePitch, iter2) 
            iter4 = itertools.filterfalse( # @UndefinedVariable
                                          lambda samplePitch: samplePitch > maxPitch, iter3) 
        else:
            iter2 = itertools.imap( # @UndefinedVariable
                            lambda x: pitch.Pitch(x[0] + str(x[1])), iter1)  
            iter3 = itertools.ifilterfalse(  # @UndefinedVariable
                            lambda samplePitch: bassPitch > samplePitch, iter2) 
            iter4 = itertools.ifilterfalse(  # @UndefinedVariable
                            lambda samplePitch: samplePitch > maxPitch, iter3)  
        allPitches = list(iter4)
        allPitches.sort()
        return allPitches
Example #8
def main():
    s = input()
    l = list(filterfalse(lambda x: not x.islower(), s))
    u = list(filterfalse(lambda x: not x.isupper(), s))
    d = list(filterfalse(lambda x: not x.isdigit(), s))
    o = list(filterfalse(lambda x: int(x) % 2 == 0, d))
    e = list(filterfalse(lambda x: int(x) % 2 != 0, d))

    l.sort()
    u.sort()
    o.sort()
    e.sort()

    print(*(l + u + o + e), sep="")
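Each filterfalse call in main() negates its predicate, which makes it equivalent to the built-in filter; for instance, the lowercase selection can be written either way:

from itertools import filterfalse

s = 'Sorting1234'
assert list(filterfalse(lambda x: not x.islower(), s)) == list(filter(str.islower, s))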
Example #9
    def read_data(self, filename):
        count = 0
        #fields = [['name', 'RA', 'type1','type3', 'mag1', 'mag3', 'T1', 'P_orb', 'P3', 'EB', 'Spectr2', 'M1_M2', 'Incl', 'M1', 'M2'],
                    #['Alt_name', 'DEC', 'type2','type4', 'mag2', 'mag4', 'T2', 'P2', 'P4', 'SB', 'Spectr1', 'e_M1_M2', 'e_Incl', 'e_M1', 'e_M2']]
        
        fields = ['name', 'alias', 'flag1', 'flag2', 'ra', 'dec', 
                  'type1', 'type2', 'type3', 'type4', 'mag1', 'mag2', 'mag3', 'mag4',
                  'T1', 'T2', 'P0', 'P2', 'P3', 'P4', 'EB', 'SB', 'spectr2', 'spectr1',
                  'q', 'q_E', 'Incl',  'Incl_E', 'M1', 'M1_E', 'M2', 'M2_E'] 
        
        data = []
        with open(filename,'r') as fp:
            
            datalines = itt.filterfalse( lambda s: s.startswith(('\n','-')), fp )
            header = [next(datalines), next(datalines)]#consume(datalines, 2)       #header
            
            splitter = lambda line: map( str.strip, line.strip('\n|\r').split('|') )
            for i, lpair in enumerate( grouper(datalines, 2) ):
                data.append( interleave( *map( splitter, lpair ) ) )
                #data.append( dat )

        #Interleaving data and fields (every 2 lines refer to a single catalogue object)
        #data = [interleave(*s) for s in zip(*data)]
        #fields = interleave(fields)

        print('Data for %i objects successfully read.\n\n' %(i+1))

        return np.array(fields), np.array(data).T
Example #10
    def update_candidates(self):
        pattern = ''
        sources = ''
        self.__selected_candidates = []
        self.__candidates = []
        for name, all, candidates in self.__denite.filter_candidates(
                self.__context):
            self.__candidates += candidates
            sources += '{}({}/{}) '.format(name, len(candidates), len(all))

            if pattern == '':
                matchers = self.__denite.get_source(name).matchers
                pattern = next(filterfalse(
                    lambda x: x == '',
                    [self.__denite.get_filter(x).convert_pattern(
                        self.__context['input']) for x in matchers
                     if self.__denite.get_filter(x)]), '')
        self.__matched_pattern = pattern
        self.__candidates_len = len(self.__candidates)
        if self.__context['reversed']:
            self.__candidates.reverse()

        if self.__denite.is_async():
            sources = '[async] ' + sources
        self.__statusline_sources = sources
Example #11
    def search_path(self, begword, endword):
        """Search shortest way from word to word
        (Поиск кратчайшего пути от узла begword до endword)

        graph.search_path(begword, endword) -> [str]
        if path is found raise KeyError
        exceptions:
            KeyError
        """
        if begword not in self:
            self.add_edge(begword)
        if endword not in self:
            self.add_edge(endword)
        if begword == endword:
            return [begword]

        tup = (begword,)
        queue, steps = deque((tup,)), set(tup)
        while queue:
            path = queue.popleft()            
            for link in filterfalse(steps.__contains__,  self[path[-1]]):
                new_path = path + (link,)
                if link == endword:
                    return new_path
                steps.add(link)
                queue.append(new_path)
        raise KeyError('Not Found: ' + endword)
Example #12
 def test_itertools_filterfalse(self):
     """
     Tests whether itertools.filterfalse is available.
     """
     from itertools import filterfalse
     not_div_by_3 = filterfalse(lambda x: x % 3 == 0, range(8))
     self.assertEqual(list(not_div_by_3), [1, 2, 4, 5, 7])
Example #13
    def add_batch(self, batch, state_hash=None, required=False):
        with self._condition:
            if self._final:
                raise SchedulerError("Scheduler is finalized. Cannot take"
                                     " new batches")
            preserve = required
            if not required:
                # If this is the first non-required batch, it is preserved for
                # the schedule to be completed (i.e. no empty schedules in the
                # event of unschedule_incomplete_batches being called before
                # the first batch is completed).
                preserve = _first(
                    filterfalse(lambda sb: sb.required,
                                self._batch_by_id.values())) is None

            batch_signature = batch.header_signature
            self._batch_by_id[batch_signature] = \
                _AnnotatedBatch(batch, required=required, preserve=preserve)

            if state_hash is not None:
                self._required_state_hashes[batch_signature] = state_hash
            batch_length = len(batch.transactions)
            for idx, txn in enumerate(batch.transactions):
                if idx == batch_length - 1:
                    self._last_in_batch.append(txn.header_signature)
                self._txn_to_batch[txn.header_signature] = batch_signature
                self._txn_queue.append(txn)
            self._condition.notify_all()
Example #14
def partition(pred, iterable):
    """
    Use a predicate to partition entries into false entries and true entries
    """
    # partition(is_odd, range(10)) --> 0 2 4 6 8   and  1 3 5 7 9
    t1, t2 = itertools.tee(iterable)
    return itertools.filterfalse(pred, t1), filter(pred, t2)
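A quick usage check of the partition recipe defined above, splitting a range into even and odd numbers:

def is_odd(n):
    return n % 2 == 1

evens, odds = partition(is_odd, range(10))
print(list(evens))  # [0, 2, 4, 6, 8]
print(list(odds))   # [1, 3, 5, 7, 9]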
Example #15
def unique_everseen(iterable, key=None):
    """
    List unique elements, preserving order.

    Remember all elements ever seen.
    http://docs.python.org/library/itertools.html#recipes
    """
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    try:
        from itertools import ifilterfalse
    except:
        from itertools import filterfalse

    seen = set()
    seen_add = seen.add
    if key is None:
        try:
            for element in ifilterfalse(seen.__contains__, iterable):
                seen_add(element)
                yield element
        except:
            for element in filterfalse(seen.__contains__, iterable):
                seen_add(element)
                yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element
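Usage matching the docstring examples above (uses the unique_everseen generator just defined; on Python 3 the ifilterfalse import fails and the filterfalse fallback is taken):

print(list(unique_everseen('AAAABBBCCDAABBB')))     # ['A', 'B', 'C', 'D']
print(list(unique_everseen('ABBCcAD', str.lower)))  # ['A', 'B', 'C', 'D']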
Example #16
  def partition(cls, iterable, pred):
    """
    Use a predicate to partition items into false and true entries.
    """

    t1, t2 = itertools.tee(iterable)
    return cls(itertools.filterfalse(pred, t1), filter(pred, t2))
Example #17
    def _get_data(self):
        """
        Parse new log lines
        :return: dict
        """
        try:
            raw = self._get_raw_data()
            if raw is None:
                return None
            elif not raw:
                return self.data
        except (ValueError, AttributeError):
            return None

        # Fail2ban logs look like
        # 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
        data = dict(
            zip(
                self.jails_list,
                [len(list(filterfalse(lambda line: (jail + '] Ban') not in line, raw))) for jail in self.jails_list]
            ))

        for jail in data:
            self.data[jail] += data[jail]

        return self.data
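The dict built above counts, per jail, how many new log lines contain '<jail>] Ban'. A small self-contained sketch of that counting step with sample log lines (the jail names and lines are illustrative):

from itertools import filterfalse

jails_list = ['ssh', 'apache-auth']
raw = [
    '2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231',
    '2016-12-25 12:40:10,002 fail2ban.actions[2455]: WARNING [ssh] Unban 178.156.32.231',
    '2016-12-25 12:41:00,003 fail2ban.actions[2455]: WARNING [apache-auth] Ban 10.0.0.7',
]

# For each jail, count the lines announcing a new ban in that jail.
data = dict(
    zip(
        jails_list,
        [len(list(filterfalse(lambda line: (jail + '] Ban') not in line, raw)))
         for jail in jails_list]
    ))
print(data)  # {'ssh': 1, 'apache-auth': 1}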
Example #18
 def update(self):
     """Update the nello lock properties."""
     self._nello_lock.update()
     # Location identifiers
     location_id = self._nello_lock.location_id
     short_id = self._nello_lock.short_id
     address = self._nello_lock.address
     self._name = 'Nello {}'.format(short_id)
     self._device_attrs = {
         ATTR_ADDRESS: address,
         ATTR_LOCATION_ID: location_id
     }
     # Process recent activity
     activity = self._nello_lock.activity
     if self._activity:
         # Filter out old events
         new_activity = list(
             filterfalse(lambda x: x in self._activity, activity))
         if new_activity:
             for act in new_activity:
                 activity_type = act.get('type')
                 if activity_type == 'bell.ring.denied':
                     event_data = {
                         'address': address,
                         'date': act.get('date'),
                         'description': act.get('description'),
                         'location_id': location_id,
                         'short_id': short_id
                     }
                     self.hass.bus.fire(EVENT_DOOR_BELL, event_data)
     # Save the activity history so that we don't trigger an event twice
     self._activity = activity
Example #19
    def test_filterfalse_wrongargs(self):
        import itertools

        it = itertools.filterfalse(0, [1])
        raises(TypeError, next, it)

        raises(TypeError, itertools.filterfalse, bool, None)
Example #20
    def __eq__(self, r):
        res = True

        if self is r:
            return True
        elif not isinstance(r, self.__class__):
            res = NotImplemented
        else:
            keys_to_cmp = [
                           '_is_complete',
                           '_db_location',
                           '_file_exts',
                           '_folder_configs',
                           '_index_blacklist',
                           '_search_std_incl_folders',
                           '_std_incl_folders'
                          ]
            ldict = self.__dict__
            rdict = r.__dict__

            results = list(filterfalse(lambda k: ldict.get(k, None) == rdict.get(k, None),
                                              keys_to_cmp))

            # if results is empty, all keys evaluated to equal
            res = bool(not results)

            if DEBUG_INDEXERCONFIG and not res:
                for key in results:
                    print("%s failed: '%s' != '%s'" %
                                    (key, ldict.get(key, None), rdict.get(key, None)))

        return res
Example #21
    def compute_testing_installability(self):
        """Computes the installability of packages in testing

        This method computes the installability of all packages in
        testing and caches the result.  This has the advantage of
        making "is_installable" queries very fast for all packages
        in testing.
        """

        check_inst = self._check_inst
        cbroken = self._cache_broken
        cache_inst = self._cache_inst
        eqv_table = self._eqv_table
        testing = self._testing
        tcopy = [x for x in testing]
        for t in filterfalse(cache_inst.__contains__, tcopy):
            if t in cbroken:
                continue
            res = check_inst(t)
            if t in eqv_table:
                eqv = (x for x in eqv_table[t] if x in testing)
                if res:
                    cache_inst.update(eqv)
                else:
                    eqv_set = frozenset(eqv)
                    testing -= eqv_set
                    cbroken |= eqv_set
Example #22
    def compute_installability(self):
        """Computes the installability of all the packages in the suite

        This method computes the installability of all packages in
        the suite and caches the result.  This has the advantage of
        making "is_installable" queries very fast for all packages
        in the suite.
        """

        universe = self._universe
        check_inst = self._check_inst
        cbroken = self._cache_broken
        cache_inst = self._cache_inst
        suite_contents = self._suite_contents
        tcopy = [x for x in suite_contents]
        for t in filterfalse(cache_inst.__contains__, tcopy):
            if t in cbroken:
                continue
            res = check_inst(t)
            if t in universe.equivalent_packages:
                eqv = (x for x in universe.packages_equivalent_to(t) if x in suite_contents)
                if res:
                    cache_inst.update(eqv)
                else:
                    eqv_set = frozenset(eqv)
                    suite_contents -= eqv_set
                    cbroken |= eqv_set
Example #23
def apply_move(paths, force, dry_run):
    """ Core process of moving files. """
    assert isinstance(paths, list)
    assert len(paths)

    already_existing = [pair.dst for pair in itertools.filterfalse(lambda p:
        not p.dst.exists(), paths)]

    if len(already_existing) and not force:
        raise RuntimeError(
            'The following file(s) already exist:\n'
            '  {}\n'
            'Use `--force` to override.'
            .format('\n  '.join(str(p) for p in already_existing))
          )

    for pair in paths:
        assert pair.src.is_absolute()
        assert pair.dst.is_absolute()

        try:
            dst_dir = pair.dst.parent
            if not dst_dir.exists():
                print('Creating directory: `{}`'.format(dst_dir))
                if not dry_run:
                    dst_dir.mkdir(parents=True)
            
            print('Moving `{}` to `{}`'.format(pair.src, pair.dst))
            if not dry_run:
                shutil.move(str(pair.src), str(pair.dst))

        except OSError as ex:
            raise RuntimeError(ex)
Example #24
 def _parse_all_merged_entities(self):
     """set self._all_merged_entities to the longest possible(wrapping)
     tokens including non-entity tokens
     """
     self._all_merged_entities = list(filterfalse(
         lambda token: self._is_wrapped(token, self.all_entities),
         self.all_entities))
Example #25
def unique_everseen(iterable, key=None):
    """
    The generator to list unique elements, preserving the order. Remember all
    elements ever seen. This was taken from the itertools recipes.

    Args:
        iterable: An iterable to process.
        key: Optional function to run when checking elements (e.g., str.lower)

    Returns:
        Generator: Yields a generator object.
    """

    seen = set()
    seen_add = seen.add

    if key is None:

        for element in filterfalse(seen.__contains__, iterable):

            seen_add(element)
            yield element

    else:

        for element in iterable:

            k = key(element)

            if k not in seen:

                seen_add(k)
                yield element
Example #26
def filter_by_min_spt(dataset, item_count, min_spt, trans_count):
	"""
	returns:
		(i) filtered dataset (remove items w/ freq < min_spt)
		(ii) filtered item counter
	pass in:
		(i) dataset (the raw dataset)
		(ii) dict w/ items for keys, values are item frequency,
			returned by call to item_counter, for queries this
			is probably an f-list, built by item_counter
		(iii) min_spt (float, eg, 0.03 means each item must appear in
			at least 3% of the dataset)
		(iv) total number of transactions
	removes any item from every transaction if that item's total freq
	is below min_spt
	to call this fn, bind the call to two variables, like so:
	filtered_trans, item_count_dict = filter_by_min_spt(...)
	"""
	excluded_items = get_items_below_min_spt(dataset, min_spt, trans_count)
	if not excluded_items:
		# if all items are above min_spt, ie, there are no items to exclude
		# so just return original args
		return dataset, item_count
	else:
		# there is at least one item to exclude
		# now build the expression required by 'IT.filterfalse' from the
		# list of excluded items
		filter_str = build_min_spt_filter_str(dataset, min_spt, trans_count)
		# remove those items below min_spt threshold items
		tx = [IT.filterfalse(lambda q: eval(filter_str), trans)
				for trans in dataset]
		ic = {k:v for k, v in item_count.items() if (v/trans_count) >= min_spt}
		return list(map(list, tx)), ic
Example #27
    def new(self):
        if len(self.sessions) > self.maxsession:
            raise MaxDatabaseConnection

        self.sessions.add(self.dbsession(self.uri))

        self.itersession = itertools.filterfalse(getfree, self.sessions)
Example #28
    def __init__(self, session=DbSession, uri='pq://localhost/postgres', maxconnect=10):

        # session to pooled
        if isinstance(session, DbSession):
            self.dbsession = session
        else:
            raise InvalidDatabaseSessionType

        # DB connect uri
        self.uri = uri

        # set max connection limits
        if maxconnect < 5:
            self.maxsession = 5
        else:
            self.maxsession = maxconnect

        # least db connection count
        self.minsession = int(self.maxsession / 5)

        # our dbsession
        self.sessions = set()

        # create initial dbsession #minsession
        for x in range(self.minsession):
            self.sessions.add(self.dbsession(self.uri))

        # init first session
        self.itersession = itertools.filterfalse(getfree, self.sessions)
Example #29
    def clean_article_ids(self):
        article_ids = self.cleaned_data["article_ids"].split("\n")
        article_ids = filter(bool, map(str.strip, article_ids))

        # Parse all article ids as integer
        try:
            article_ids = list(map(int, article_ids))
        except ValueError:
            offender = repr(next(filterfalse(str.isnumeric, article_ids)))
            raise ValidationError("{offender} is not an integer".format(**locals()))

        # Check if they can be chosen
        articlesets = self.cleaned_data["articlesets"]
        distinct_args = ["id"] if db_supports_distinct_on() else []
        all_articles = Article.objects.filter(articlesets_set__in=articlesets).distinct(*distinct_args)
        chosen_articles = Article.objects.filter(id__in=article_ids).distinct(*distinct_args)
        intersection = all_articles & chosen_articles

        if chosen_articles.count() != intersection.count():
            # Find offenders (skipping non-existing, which we can only find when
            # fetching all possible articles)
            existing = all_articles.values_list("id", flat=True)
            offenders = chosen_articles.exclude(id__in=existing).values_list("id", flat=True)
            raise ValidationError(
                ("Articles {offenders} not in chosen articlesets or some non-existent"
                 .format(**locals())), code="invalid")

        return article_ids
Example #30
def bat_graph(cave):
    nodes = cave.bats
    inter_bat_edges = [Edge(x, y) for x in nodes for y in nodes if x is not y]
    bat_alpha_edges = [Edge(x, cave.alpha) for x in nodes]
    edges = inter_bat_edges + bat_alpha_edges
    arcs_no_walls = filterfalse(lambda edge: any([wall.intersects(edge) for wall in cave.walls]), edges)
    return Graph(nodes, frozenset(arcs_no_walls))
Example #31
    def _run(self, env: Union[Dict[str, str], MergedMap] = None) -> None:
        logging.debug("\033[1;31m--------- %s [%s] ---------\033[0m" %
                      (self.name, self._compiled))

        if self._compiled:
            return

        self._pre_run()

        cwd = self.cwd
        if not isempty(self.sourcedir):
            src = '%s/%s/' % (self.sourcedir, self.name)
            if os.path.exists(src) and src != cwd and not os.path.exists(cwd):
                Command(['rsync', '-avH', '--delete', '--quiet', src,
                         cwd])._run()

        # in seq: 1. os env; 2. global environment; 3. self pre-environment; 4. local func env arguments
        self._build_env.update(os.environ.copy(), mode=BuildEnv_Mode.MERGE)
        self._build_env.update(Environment.global_env,
                               mode=BuildEnv_Mode.MERGE)
        if env is not None:
            self._build_env.update(env, mode=BuildEnv_Mode.MERGE)
        #use_env.merge(self._pre_environment, merge=MergeMapType.AsString)
        #use_env.merge(Environment.global_env, merge=MergeMapType.AsString)

        #use_env.merge(os.environ.copy(), merge=MergeMapType.AsString)

        def sticky_env() -> None:
            self._build_env.update({'PREFIX': Environment.working.prefix})

        def func(stage: Stage, item: Union[Command, Env]) -> None:
            use_env = self._build_env.curr_env
            # Each command is run in a clean isolated environment, and
            # their environment is only updated by explicitly user-defined env
            if isinstance(item, Command):
                if stage == Stage.BuildScript:
                    #logging.debug('> Current Directory: %s' % os.path.abspath(cwd))
                    #logging.debug('Command [shell=%s, dry_run=%s]: %s' % (c.shell, self.dry_run, str(c)))
                    if use_env is not None:
                        logging.debug('> CFLAGS: %s' % use_env.get('CFLAGS'))
                        logging.debug('> LDFLAGS: %s' % use_env.get('LDFLAGS'))
                item._run(env=use_env, dry_run=Environment._dry_run)
            elif type(item) is Env:
                val = item.parse(cwd=cwd, env=use_env)
                logging.debug('> Update Current Unit Env by: %s' % val)
                self._build_env.update(val, mode=BuildEnv_Mode.MERGE)

            sticky_env()

        for stage, item in itertools.filterfalse(lambda x: x[1] is None,
                                                 self.__gen_commands()):
            if stage == Stage.BuildScript:
                print(item.cwd)
            sticky_env()
            logging.debug('"\033[1;34m [%s] \033[0m" PREFIX: %s' %
                          (stage, self._build_env.curr_env.get('PREFIX')))
            func(stage, item)

        for env in self._export_env:
            val = env.parse(cwd=cwd, env=self._build_env.curr_env)
            self._build_env.update(val)
            Environment.global_env.update(val)

        self._compiled = True

        self._post_run()
Example #32
    def request_match(self, fls_comp_id, fd_comp_id, game_week, season, limit=None):
        """
        Retrieve the match details
        :param fls_comp_id: FLS comp id
        :param fd_comp_id: footballdata comp id
        :param game_week: 1, 2, 3....n
        :param season: Identifier for the season e.g. 2018-2019 (for football-data API)
        :param limit: result set limiter
        :return: Matches over a given gameweek for a given competition and season
        :rtype: list
        """
        # Placeholder
        # competition_id = 2002
        # In FLS, 2 is id for premier league, 2021 in football-data
        # fls_comp_id = 2
        # fd_comp_id = 2021

        if isinstance(season, str):
            season = int(season.split("-")[0])

        joint_matches = []
        fantasy_matches = self.fantasy.request_matches()
        fantasy_matches = list(filter(lambda x: x[Match.FANTASY_GAME_WEEK] == int(game_week), fantasy_matches))
        fd_matches = self.fd.request_competition_match(competition_id=fd_comp_id,
                                                       **{fdf.MATCHDAY: game_week, fdf.SEASON: season})


        game_week_start = dt.strptime(fd_matches[0][Match.MATCH_UTC_DATE], '%Y-%m-%dT%H:%M:%SZ')
        game_week_end = game_week_start + timedelta(days=4)  # 4 days per game week
        fls_matches = self.fls.request_matches(**{flsf.FROM_DATETIME: game_week_start,
                                                  flsf.TO_DATETIME: game_week_end})

        fls_matches = list(filterfalse(lambda x: x[Match.FLS_API_COMPETITION_ID] != fls_comp_id, fls_matches))

        for match in fd_matches:
            match_start_datetime = dt.strptime(match[Match.MATCH_UTC_DATE], '%Y-%m-%dT%H:%M:%SZ')
            match_start_datetime = dt.strftime(match_start_datetime, '%Y-%d-%mT%H:%M:%SZ')
            home_team = match[Match.HOME_TEAM]
            away_team = match[Match.AWAY_TEAM]
            home_score = match[Match.FULL_TIME_HOME_SCORE]
            away_score = match[Match.FULL_TIME_AWAY_SCORE]
            for fls_match in fls_matches:
                if fls_match[Match.HOME_TEAM] in home_team and fls_match[Match.AWAY_TEAM] in away_team and \
                        home_score == fls_match[Match.FULL_TIME_HOME_SCORE] and \
                        away_score == fls_match[Match.FULL_TIME_AWAY_SCORE]:
                    temp_dict = {**match, **fls_match}
                    adv_match_details = self.fls.request_match_details(match_id=fls_match[Match.FLS_MATCH_ID])
                    temp_dict2 = {**temp_dict, **adv_match_details}
                    for f_match in fantasy_matches:
                        f_home_team = self.fantasy.name_to_id(f_match[Match.FANTASY_HOME_TEAM_ID])
                        f_away_team = self.fantasy.name_to_id(f_match[Match.FANTASY_AWAY_TEAM_ID])
                        parsed_date = dt.strftime(dt.strptime(f_match[Match.START_TIME], '%Y-%m-%dT%H:%M:%SZ'),
                                                  '%Y-%d-%mT%H:%M:%SZ')
                        if parsed_date == match_start_datetime:
                            if f_home_team in home_team and f_away_team in away_team and \
                                    home_score == f_match[Match.FULL_TIME_HOME_SCORE] and \
                                    away_score == f_match[Match.FULL_TIME_AWAY_SCORE]:
                                final_dict = {**f_match, **temp_dict2}
                                joint_matches.append(final_dict)

        if limit:
            return joint_matches[:limit]

        return joint_matches
Example #33
 def __iter__(self):
     return itertools.filterfalse(self.pred, self.data)
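A minimal, self-contained sketch of the kind of class this __iter__ belongs to (the attribute names pred and data come from the snippet; the class name and sample data are illustrative):

import itertools

class Excluding:
    """Iterable view over data that skips items matching pred."""
    def __init__(self, pred, data):
        self.pred = pred
        self.data = data

    def __iter__(self):
        return itertools.filterfalse(self.pred, self.data)

print(list(Excluding(str.isdigit, "a1b2c3")))  # ['a', 'b', 'c']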
Example #34
def second_order_param_shift(tape,
                             dev_wires,
                             argnum=None,
                             shift=np.pi / 2,
                             gradient_recipes=None):
    r"""Generate the second-order CV parameter-shift tapes and postprocessing methods required
    to compute the gradient of a gate parameter with respect to an
    expectation value.

    .. note::

        The 2nd order method can also handle first-order observables, but the
        1st order method may be more efficient unless it's really easy to
        experimentally measure arbitrary 2nd order observables.

    .. warning::

        The 2nd order method can only be executed on devices that support the
        :class:`~.PolyXP` observable.

    Args:
        tape (.QuantumTape): quantum tape to differentiate
        dev_wires (.Wires): wires on the device the parameter-shift method is computed on
        argnum (int or list[int] or None): Trainable parameter indices to differentiate
            with respect to. If not provided, the derivative with respect to all
            trainable indices are returned.
        shift (float): The shift value to use for the two-term parameter-shift formula.
            Only valid if the operation in question supports the two-term parameter-shift
            rule (that is, it has two distinct eigenvalues) and ``gradient_recipes``
            is ``None``.
        gradient_recipes (tuple(list[list[float]] or None)): List of gradient recipes
            for the parameter-shift method. One gradient recipe must be provided
            per trainable parameter.

    Returns:
        tuple[list[QuantumTape], function]: A tuple containing a
        list of generated tapes, in addition to a post-processing
        function to be applied to the evaluated tapes.
    """
    argnum = argnum or list(tape.trainable_params)
    gradient_recipes = gradient_recipes or [None] * len(argnum)

    gradient_tapes = []
    shapes = []
    obs_indices = []
    gradient_values = []

    for idx, _ in enumerate(tape.trainable_params):
        t_idx = list(tape.trainable_params)[idx]
        op = tape._par_info[t_idx]["op"]

        if idx not in argnum:
            # parameter has zero gradient
            shapes.append(0)
            obs_indices.append([])
            gradient_values.append([])
            continue

        shapes.append(1)

        # get the gradient recipe for the trainable parameter
        recipe = gradient_recipes[argnum.index(idx)]
        recipe = recipe or _get_operation_recipe(tape, idx, shift=shift)
        recipe = _process_gradient_recipe(recipe)
        coeffs, multipliers, shifts = recipe

        if len(shifts) != 2:
            # The 2nd order CV parameter-shift rule only accepts two-term shifts
            raise NotImplementedError(
                "Taking the analytic gradient for order-2 operators is "
                f"unsupported for operation {op} which has a "
                "gradient recipe of more than two terms.")

        shifted_tapes = generate_shifted_tapes(tape, idx, shifts, multipliers)

        # evaluate transformed observables at the original parameter point
        # first build the Heisenberg picture transformation matrix Z
        Z0 = op.heisenberg_tr(dev_wires, inverse=True)
        Z2 = shifted_tapes[0]._par_info[t_idx]["op"].heisenberg_tr(dev_wires)
        Z1 = shifted_tapes[1]._par_info[t_idx]["op"].heisenberg_tr(dev_wires)

        # derivative of the operation
        Z = Z2 * coeffs[0] + Z1 * coeffs[1]
        Z = Z @ Z0

        # conjugate Z with all the descendant operations
        B = np.eye(1 + 2 * len(dev_wires))
        B_inv = B.copy()

        succ = tape.graph.descendants_in_order((op, ))
        operation_descendents = itertools.filterfalse(
            qml.circuit_graph._is_observable, succ)
        observable_descendents = filter(qml.circuit_graph._is_observable, succ)

        for BB in operation_descendents:
            if not BB.supports_heisenberg:
                # if the descendant gate is non-Gaussian in parameter-shift differentiation
                # mode, then there must be no observable following it.
                continue

            B = BB.heisenberg_tr(dev_wires) @ B
            B_inv = B_inv @ BB.heisenberg_tr(dev_wires, inverse=True)

        Z = B @ Z @ B_inv  # conjugation

        g_tape = tape.copy(copy_operations=True)
        constants = []

        # transform the descendant observables into their derivatives using Z
        transformed_obs_idx = []

        for obs in observable_descendents:
            # get the index of the descendent observable
            idx = tape.observables.index(obs)
            transformed_obs_idx.append(idx)

            transformed_obs = _transform_observable(obs, Z, dev_wires)

            A = transformed_obs.parameters[0]
            constant = None

            # Check if the transformed observable corresponds to a constant term.
            if len(A.nonzero()[0]) == 1:
                if A.ndim == 2 and A[0, 0] != 0:
                    constant = A[0, 0]

                elif A.ndim == 1 and A[0] != 0:
                    constant = A[0]

            constants.append(constant)

            g_tape._measurements[idx] = qml.measure.MeasurementProcess(
                qml.operation.Expectation,
                _transform_observable(obs, Z, dev_wires))

        if not any(i is None for i in constants):
            # Check if *all* transformed observables correspond to a constant
            # term. If this is the case for all transformed observables on the tape,
            # then <psi|A|psi> = A<psi|psi> = A,
            # and we can avoid the device execution.
            shapes[-1] = 0
            obs_indices.append(transformed_obs_idx)
            gradient_values.append(constants)
            continue

        gradient_tapes.append(g_tape)
        obs_indices.append(transformed_obs_idx)
        gradient_values.append(None)

    def processing_fn(results):
        grads = []
        start = 0

        if not results:
            results = [np.zeros([tape.output_dim])]

        interface = qml.math.get_interface(results[0])
        iterator = enumerate(zip(shapes, gradient_values, obs_indices))

        for i, (shape, grad_value, obs_ind) in iterator:

            if shape == 0:
                # parameter has zero gradient
                g = qml.math.zeros_like(results[0], like=interface)

                if grad_value:
                    g = qml.math.scatter_element_add(g,
                                                     obs_ind,
                                                     grad_value,
                                                     like=interface)

                grads.append(g)
                continue

            obs_result = results[start:start + shape]
            start = start + shape

            # compute the linear combination of results and coefficients
            obs_result = qml.math.stack(obs_result[0])
            g = qml.math.zeros_like(obs_result, like=interface)

            if qml.math.get_interface(g) not in ("tensorflow", "autograd"):
                obs_ind = (obs_ind, )

            g = qml.math.scatter_element_add(g,
                                             obs_ind,
                                             obs_result[obs_ind],
                                             like=interface)
            grads.append(g)

        # The following is for backwards compatibility; currently,
        # the device stacks multiple measurement arrays, even if not the same
        # size, resulting in a ragged array.
        # In the future, we might want to change this so that only tuples
        # of arrays are returned.
        for i, g in enumerate(grads):
            g = qml.math.convert_like(g, results[0])
            if hasattr(g, "dtype") and g.dtype is np.dtype("object"):
                grads[i] = qml.math.hstack(g)

        return qml.math.T(qml.math.stack(grads))

    return gradient_tapes, processing_fn
Example #35
def compute():
    ans = next(itertools.filterfalse(test_goldbach, itertools.count(9, 2)))
    return str(ans)
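test_goldbach is not shown in this snippet; the name and the search over odd numbers starting at 9 suggest a check of Goldbach's other conjecture (every odd composite is a prime plus twice a square). A hedged sketch of a predicate that would make compute() work, purely an assumption about the missing helper:

import itertools

def is_prime(n):
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

def test_goldbach(n):
    # Assumed behaviour: True if the odd number n is prime or can be written
    # as prime + 2*k*k; compute() then returns the first counterexample.
    if is_prime(n):
        return True
    k = 1
    while 2 * k * k < n:
        if is_prime(n - 2 * k * k):
            return True
        k += 1
    return False

print(next(itertools.filterfalse(test_goldbach, itertools.count(9, 2))))  # 5777 under this assumed predicate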
Example #36
  async def preProcessForComparison(results, target_size, size_tolerance_prct):
    """ Process results to prepare them for future comparison and sorting. """
    # find reference (=image most likely to match target cover ignoring factors like size and format)
    reference = None
    for result in results:
      if result.source_quality is CoverSourceQuality.REFERENCE:
        if ((reference is None) or
            (CoverSourceResult.compare(result,
                                       reference,
                                       target_size=target_size,
                                       size_tolerance_prct=size_tolerance_prct) > 0)):
          reference = result

    # remove results that are only refs
    results = list(itertools.filterfalse(operator.attrgetter("is_only_reference"), results))

    # remove duplicates
    no_dup_results = []
    for result in results:
      is_dup = False
      for result_comp in results:
        if ((result_comp is not result) and
            (result_comp.urls == result.urls) and
            (__class__.compare(result,
                               result_comp,
                               target_size=target_size,
                               size_tolerance_prct=size_tolerance_prct) < 0)):
          is_dup = True
          break
      if not is_dup:
        no_dup_results.append(result)
    dup_count = len(results) - len(no_dup_results)
    if dup_count > 0:
      logging.getLogger("Cover").info("Removed %u duplicate results" % (dup_count))
      results = no_dup_results

    if reference is not None:
      logging.getLogger("Cover").info("Reference is: %s" % (reference))
      reference.is_similar_to_reference = True

      # calculate sigs
      futures = []
      for result in results:
        coroutine = result.updateSignature()
        future = asyncio.ensure_future(coroutine)
        futures.append(future)
      if reference.is_only_reference:
        assert(reference not in results)
        coroutine = reference.updateSignature()
        future = asyncio.ensure_future(coroutine)
        futures.append(future)
      if futures:
        await asyncio.wait(futures)
      for future in futures:
        future.result()  # raise pending exception if any

      # compare other results to reference
      for result in results:
        if ((result is not reference) and
                (result.thumbnail_sig is not None) and
                (reference.thumbnail_sig is not None)):
          result.is_similar_to_reference = __class__.areImageSigsSimilar(result.thumbnail_sig,
                                                                         reference.thumbnail_sig)
          if result.is_similar_to_reference:
            logging.getLogger("Cover").debug("%s is similar to reference" % (result))
          else:
            logging.getLogger("Cover").debug("%s is NOT similar to reference" % (result))
    else:
      logging.getLogger("Cover").warning("No reference result found")

    return results
Example #37
def _install_wheel(
    name,  # type: str
    wheel_zip,  # type: ZipFile
    wheel_path,  # type: str
    scheme,  # type: Scheme
    pycompile=True,  # type: bool
    warn_script_location=True,  # type: bool
    direct_url=None,  # type: Optional[DirectUrl]
    requested=False,  # type: bool
):
    # type: (...) -> None
    """Install a wheel.

    :param name: Name of the project to install
    :param wheel_zip: open ZipFile for wheel being installed
    :param scheme: Distutils scheme dictating the install directories
    :param req_description: String used in place of the requirement, for
        logging
    :param pycompile: Whether to byte-compile installed Python files
    :param warn_script_location: Whether to check that scripts are installed
        into a directory on PATH
    :raises UnsupportedWheel:
        * when the directory holds an unpacked wheel with incompatible
          Wheel-Version
        * when the .dist-info dir does not match the wheel
    """
    info_dir, metadata = parse_wheel(wheel_zip, name)

    if wheel_root_is_purelib(metadata):
        lib_dir = scheme.purelib
    else:
        lib_dir = scheme.platlib

    # Record details of the files moved
    #   installed = files copied from the wheel to the destination
    #   changed = files changed while installing (scripts #! line typically)
    #   generated = files newly generated during the install (script wrappers)
    installed = {}  # type: Dict[RecordPath, RecordPath]
    changed = set()  # type: Set[RecordPath]
    generated = []  # type: List[str]

    def record_installed(srcfile, destfile, modified=False):
        # type: (RecordPath, str, bool) -> None
        """Map archive RECORD paths to installation RECORD paths."""
        newpath = _fs_to_record_path(destfile, lib_dir)
        installed[srcfile] = newpath
        if modified:
            changed.add(_fs_to_record_path(destfile))

    def all_paths():
        # type: () -> Iterable[RecordPath]
        names = wheel_zip.namelist()
        # If a flag is set, names may be unicode in Python 2. We convert to
        # text explicitly so these are valid for lookup in RECORD.
        decoded_names = map(ensure_text, names)
        for name in decoded_names:
            yield cast("RecordPath", name)

    def is_dir_path(path):
        # type: (RecordPath) -> bool
        return path.endswith("/")

    def assert_no_path_traversal(dest_dir_path, target_path):
        # type: (str, str) -> None
        if not is_within_directory(dest_dir_path, target_path):
            message = (
                "The wheel {!r} has a file {!r} trying to install"
                " outside the target directory {!r}"
            )
            raise InstallationError(
                message.format(wheel_path, target_path, dest_dir_path)
            )

    def root_scheme_file_maker(zip_file, dest):
        # type: (ZipFile, str) -> Callable[[RecordPath], File]
        def make_root_scheme_file(record_path):
            # type: (RecordPath) -> File
            normed_path = os.path.normpath(record_path)
            dest_path = os.path.join(dest, normed_path)
            assert_no_path_traversal(dest, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)

        return make_root_scheme_file

    def data_scheme_file_maker(zip_file, scheme):
        # type: (ZipFile, Scheme) -> Callable[[RecordPath], File]
        scheme_paths = {}
        for key in SCHEME_KEYS:
            encoded_key = ensure_text(key)
            scheme_paths[encoded_key] = ensure_text(
                getattr(scheme, key), encoding=sys.getfilesystemencoding()
            )

        def make_data_scheme_file(record_path):
            # type: (RecordPath) -> File
            normed_path = os.path.normpath(record_path)
            try:
                _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)
            except ValueError:
                message = (
                    "Unexpected file in {}: {!r}. .data directory contents"
                    " should be named like: '<scheme key>/<path>'."
                ).format(wheel_path, record_path)
                raise InstallationError(message)

            try:
                scheme_path = scheme_paths[scheme_key]
            except KeyError:
                valid_scheme_keys = ", ".join(sorted(scheme_paths))
                message = (
                    "Unknown scheme key used in {}: {} (for file {!r}). .data"
                    " directory contents should be in subdirectories named"
                    " with a valid scheme key ({})"
                ).format(
                    wheel_path, scheme_key, record_path, valid_scheme_keys
                )
                raise InstallationError(message)

            dest_path = os.path.join(scheme_path, dest_subpath)
            assert_no_path_traversal(scheme_path, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)

        return make_data_scheme_file

    def is_data_scheme_path(path):
        # type: (RecordPath) -> bool
        return path.split("/", 1)[0].endswith(".data")

    paths = all_paths()
    file_paths = filterfalse(is_dir_path, paths)
    root_scheme_paths, data_scheme_paths = partition(
        is_data_scheme_path, file_paths
    )

    make_root_scheme_file = root_scheme_file_maker(
        wheel_zip,
        ensure_text(lib_dir, encoding=sys.getfilesystemencoding()),
    )
    files = map(make_root_scheme_file, root_scheme_paths)

    def is_script_scheme_path(path):
        # type: (RecordPath) -> bool
        parts = path.split("/", 2)
        return (
            len(parts) > 2 and
            parts[0].endswith(".data") and
            parts[1] == "scripts"
        )

    other_scheme_paths, script_scheme_paths = partition(
        is_script_scheme_path, data_scheme_paths
    )

    make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
    other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
    files = chain(files, other_scheme_files)

    # Get the defined entry points
    distribution = pkg_resources_distribution_for_wheel(
        wheel_zip, name, wheel_path
    )
    console, gui = get_entrypoints(distribution)

    def is_entrypoint_wrapper(file):
        # type: (File) -> bool
        # EP, EP.exe and EP-script.py are scripts generated for
        # entry point EP by setuptools
        path = file.dest_path
        name = os.path.basename(path)
        if name.lower().endswith('.exe'):
            matchname = name[:-4]
        elif name.lower().endswith('-script.py'):
            matchname = name[:-10]
        elif name.lower().endswith(".pya"):
            matchname = name[:-4]
        else:
            matchname = name
        # Ignore setuptools-generated scripts
        return (matchname in console or matchname in gui)

    script_scheme_files = map(make_data_scheme_file, script_scheme_paths)
    script_scheme_files = filterfalse(
        is_entrypoint_wrapper, script_scheme_files
    )
    script_scheme_files = map(ScriptFile, script_scheme_files)
    files = chain(files, script_scheme_files)

    for file in files:
        file.save()
        record_installed(file.src_record_path, file.dest_path, file.changed)

    def pyc_source_file_paths():
        # type: () -> Iterator[str]
        # We de-duplicate installation paths, since there can be overlap (e.g.
        # file in .data maps to same location as file in wheel root).
        # Sorting installation paths makes it easier to reproduce and debug
        # issues related to permissions on existing files.
        for installed_path in sorted(set(installed.values())):
            full_installed_path = os.path.join(lib_dir, installed_path)
            if not os.path.isfile(full_installed_path):
                continue
            if not full_installed_path.endswith('.py'):
                continue
            yield full_installed_path

    def pyc_output_path(path):
        # type: (str) -> str
        """Return the path the pyc file would have been written to.
        """
        return importlib.util.cache_from_source(path)

    # Compile all of the pyc files for the installed files
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                for path in pyc_source_file_paths():
                    # Python 2's `compileall.compile_file` requires a str in
                    # error cases, so we must convert to the native type.
                    path_arg = ensure_str(
                        path, encoding=sys.getfilesystemencoding()
                    )
                    success = compileall.compile_file(
                        path_arg, force=True, quiet=True
                    )
                    if success:
                        pyc_path = pyc_output_path(path)
                        assert os.path.exists(pyc_path)
                        pyc_record_path = cast(
                            "RecordPath", pyc_path.replace(os.path.sep, "/")
                        )
                        record_installed(pyc_record_path, pyc_path)
        logger.debug(stdout.getvalue())

    maker = PipScriptMaker(None, scheme.scripts)

    # Ensure old scripts are overwritten.
    # See https://github.com/pypa/pip/issues/1800
    maker.clobber = True

    # Ensure we don't generate any variants for scripts because this is almost
    # never what somebody wants.
    # See https://bitbucket.org/pypa/distlib/issue/35/
    maker.variants = {''}

    # This is required because otherwise distlib creates scripts that are not
    # executable.
    # See https://bitbucket.org/pypa/distlib/issue/32/
    maker.set_mode = True

    # Generate the console and GUI entry points specified in the wheel
    scripts_to_generate = get_console_script_specs(console)

    gui_scripts_to_generate = list(starmap('{} = {}'.format, gui.items()))

    generated_console_scripts = maker.make_multiple(scripts_to_generate)
    generated.extend(generated_console_scripts)

    generated.extend(
        maker.make_multiple(gui_scripts_to_generate, {'gui': True})
    )

    if warn_script_location:
        msg = message_about_scripts_not_on_PATH(generated_console_scripts)
        if msg is not None:
            logger.warning(msg)

    generated_file_mode = 0o666 & ~current_umask()

    @contextlib.contextmanager
    def _generate_file(path, **kwargs):
        # type: (str, **Any) -> Iterator[BinaryIO]
        with adjacent_tmp_file(path, **kwargs) as f:
            yield f
        os.chmod(f.name, generated_file_mode)
        replace(f.name, path)

    dest_info_dir = os.path.join(lib_dir, info_dir)

    # Record pip as the installer
    installer_path = os.path.join(dest_info_dir, 'INSTALLER')
    with _generate_file(installer_path) as installer_file:
        installer_file.write(b'pip\n')
    generated.append(installer_path)

    # Record the PEP 610 direct URL reference
    if direct_url is not None:
        direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
        with _generate_file(direct_url_path) as direct_url_file:
            direct_url_file.write(direct_url.to_json().encode("utf-8"))
        generated.append(direct_url_path)

    # Record the REQUESTED file
    if requested:
        requested_path = os.path.join(dest_info_dir, 'REQUESTED')
        with open(requested_path, "w"):
            pass
        generated.append(requested_path)

    record_text = distribution.get_metadata('RECORD')
    record_rows = list(csv.reader(record_text.splitlines()))

    rows = get_csv_rows_for_installed(
        record_rows,
        installed=installed,
        changed=changed,
        generated=generated,
        lib_dir=lib_dir)

    # Record details of all files installed
    record_path = os.path.join(dest_info_dir, 'RECORD')

    with _generate_file(record_path, **csv_io_kwargs('w')) as record_file:
        # The type mypy infers for record_file is different for Python 3
        # (typing.IO[Any]) and Python 2 (typing.BinaryIO). We explicitly
        # cast to typing.IO[str] as a workaround.
        writer = csv.writer(cast('IO[str]', record_file))
        writer.writerows(_normalized_outrows(rows))
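
# A minimal, self-contained sketch of the partition idiom used above: the wheel's
# file paths are split into root-scheme and .data-scheme groups (and the .data group
# again into scripts vs. other files) with a tee + filter/filterfalse helper. The
# paths and the local partition() below are illustrative, not pip's actual objects.
from itertools import filterfalse, tee

def partition(pred, iterable):
    t1, t2 = tee(iterable)
    return filterfalse(pred, t1), filter(pred, t2)

paths = ["pkg/__init__.py", "pkg-1.0.data/scripts/run", "pkg-1.0.data/purelib/extra.py"]
is_data_scheme_path = lambda p: p.split("/", 1)[0].endswith(".data")
root_paths, data_paths = partition(is_data_scheme_path, paths)
print(list(root_paths))  # ['pkg/__init__.py']
print(list(data_paths))  # ['pkg-1.0.data/scripts/run', 'pkg-1.0.data/purelib/extra.py']
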
def filter_finished_tasks(tasks_info, finished_task_ids):
    # Allow for O(1) lookups on average to speed up the process
    finished_task_ids = set(finished_task_ids)

    return list(
        itertools.filterfalse(lambda x: x[0] in finished_task_ids, tasks_info))
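
# A minimal usage sketch, assuming tasks_info holds (task_id, payload) tuples,
# which is what the x[0] lookup above implies:
#
#     filter_finished_tasks([(1, "a"), (2, "b"), (3, "c")], finished_task_ids=[2])
#     -> [(1, "a"), (3, "c")]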
Exemple #39
0
def partition(gen, pred):
    t1, t2 = tee(gen)

    return filter(pred, t2), filterfalse(pred, t1)
Exemple #40
0
    def migrate(self, expression, name_migration_map=None):
        """Migrate an expression created for a different constraint set to self.
        Returns an expression that can be used with this ConstraintSet.

        All the foreign variables used in the expression are replaced by
        variables of this constraint set. If a variable was already replaced
        before, the replacement is taken from the provided migration map.

        The migration mapping is updated with new replacements.

        :param expression: the potentially foreign expression
        :param name_migration_map: mapping of already migrated variables. Maps the string name
            of a foreign variable to the string name of its existing migrated counterpart; it is
            updated in place during this migration.
        :return: a migrated expression in which all the variables are local. name_migration_map
            is updated with any new replacements.

        """
        if name_migration_map is None:
            name_migration_map = {}

        #  name_migration_map -> object_migration_map
        #  Based on the name mapping in name_migration_map, build an object-to-object
        #  mapping to be used when replacing variables.
        #  inv: object_migration_map's keys should ALWAYS be external/foreign
        #  expressions, and its values should ALWAYS be internal/local expressions
        object_migration_map = {}

        # List of foreign vars used in expression
        foreign_vars = itertools.filterfalse(self.is_declared,
                                             get_variables(expression))
        for foreign_var in foreign_vars:
            # If a variable with the same name was previously migrated
            if foreign_var.name in name_migration_map:
                migrated_name = name_migration_map[foreign_var.name]
                native_var = self.get_variable(migrated_name)
                assert (
                    native_var is not None
                ), "name_migration_map contains a variable that does not exist in this ConstraintSet"
                object_migration_map[foreign_var] = native_var
            else:
                # foreign_var was not found in the local declared variables nor
                # any variable with the same name was previously migrated
                # let's make a new unique internal name for it
                migrated_name = foreign_var.name
                if migrated_name in self._declarations:
                    migrated_name = self._make_unique_name(
                        f"{foreign_var.name}_migrated")
                # Create and declare a new variable of given type
                if isinstance(foreign_var, Bool):
                    new_var = self.new_bool(name=migrated_name)
                elif isinstance(foreign_var, BitVec):
                    new_var = self.new_bitvec(foreign_var.size,
                                              name=migrated_name)
                elif isinstance(foreign_var, Array):
                    # Note that we are discarding the ArrayProxy encapsulation
                    new_var = self.new_array(
                        index_max=foreign_var.index_max,
                        index_bits=foreign_var.index_bits,
                        value_bits=foreign_var.value_bits,
                        name=migrated_name,
                    ).array
                else:
                    raise NotImplementedError(
                        f"Unknown expression type {type(foreign_var)} encountered during expression migration"
                    )
                # Update the var to var mapping
                object_migration_map[foreign_var] = new_var
                # Update the name to name mapping
                name_migration_map[foreign_var.name] = new_var.name

        #  Actually replace each appearance of migrated variables by the new ones
        migrated_expression = replace(expression, object_migration_map)
        return migrated_expression
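
# The filterfalse step at the heart of migrate(), reduced to plain Python: keep only
# the names that fail an "already declared here" membership test. The ConstraintSet
# API above is Manticore-specific; this sketch only mirrors the idiom with ordinary
# strings and a set, so every name below is illustrative.
import itertools

local_names = {"x", "y"}
expression_vars = ["x", "z", "y", "w"]
foreign = list(itertools.filterfalse(local_names.__contains__, expression_vars))
print(foreign)  # ['z', 'w']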
Exemple #41
0
def partition(pred, iterable):
    """From itertools documentation"""
    t1, t2 = tee(iterable)
    return list(filterfalse(pred, t1)), list(filter(pred, t2))
Exemple #42
0
def _filter(flist, skip):
    return list(filterfalse(skip.__contains__, flist))
Exemple #43
0
def partition(pred: Callable, iterable: Iterable):
    """Use a predicate to partition entries into false entries and true entries"""
    iter_1, iter_2 = tee(iterable)
    return filterfalse(pred, iter_1), filter(pred, iter_2)
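
# Quick usage sketch: this recipe (following the itertools docs) yields the false
# entries first and the true entries second, the opposite order of Exemple #39 above.
falses, trues = partition(lambda n: n % 2 == 0, range(6))
print(list(falses), list(trues))  # [1, 3, 5] [0, 2, 4]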
Exemple #44
0
def iter_2(lst):
    return list(itertools.filterfalse(lambda i: len(i) < 5, lst))

from itertools import filterfalse, chain

# False, 0, [], None
f = filterfalse(None, chain([None, 0, []], range(10)))

for i in f:
    print(i)
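
# With a predicate of None, filterfalse yields the items that are themselves falsy,
# so the loop above is expected to print None, 0, [] and then 0 (from range(10)).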
Exemple #46
0
import itertools

## takewhile - returns items from an iterator while a function keeps returning True
# (prelude assumed here: the excerpt begins with `it` already defined, and the
#  matching dropwhile demo below uses these same values)
values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
less_than_seven = lambda x: x < 7
it = itertools.takewhile(less_than_seven, values)
print(list(it))

## dropwhile - skips items from an iterator while a function returns True (i.e. until it first returns False)
values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
less_than_seven = lambda x: x < 7
it = itertools.dropwhile(less_than_seven, values)
print(list(it))

## filterfalse - returns the items for which the predicate function returns False
values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
evens = lambda x: x % 2 == 0

filter_result = filter(evens, values)
print('Filter:      ', list(filter_result))

filter_false_result = itertools.filterfalse(evens, values)
print('Filter false:', list(filter_false_result))

#Producing combinations of Items from iterators
## accumulate - folds an item from the iterator into a running value
values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sum_reduce = itertools.accumulate(values)
print('sum:   ', list(sum_reduce))


def sum_modulo_20(first, second):
    output = first + second
    return output % 20


modulo_reduce = itertools.accumulate(values, sum_modulo_20)
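
# For reference, the two running folds above evaluate to:
#   sum_reduce:    [1, 3, 6, 10, 15, 21, 28, 36, 45, 55]
#   modulo_reduce: [1, 3, 6, 10, 15, 1, 8, 16, 5, 15]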
Exemple #47
0
def partition(pred, list_):
    # Partition the items in lists / non-lists
    list_ = [x for x in list_ if x]
    lists = list(filter(pred, list_))
    not_lists = itertools.filterfalse(pred, list_)
    return (lists, not_lists)

def PartitionIterator(pred: typing.Callable[[object], bool],
                      stream: typing.Iterable[object]):

    (t1, t2) = itertools.tee(stream)

    return (itertools.filterfalse(pred, t1), filter(pred, t2))
Exemple #49
0
def get_assigned_names_excluding_class_attributes(tree):
    assigned_items = get_assigned_vars(tree, names_only=False)
    return {
        getattr(node, 'id', None)
        for node in filterfalse(is_class_attribute, assigned_items)
    }
Exemple #50
0
def _partition(pred, iterable):
    t1, t2 = itertools.tee(iterable)
    return (list(itertools.filterfalse(pred, t1)), list(filter(pred, t2)))

def list_subtract(l1, l2):
    return list(filterfalse(lambda x: x in l2, l1))
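
# list_subtract keeps l1's order and duplicates, which plain set difference would
# lose; when l2 is large, a set-based membership test avoids the
# O(len(l1) * len(l2)) scan. A sketch of that variant (not the original helper):
from itertools import filterfalse

def list_subtract_fast(l1, l2):
    exclude = set(l2)
    return list(filterfalse(exclude.__contains__, l1))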
Exemple #52
0
# ============
# Main script.
# ============
REGEX1 = re.compile(
    r"\b(0[1-9])(\.D[1-9]\.T(?:0[1-9]|[1-5]\d)\.([A-Z0-9]{3,4}))",
    re.IGNORECASE)
REGEX2 = re.compile(r"\bRECYCLE\b", re.IGNORECASE)
TEMPDIR = Path(os.path.expandvars("%_TMPDIR%"))

# -----
collection = csv.reader(arguments.collection,
                        dialect=CustomDialect)  # type: Any
collection = [compress(item, [0, 0, 1, 0, 0]) for item in collection]
collection = list(
    filterfalse(match_(REGEX2.search), chain.from_iterable(collection)))

# -----
ape = [
    (file,
     Path(file).parent.parent / "1.Monkey's Audio" / f"{Path(file).stem}.ape")
    for file in collection
]  # type: List[Tuple[str, Path]]
dsf = [(file, Path(file).parent.parent / "1.DSD 64" / f"{Path(file).stem}.dsf")
       for file in collection]  # type: List[Tuple[str, Path]]
flac = [(file, Path(file).parent.parent / "1.Free Lossless Audio Codec" /
         f"{Path(file).stem}.flac")
        for file in collection]  # type: List[Tuple[str, Path]]

# -----
collection = chain.from_iterable(
Exemple #53
0
def vowel(c):
    return c.lower() in 'aeiou'


print(list(filter(vowel, 'Aardvark')))

import itertools
print(list(itertools.filterfalse(vowel, 'Aardvark')))
print(list(itertools.dropwhile(vowel, 'Aardvark')))
print(list(itertools.takewhile(vowel, 'Aardvark')))
print(list(itertools.compress('Aardvark', (1, 0, 1, 1, 0, 1))))
print(list(itertools.islice('Aardvark', 4)))
print(list(itertools.islice('Aardvark', 4, 7)))
print(list(itertools.islice('Aardvark', 1, 7, 2)))
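
# Expected output of the eight prints above, for reference:
#   ['A', 'a', 'a']
#   ['r', 'd', 'v', 'r', 'k']
#   ['r', 'd', 'v', 'a', 'r', 'k']
#   ['A', 'a']
#   ['A', 'r', 'd', 'a']
#   ['A', 'a', 'r', 'd']
#   ['v', 'a', 'r']
#   ['a', 'd', 'a']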
Exemple #54
0
def export_e_emus(session, settings, nodes, emus_file):
    fieldnames = [
        "personUUID",
        "entryDate",
        "leaveDate",
        "cpr",
        "firstName",
        "lastName",
        "workPhone",
        "workContract",
        "workContractText",
        "positionId",
        "position",
        "orgUnit",
        "email",
        "username",
    ]
    manager_rows = []
    engagement_rows = []

    settings["username-itsystem-uuid"] = (session.query(
        ItSystem.uuid).filter(ItSystem.navn == "Active Directory").scalar())

    for node in tqdm(PreOrderIter(nodes["root"]),
                     total=len(nodes),
                     desc="export e"):
        ou = node.unit

        # normal engagements - original export
        engagements = (session.query(Engagement).filter(
            Engagement.enhed_uuid == ou.uuid).all())
        engagements = filterfalse(hourly_paid, engagements)
        engagements = filterfalse(partial(discarded, settings), engagements)
        for engagement in engagements:
            logger.info("adding engagement %s", engagement.uuid)
            engagement_rows.append(
                build_engagement_row(session, settings, ou, engagement))
            engagement_count(nodes, ou)

        # manager engagements - above mentioned musskema adaptation
        for manager in session.query(Leder).filter(
                Leder.enhed_uuid == ou.uuid).all():
            if manager.bruger_uuid is None:
                logger.info("skipping vacant manager %s", manager.uuid)
                continue  # vacant manager
            else:
                # extend, as there can be zero or one
                manager_rows.extend(
                    build_manager_rows(session, settings, ou, manager))

    if not len(manager_rows):
        logger.error("no managers found - did You forget to"
                     " specify correct EMUS_RESPONSIBILITY_CLASS")
    rows = engagement_rows + manager_rows
    logger.info(
        "writing %d engagement rows and %d manager rows to file",
        len(engagement_rows),
        len(manager_rows),
    )

    last_changed = datetime.datetime.now().strftime("%Y-%m-%d")
    for r in rows:
        eng_uuid = r.get("engagementUUID") or r["personUUID"]
        emus_file.write(
            '<employee id="%s" uuid="%s" client="%s" lastChanged="%s">\n' % (
                r["employee_id"],
                eng_uuid,
                r["client"],
                last_changed,
            ))
        for fn in fieldnames:
            emus_file.write("<%s>%s</%s>\n" % (fn, escape(r.get(fn, "")), fn))
        emus_file.write("</employee>\n")
Exemple #55
0
from itertools import chain
print(list(chain([1, 2, 3], [4, 5], [6, 7])))

from itertools import filterfalse
print(
    list(
        filterfalse(lambda x: len(x) < 5,
                    ['hello', 'i', 'write', 'cool', 'code'])))


def count_digits(num):
    x = str(num)
    res = 0
    for i in x:
        res += int(i)
    return res


print(count_digits(126))


def count_words(list_of_words):
    res = 0
    for i in range(len(list_of_words)):
        sign = True
        for j in range(i + 1, len(list_of_words)):
            if list_of_words[i] == list_of_words[j]:
                sign = False
                break
        if sign:
            res += 1
Exemple #56
0
    def filter_candidates(self, context):
        for source in self._current_sources:
            ctx = source.context
            ctx['matchers'] = context['matchers']
            ctx['input'] = context['input']
            if context['smartcase']:
                ctx['ignorecase'] = re.search(r'[A-Z]', ctx['input']) is None
            ctx['mode'] = context['mode']
            ctx['async_timeout'] = 0.03 if ctx['mode'] != 'insert' else 0.02
            if ctx['prev_input'] != ctx['input']:
                ctx['prev_time'] = time.time()
                if ctx['is_interactive']:
                    ctx['event'] = 'interactive'
                    ctx['all_candidates'] = self._gather_source_candidates(
                        ctx, source)
            ctx['prev_input'] = ctx['input']
            entire = ctx['all_candidates']
            if ctx['is_async']:
                ctx['event'] = 'async'
                entire += self._gather_source_candidates(ctx, source)
            if len(entire) > 20000 and (time.time() - ctx['prev_time'] <
                                        int(context['skiptime']) / 1000.0):
                ctx['is_skipped'] = True
                yield self._get_source_status(
                    ctx, source, entire, []), [], []
                continue
            if not entire:
                yield self._get_source_status(
                    ctx, source, entire, []), [], []
                continue

            ctx['is_skipped'] = False
            partial = []
            ctx['candidates'] = entire
            for i in range(0, len(entire), 1000):
                ctx['candidates'] = entire[i:i+1000]
                matchers = [self._filters[x] for x in
                            (ctx['matchers'].split(',') if ctx['matchers']
                             else source.matchers)
                            if x in self._filters]
                self.match_candidates(ctx, matchers)
                partial += ctx['candidates']
                if len(partial) >= source.max_candidates:
                    break
            ctx['candidates'] = partial
            for f in [self._filters[x]
                      for x in source.sorters + source.converters
                      if x in self._filters]:
                ctx['candidates'] = f.filter(ctx)
            partial = ctx['candidates'][: source.max_candidates]
            for c in partial:
                c['source_name'] = source.name
                c['source_index'] = source.index
            ctx['candidates'] = []

            patterns = filterfalse(lambda x: x == '', (
                self._filters[x].convert_pattern(context['input'])
                for x in source.matchers if self._filters[x]))

            yield self._get_source_status(
                ctx, source, entire, partial), partial, patterns
Exemple #57
0
    def incomplete_files(self):
        return list(chain(*(job.output
                            for job in
                            filter(self.workflow.persistence.incomplete,
                                   filterfalse(self.needrun, self.jobs)))))
Exemple #58
0
def main(args):
    """Start device identification based on command line parameters."""
    if sys.version_info[0] < 3:
        raise Exception("This tool must be run using Python 3!")
    parser = argparse.ArgumentParser(
        description=
        "Tool for classifying IoT devices based on captured network traffic")
    parser = argparser_add_verbose(parser)
    parser.add_argument(
        "pcap",
        help=("Packet capture file (in PCAP or PCAPNG format) "
              "with recorded traffic for device identification"),
    )
    parser.add_argument(
        "--min",
        default=3,
        help="minimum number of packets to classify device "
        "(devices with smaller number will not be classified) (default: 3)",
    )
    parser.add_argument(
        "--max",
        default=1000,
        help=
        "maximum number of packets used to classify device (default: 1000)",
    )
    parser.add_argument("--ip", "-I", help="use IP filter to identify device")
    parser.add_argument(
        "-S",
        "--short",
        dest="short_result",
        help="display only short result of classification",
        action="store_true",
    )

    options = parser.parse_args(args)
    test_name = "device identification"

    if options.verbose:
        print("options: {}".format(options))
    else:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
        logging.getLogger("tensorflow").setLevel(logging.FATAL)

    packets = load_packets(options.pcap)

    if options.pcap and not options.ip:
        list_ips = ips_from_pcap(packets)
    else:
        list_ips = prepare_ips(options.ip)

    list_ips = list(filterfalse(lambda x: x.endswith(".0"), list_ips))
    list_ips = list(filterfalse(lambda x: x.endswith(".1"), list_ips))
    list_ips = list(filterfalse(lambda x: x.endswith(".255"), list_ips))
    list_ips = list(filterfalse(lambda x: x.startswith("224.0."), list_ips))
    list_ips = list(filterfalse(lambda x: x.startswith("232."), list_ips))
    list_ips = list(filterfalse(lambda x: x.startswith("233."), list_ips))
    list_ips = list(filterfalse(lambda x: x.startswith("234."), list_ips))
    list_ips = list(filterfalse(lambda x: x.startswith("239."), list_ips))
    list_ips = list(filterfalse(lambda x: x.startswith("ff0"), list_ips))

    if options.verbose:
        print("list_ips: {}".format(list_ips))

    print(f"[.] Started {test_name}")
    try:
        for test_ip in list_ips:
            print(prepare_separator("-"))
            print(f"[.] Started classification for IP: {test_ip}")
            start_time = time.time()
            classify_device(
                packets,
                test_ip,
                show_probability=not options.short_result,
                min_packets=int(options.min),
                max_packets=int(options.max),
            )
            classify_time = time.time() - start_time
            print(f"[.] Classification time: {classify_time:.2f} sec")
        print(prepare_separator("="))
        print(f"[.] Finished {test_name} (for all IPs)")
    except KeyboardInterrupt:
        print("\nExiting...")
    finally:
        pass

def diff_states(a, b):
    '''Diff 2 arbitrary conftool states'''

    return list(map(str, filterfalse(lambda x: x in b, a)))
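
# A minimal usage sketch: items of a that are missing from b come back stringified.
#   diff_states([{"host": "a"}, {"host": "b"}], [{"host": "b"}])
#   -> ["{'host': 'a'}"]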
Exemple #60
0
def curves_iter(curves: Iterable[CurveOrRange]) -> Iterator[Curve]:
    return cast(Iterator[Curve],
                filterfalse(lambda expr: expr[1] is None, curves))