Example #1
def repeatfunc(func, times=None, *args):
    u"""Repeat calls to func with specified arguments.

        Example:  repeatfunc(random.random)"""
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
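A short usage sketch of the recipe above (assuming `from itertools import repeat, starmap`, which this excerpt omits):

import random
list(repeatfunc(random.random, 3))   # three calls -> e.g. [0.30, 0.95, 0.58]
list(repeatfunc(pow, 2, 2, 5))       # pow(2, 5) twice -> [32, 32]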
Example #2
def config_main(args):
    i = get_series_importer(args)

    i.reset(*args.clear)

    if args.pattern:
        i.pattern = re.compile(args.pattern)

    if args.exclude:
        i.exclude = re.compile(args.exclude)

    if args.specials:
        if args.append:
            i.specials.extend(itertools.starmap(Special.parse, args.specials))
        else:
            i.specials = list(itertools.starmap(Special.parse, args.specials))

    for opt in ("auto_specials",):
        if getattr(args, opt) is not None:
            i.options[opt] = getattr(args, opt)

    if not args.dry_run:
        i.save()

    if not args.no_update:
        run_update(i, args)

    return 0
Example #3
	def __init__(self, parent, control_surface, skin, grid_resolution, parent_task_group, settings = DEFAULT_INSTRUMENT_SETTINGS, *a, **k):
		super(MonoDrumpadComponent, self).__init__(*a, **k)
		self._settings = settings
		self._parent = parent
		self._control_surface = control_surface
		self._skin = skin
		self._grid_resolution = grid_resolution

		self._drum_offset_component = self.register_component(self._offset_settings_component_class(
			attribute_tag = 'drum_offset', name = 'DrumPadOffset', parent_task_group = parent_task_group,
			value_dict = range(28), default_value_index = self._settings['DefaultDrumOffset'],
			default_channel = 0, bank_increment = 4, on_color = 'MonoInstrument.OffsetOnValue',
			off_color = 'MonoInstrument.OffsetOffValue'))
		self._drum_offset_value.subject = self._drum_offset_component
		self.set_offset_shift_toggle = self._drum_offset_component.shift_toggle.set_control_element

		self._drumgroup = MonoDrumGroupComponent(translation_channel = 3, set_pad_translations = self._control_surface.set_pad_translations, channel_list = self._settings['Channels'], settings = self._settings)
		self._drumpad_position_value.subject = self._drumgroup
		self.set_drumpad_matrix = self._drumgroup.set_matrix
		self.set_drumpad_select_matrix = self._drumgroup.set_select_matrix

		drum_clip_creator = ClipCreator()
		drum_note_editor = MonoNoteEditorComponent(clip_creator=drum_clip_creator, grid_resolution=grid_resolution)
		self._step_sequencer = MonoStepSeqComponent(clip_creator=drum_clip_creator, skin=skin, grid_resolution=grid_resolution, name='Drum_Sequencer', note_editor_component=drum_note_editor, instrument_component=self._drumgroup)
		self._step_sequencer._playhead_component._notes=tuple(chain(*starmap(range, ((64, 68), (56, 60), (48, 52), (40, 44)))))
		self._step_sequencer._playhead_component._triplet_notes=tuple(chain(*starmap(range, ((64, 67), (56, 59), (48, 51), (40, 43)))))
		self._step_sequencer._playhead_component._feedback_channels = [15]
		self._step_sequencer._note_editor._visible_steps_model = lambda indices: filter(lambda k: k % 4 != 3, indices)
		self.set_sequencer_matrix = self._step_sequencer.set_button_matrix
		self.set_playhead = self._step_sequencer.set_playhead
		self.set_loop_selector_matrix = self._step_sequencer.set_loop_selector_matrix 
		self.set_quantization_buttons = self._step_sequencer.set_quantization_buttons
		self.set_follow_button = self._step_sequencer.set_follow_button
		self.set_mute_button = self._step_sequencer.set_mute_button
		self.set_solo_button = self._step_sequencer.set_solo_button
		self.register_component(self._step_sequencer)

		self.set_split_matrix = self._parent._selected_session.set_clip_launch_buttons
Example #4
def runUpdateDaemon(restartTimerFunc):
    updateDao = dao.dao()
    for userID in daemonMap.keys():
        if userID not in onlineFriends:
            onlineFriends[userID] = set([])
        
        friendListUpdate = set([])
        friendListOffline = set([])
        fullFriendList = updateDao.getFriends(userID)
        for (_, friendID) in fullFriendList:
            if friendID in onlineClients:
                friendListUpdate.add(friendID)
            else:
                friendListOffline.add(friendID)
        # Build lists (actually sets) of friends who went offline or online
        # Offline: Friend exists in old list but not in new one
        # Online: Friend exists in new list but not in old one
        offlinelist = onlineFriends[userID] & friendListOffline
        onlinelist = friendListUpdate - (onlineFriends[userID] & friendListUpdate)
        
        # Creating the actual data to send -- this uses some craziness with mapping functions
        # Basically, the first line does the offline list, and the second does the online one
        payload = ''.join(starmap(upFormat.pack, list(izip_longest(offlinelist, '', fillvalue=0))))
        payload = payload + ''.join(starmap(upFormat.pack, list(izip_longest(onlinelist, '', fillvalue=1))))
        
        onlineFriends[userID] = onlinelist
        sock = daemonMap[userID]
        sock.send(padToSize(pktFormat.pack(0, r_status_update, len(payload)) + payload, BUFSIZE))
    del updateDao
    restartTimerFunc()
Example #5
    def get_selection(self, view):
        """
        Return the selected row and column indices of the selection in view.
        """
        selmodel = view.selectionModel()

        selection = selmodel.selection()
        model = view.model()
        # map through the proxies into input table.
        while isinstance(model, QAbstractProxyModel):
            selection = model.mapSelectionToSource(selection)
            model = model.sourceModel()

        assert isinstance(selmodel, BlockSelectionModel)
        assert isinstance(model, TableModel)

        row_spans, col_spans = selection_blocks(selection)
        rows = list(itertools.chain.from_iterable(itertools.starmap(range, row_spans)))
        cols = list(itertools.chain.from_iterable(itertools.starmap(range, col_spans)))
        rows = numpy.array(rows, dtype=numpy.intp)
        # map the rows through the applied sorting (if any)
        rows = model.mapToSourceRows(rows)
        rows.sort()
        rows = rows.tolist()
        return rows, cols
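The span-expansion idiom above is reusable on its own; a minimal sketch with made-up `(start, stop)` spans of the kind `selection_blocks` is assumed to return:

import itertools
row_spans = [(0, 3), (7, 9)]
rows = list(itertools.chain.from_iterable(itertools.starmap(range, row_spans)))
# -> [0, 1, 2, 7, 8]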
Example #6
File: astar.py Project: RONNCC/Sandbox
	def func(city1, city2):
		# Euclidean distance between two cities; starmap applies the lambda to the single (a, b) pair.
		dist = lambda a, b: list(itertools.starmap(
			lambda x, y: math.sqrt(sum(itertools.imap(lambda x1, x2: (x1 - x2) ** 2, xy[x], xy[y]))), [(a, b)]))[0]
		q = [[0, dist(city1, city2), city1]]
		while True:
			popped = q.pop(0)
			if popped[-1] == city2: print "found", "dist:", popped[0]; break
			path = popped[2:]
			for n in adj[path[-1]]:
				q.append([popped[0] + dist(path[-1], n), dist(n, city2)] + list(itertools.chain(path, [n])))
			q = sorted(q, key=lambda l: l[0] + l[1])
Example #7
File: _default.py Project: dhuth/pylang
def _create(*args, **kwargs):
    rules = list(itertools.starmap(grammar.EBnfRule, _meta_grammar_rules))
    token_defs = list(itertools.starmap(grammar.ETokenDef, _meta_grammar_tokens))
    p = parser.TextParser._create_text_parser(grammar.TextBnfGrammar(
            token_defs, _meta_grammar_ignore, rules,
            '<grammar>', _meta_grammar_err, _meta_grammar_eof, _meta_grammar_newline), _meta_grammar_env)
    return p
Example #8
File: types.py Project: flypy/pykit
def compare_recursive(rec1, rec2, mapping, t1, t2):
    """Structural comparison of recursive types"""
    cmp = partial(compare_recursive, rec1, rec2, mapping)

    sub1 = subterms(t1)
    sub2 = subterms(t2)

    if id(t1) in rec1:
        if id(t1) in mapping:
            return mapping[id(t1)] == id(t2)

        mapping[id(t1)] = id(t2)

    if bool(sub1) ^ bool(sub2) or type(t1) != type(t2):
        return False
    elif not sub1:
        return t1 == t2 # Unit types
    elif t1.is_struct:
        return (t1.names == t2.names and
                all(starmap(cmp, zip(t1.types, t2.types))))
    elif t1.is_function:
        return (t1.varargs == t2.varargs and
                cmp(t1.restype, t2.restype) and
                all(starmap(cmp, zip(t1.argtypes, t2.argtypes))))
    elif t1.is_vector or t1.is_array:
        return t1.count == t2.count and cmp(t1.base, t2.base)
    elif t1.is_pointer:
        return cmp(t1.base, t2.base)
Example #9
def main():

    tests = [
        # Same data
        (["validata/zero", "validata/zero_2", 1, 1e-10], True),

        # Different data
        (["validata/zero", "validata/one", 1, 1e-10], False),

        # Different data, just outside numerical zero
        (["validata/zero", "validata/small", 1, 0.9e-8], False),

        # Different data, just inside numerical zero
        (["validata/zero", "validata/small", 1, 1.1e-8], True),
             ]

    # Run the tests
    passes = list(it.starmap(pytest, tests)) + list(it.starmap(shelltest, tests))

    # Print information on passes
    print("If all of the following are True then the test passed:")
    print(passes)
    print("also you can just check the exit status with 'echo $?'")

    # Check that they passed
    if all(passes):
        return 0
    else:
        return 1
Example #10
File: packaging.py Project: Dynavisor/pbr
def get_requirements_files():
    files = os.environ.get("PBR_REQUIREMENTS_FILES")
    if files:
        return tuple(f.strip() for f in files.split(','))
    # Returns a list composed of:
    # - REQUIREMENTS_FILES with -py2 or -py3 in the name
    #   (e.g. requirements-py3.txt)
    # - REQUIREMENTS_FILES with -{platform.system} in the name
    #   (e.g. requirements-windows.txt)
    # - REQUIREMENTS_FILES with both Python version and platform's
    #   system in the name
    #   (e.g. requirements-freebsd-py2.txt)
    # - REQUIREMENTS_FILES
    pyversion = sys.version_info[0]
    system = platform.system().lower()
    parts = list(map(os.path.splitext, REQUIREMENTS_FILES))

    version_cb = functools.partial("{1}-py{0}{2}".format, pyversion)
    platform_cb = functools.partial("{1}-{0}{2}".format, system)
    both_cb = functools.partial("{2}-{0}-py{1}{3}".format, system, pyversion)

    return list(itertools.chain(
        itertools.starmap(both_cb, parts),
        itertools.starmap(platform_cb, parts),
        itertools.starmap(version_cb, parts),
        REQUIREMENTS_FILES,
    ))
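The `functools.partial`-over-`str.format` trick above pre-binds the version (or platform) argument, leaving `starmap` to supply each file's `(root, ext)` pair; a small sketch with a hypothetical file name:

import functools, itertools, os.path
parts = [os.path.splitext("requirements.txt")]       # [('requirements', '.txt')]
version_cb = functools.partial("{1}-py{0}{2}".format, 3)
list(itertools.starmap(version_cb, parts))           # -> ['requirements-py3.txt']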
Example #11
 def test_GcovLine_merge_lines(self):
     '''
        56:   14:        while((index < pivot_index) && (list[index] >= pivot_value)) {
         6:   15:            swap(list, index, pivot_index);
         6:   16:            pivot_index--;
         -:   17:        }
     And
         78:   14:        while((index < pivot_index) && (list[index] >= pivot_value)) {
         18:   15:            swap(list, index, pivot_index);
         18:   16:            pivot_index--;
         -:   17:        }
     '''
     lines_proc_one = list(itertools.starmap(gcov.GcovLine,[ 
             (14, {'proc_one':   56},'        while((index < pivot_index) && (list[index] >= pivot_value)) {'),
             (15, {'proc_one':    6},'            swap(list, index, pivot_index);'),
             (16, {'proc_one':    6},'            pivot_index--;'),
             (17, {'proc_one': None},'        }')]))
     lines_proc_two = list(itertools.starmap(gcov.GcovLine,[
             (14, {'proc_two':   78},'        while((index < pivot_index) && (list[index] >= pivot_value)) {'),
             (15, {'proc_two':   18},'            swap(list, index, pivot_index);'),
             (16, {'proc_two':   18},'            pivot_index--;'),
             (17, {'proc_two': None},'        }')]))
     gcov.GcovLine.merge_lines(lines_proc_one, lines_proc_two)
     self.assertEqual(lines_proc_one[0].lineno, 14)
     self.assertEqual(lines_proc_one[1].lineno, 15)
     self.assertEqual(lines_proc_one[2].lineno, 16)
     self.assertEqual(lines_proc_one[3].lineno, 17)
Example #12
def Enum(*enums, **other_enums):
    """
    Creates an enum-like type that sets attributes with corresponding 0-indexed integer
    values coming from positional arguments that are converted to uppercase. For example::

        >>> e = Enum('foo', 'bar')
        >>> e.FOO
        0
        >>> e.BAR
        1

    If keyword arguments are passed, the effect is the same, however the keyword value
    will represent the value of the enum attribute, rather than a 0-indexed integer.
    For example::

        >>> e = Enum(foo='bar', baz='qux')
        >>> e.FOO
        'bar'
        >>> e.BAZ
        'qux'
    """
    # Handle args that should be numeric. Swap enumerate idx and value for dict comprehension later
    numerical_items = itertools.starmap(lambda i, v: (str(v).upper(), i), enumerate(enums))

    # Handle keyword arguments
    keyword_items = itertools.starmap(lambda k, v: (str(k).upper(), v), other_enums.iteritems())

    # Chain all items
    all_items = itertools.chain(numerical_items, keyword_items)

    return type('Enum', (), dict(x for x in all_items))
Example #13
def repeatfunc(func, times=None, *args):
    """Call *func* with *args* repeatedly, returning an iterable over the
    results.

    If *times* is specified, the iterable will terminate after that many
    repetitions:

        >>> from operator import add
        >>> times = 4
        >>> args = 3, 5
        >>> list(repeatfunc(add, times, *args))
        [8, 8, 8, 8]

    If *times* is ``None`` the iterable will not terminate:

        >>> from random import randrange
        >>> times = None
        >>> args = 1, 11
        >>> take(6, repeatfunc(randrange, times, *args))  # doctest:+SKIP
        [2, 4, 8, 1, 8, 4]

    """
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
Example #14
    def get_aggregates(self, form_data):

        def extract_columns(name, stats):
            percent = make_percent_function(stats['total'])
            return (
                name,
                stats['total'],
                stats[models.AnswerVote.UPVOTE],
                percent(stats[models.AnswerVote.UPVOTE]),
                stats[models.AnswerVote.DOWNVOTE],
                percent(stats[models.AnswerVote.DOWNVOTE]),
                stats[models.AnswerVote.FINAL_CHOICE],
                percent(stats[models.AnswerVote.FINAL_CHOICE]),
            )

        assignment = form_data['assignment']
        question = form_data['question']
        filters = {}
        if assignment is not None:
            filters['assignment'] = assignment
        if question is not None:
            filters['answer__question'] = question
        votes_qs = models.AnswerVote.objects.filter(**filters)
        username_data, country_data = aggregate_fake_attribution_data(votes_qs)
        username_data_table = list(itertools.starmap(
            extract_columns, sorted(username_data.iteritems())
        ))
        country_data_table = list(itertools.starmap(
            extract_columns, sorted(country_data.iteritems())
        ))
        return username_data_table, country_data_table
Example #15
File: iterables.py Project: melodi-lab/SGM
def repeatfunc(func, times = None, *args):
    """Create iterable that repeats a function.

    Examples:

    >>> list(repeatfunc(lambda: 1, 0))
    []

    >>> list(repeatfunc(lambda: 1, 10))
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

    >>> list(islice(repeatfunc(lambda: 1), 10))
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

    >>> list(repeatfunc(lambda r: r*r, 3, 10))
    [100, 100, 100]

    Args:
        func: function to repeat.
        times: number of times to repeat (None = infinite iterable)
        args: arguments to func

    """
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
Example #16
        def _include_file(path):
            if not os.path.exists(path):
                return invalid(path, "File doesn't exist")

            return not any(chain(
                starmap(partial(ignored, path), filename.should_ignore(path)),
                starmap(partial(invalid, path), filename.is_invalid(path))))
Example #17
def main(field):
    client = MongoClient()
    db = client['github']
    ranks = map(lambda x: x[0],
                db['influences'].find_one({'field': field})['ranks'][:10])

    github = map(
        lambda x: Github(x['login'], x['passwd'], timeout=3600), users)[0]

    def count_followers(user):
        return github.get_user(user).followers

    def count_stars(user):
        html = lxml.html.fromstring(urllib2.urlopen(
            'https://github.com/' + user).read())

        results = html.xpath(
            '//strong[@class="vcard-stat-count"]')

        strings = results[1].text.split('k')
        if len(strings) > 1:
            number = int(float(strings[0]) * 1000)
        else:
            number = int(strings[0])

        return number

    def count_public_repos(user):
        return github.get_user(user).public_repos

    followers_numbers = map(count_followers, ranks)
    stars_numbers = map(count_stars, ranks)
    public_repos_numbers = map(count_public_repos, ranks)
    products_sqrts = map(math.sqrt, list(starmap(
        operator.mul, zip(followers_numbers, list(starmap(
            operator.add, zip(stars_numbers, public_repos_numbers)))))))

    fig, ax = plt.subplots()

    fig.set_size_inches(20, 10)

    index = np.arange(len(ranks))
    bar_width = 0.2

    plt.bar(index, followers_numbers, bar_width, color='r', label='Followers')
    plt.bar(index + bar_width, stars_numbers, bar_width, color='g',
            label='Starred')
    plt.bar(index + 2 * bar_width, public_repos_numbers, bar_width, color='b',
            label='Repos')
    plt.bar(index + 3 * bar_width, products_sqrts, bar_width, color='y',
            label='Product')

    plt.xlabel('Developer')
    plt.ylabel('Number')
    plt.title("Developer's Followers, Starred and Repos")
    plt.xticks(index + 3 * bar_width, ranks)
    plt.legend()
    plt.tight_layout()

    plt.savefig('images/' + field + '-histogram.png')
Example #18
def get_specs(yaml_data, ignore_build_strings=False):
    """
    Return a dict of { name : VersionSpec } for the packages listed in the given yaml data.
    We support two types of yaml docs:
    - meta.yaml (we look at requirements/run)
    - environment export files (we look at dependencies)
    """
    if 'requirements' in yaml_data:
        # meta.yaml format
        if 'run' not in yaml_data['requirements']:
            raise Exception("Package metadata does not contain 'run' requirements")
        specs = map(str.split, yaml_data['requirements']['run'])

    elif 'dependencies' in yaml_data:
        # environment export format
        specs = map( lambda s: s.split('='),
                     yaml_data['dependencies'] )
    else:
        raise Exception("Unsupported yaml format.")

    spec_list = starmap( lambda name, version='', string='': VersionSpec(name, version, string),
                         specs )

    if ignore_build_strings:
        spec_list = starmap( lambda name, version, string: VersionSpec(name, version, ''),
                             spec_list )

    spec_dict = { s.name : s for s in spec_list }
    return spec_dict
Example #19
File: problem_091.py Project: yred/euler
def solution():
    Point = namedtuple('Point', ['x', 'y'])

    # The origin
    O = Point(x=0, y=0)

    # Min and Max for x and y
    mx = my = 0
    Mx = My = 50

    rtriangles = 0

    for P in starmap(Point, product(range(mx, Mx+1), range(my, My+1))):
        if P == O:
            continue

        for Q in starmap(Point, product(range(mx, P.x+1), range(my, My+1))):
            # Skip duplicate and invalid point sets
            if (P.x == Q.x and P.y <= Q.y) or Q == O:
                continue

            if is_triangle(O, P, Q) and is_right(O, P, Q):
                rtriangles += 1

    return rtriangles
Example #20
File: combine.py Project: cqql/hhu-cs
def main(maskpath, apath, bpath):
    apath = os.path.realpath(apath)
    bpath = os.path.realpath(bpath)
    (root, ext) = os.path.splitext(apath)
    (broot, _) = os.path.splitext(bpath)
    bname = os.path.basename(broot)

    # Read images
    mask = cv2.imread(maskpath)
    a = cv2.imread(apath)
    b = cv2.imread(bpath)

    # Create direct combination
    cmb = combine(mask, a, b)

    # Construct the pyramids
    masks = downscalepyr(mask)
    agausspyr = gausspyr(a)
    bgausspyr = gausspyr(b)
    alappyr = lappyr(agausspyr)
    blappyr = lappyr(bgausspyr)

    # Combine the pyramids
    cgausspyr = list(itertools.starmap(combine, zip(masks, agausspyr,
                                                    bgausspyr)))
    clappyr = list(itertools.starmap(combine, zip(masks, alappyr, blappyr)))

    # Recreate the original from the combined pyramids
    c = ilappyr(cgausspyr, clappyr)

    cv2.imwrite("{}_{}{}".format(root, bname, ext), cmb)
    cv2.imwrite("{}_{}_laplacian{}".format(root, bname, ext), c)
Example #21
 def test_consecutive(self):
     list0 = range(0, 100)
     diff = lambda a, b: b - a
     list1 = list(starmap(diff, consecutive(list0)))
     list2 = list(starmap(diff, consecutive(list0, step=2)))
     self.assertListEqual(list1, [1] * 99)
     self.assertListEqual(list2, [2] * 49)
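`consecutive` itself is not shown in this excerpt; a plausible definition that satisfies both assertions (an assumption, not the project's actual helper):

def consecutive(iterable, step=1):
    # Pair each item with the item `step` positions later, advancing by `step`:
    # step=1 -> (0,1), (1,2), ... ; step=2 -> (0,2), (2,4), ...
    items = list(iterable)
    return zip(items[::step], items[step::step])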
Example #22
 def normalize_data(self, w_totals, lsize):
     n, m, t = w_totals * lsize, w_totals * lsize * lsize, (1 / lsize)
     (
         self.sx,
         self.sy,
         self.sz,
         self.sxvar,
         self.syvar,
         self.szvar,
         self.sxyvar,
         self.sxzvar,
         self.syzvar,
     ) = starmap(
         op.itruediv,
         zip(
             (self.sx, self.sy, self.sz, self.sxvar, self.syvar, self.szvar, self.sxyvar, self.sxzvar, self.syzvar),
             (n, n, n, m, m, m, m, m, m),
         ),
     )
     (self.sxvar, self.syvar, self.szvar) = starmap(op.iadd, zip((self.sxvar, self.syvar, self.szvar), (t, t, t)))
     (self.sxvar, self.syvar, self.szvar, self.sxyvar, self.sxzvar, self.syzvar) = starmap(
         op.isub,
         zip(
             (self.sxvar, self.syvar, self.szvar, self.sxyvar, self.sxzvar, self.syzvar),
             (
                 (self.sx) ** 2,
                 (self.sy) ** 2,
                 (self.sz) ** 2,
                 (self.sx * self.sy),
                 (self.sx * self.sz),
                 (self.sy * self.sz),
             ),
         ),
     )
Example #23
def approximate(xs, ys, power):
    assert len(xs) == len(ys)

    matrix_size = power + 1
    variables = 2 * power + 1

    xs = map(float, xs)
    ys = map(float, ys)

    xs = reduce(
        lambda x, y: x + [list(starmap(operator.mul, izip(x[-1], y)))], repeat(xs, variables - 1), [[1] * len(xs)]
    )
    assert len(xs) == variables

    s = map(sum, xs)
    assert s[0] == len(ys)

    b = [sum(starmap(operator.mul, izip(ys, x))) for x in xs[:matrix_size]]
    a = [s[i : i + matrix_size] for i in xrange(matrix_size)]

    # So, we have a*x = b and we are looking for x
    matr = [ai + [bi] for ai, bi in izip(a, b)]

    def unify_row(i, j):
        matr[i] = [cell / matr[i][j] for cell in matr[i]]
        assert matr[i][j] == 1

    def subtract_row(i, j, row):
        assert matr[i][j] == 1

        matr[row] = [matr[row][k] - matr[i][k] * matr[row][j] for k in xrange(len(matr[i]))]

        assert matr[row][j] == 0

    # NOTE: Example for matrix_size = 3
    #    unify_row(0, 0)
    #    subtract_row(0, 0, 1)
    #    subtract_row(0, 0, 2)
    #    unify_row(1, 1)
    #    subtract_row(1, 1, 2)
    #    unify_row(2, 2)
    #    subtract_row(2, 2, 1)
    #    subtract_row(2, 2, 0)
    #    subtract_row(1, 1, 0)

    for i in xrange(matrix_size):
        unify_row(i, i)
        for j in xrange(matrix_size - i - 1):
            subtract_row(i, i, i + j + 1)

    for i in xrange(matrix_size):
        for j in xrange(matrix_size - i - 1):
            subtract_row(matrix_size - i - 1, matrix_size - i - 1, j)

    assert all(matr[i][:matrix_size] == ([0] * i) + [1] + ([0] * (matrix_size - 1 - i)) for i in xrange(matrix_size))

    ret = map(operator.itemgetter(matrix_size), matr)

    return ret
Example #24
File: iters.py Project: mgill25/fn.py
def repeatfunc(func, times=None, *args):
    """Repeat calls to func with specified arguments.
    Example:  repeatfunc(random.random)

    http://docs.python.org/3.4/library/itertools.html#itertools-recipes
    """
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
Example #25
File: index.py Project: wtpayne/hiai
def write(indices, dirpath_idxfiles):
    """
    Persist supplied index data to disk.

    Build a mapping from an identifier string to
    the locations (file/row/col) where that
    identifier string occurs.

    data[idclass][idstr][filepath] -> (iline, icol)

    """
    (line_index, references_index, objects_index) = indices

    # Check for duplicate identifier numbers or descriptions.
    for idclass, classdata in line_index.items():
        set_nums  = set()
        set_descs = set()
        for idstr in sorted(classdata.keys()):
            (idnum, description) = idstr.split('_', maxsplit = 1)
            if idnum in set_nums:
                raise RuntimeError(
                    'Duplicate identifier num: "{idnum}" in "{idstr}"'.format(
                                                        idnum = idnum,
                                                        idstr = idstr))
            if description in set_descs:
                raise RuntimeError(
                    'Duplicate description: "{desc}" in "{idstr}"'.format(
                                                        desc  = description,
                                                        idstr = idstr))

            set_nums.add(idnum)
            set_descs.add(description)

    # Persist all indices to disk.
    da.util.ensure_dir_exists(dirpath_idxfiles)
    for idclass, classdata in line_index.items():
        filename  = '{idclass}.line_index.jseq'.format(idclass = idclass)
        da.util.write_jseq(os.path.join(dirpath_idxfiles, filename),
                           itertools.starmap(lambda k, v: {k: v},
                                             sorted(classdata.items())))

    if references_index is not None:
        for idclass, classdata in references_index.items():
            filename = '{idclass}.references_index.jseq'.format(
                                                            idclass = idclass)
            da.util.write_jseq(os.path.join(dirpath_idxfiles, filename),
                               itertools.starmap(lambda k, v: {k: v},
                                                 sorted(classdata.items())))
    if objects_index is not None:
        for idclass, classdata in objects_index.items():
            filename = '{idclass}.objects_index.jseq'.format(
                                                            idclass = idclass)
            da.util.write_jseq(os.path.join(dirpath_idxfiles, filename),
                               itertools.starmap(lambda k, v: {k: v},
                                                 sorted(classdata.items())))

    return True
Example #26
def repeatfunc(func, times=None, *args):
    """Repeat calls to func with specified arguments.

    Example:  repeatfunc(random.random)
    """
    from itertools import starmap, repeat
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
Example #27
 def store(self, data):
     """ Store data to the medium """
     get_bits = lambda items, n, fillvalue: its.izip_longest(*[iter(items)] * n, fillvalue=fillvalue)
     biterator = lambda d: ((ord(ch) >> shift) & 1 for ch, shift in its.product(d, range(7, -1, -1)))
     magic = lambda pixel, bits: tuple(its.starmap(lambda cpt, bit: cpt & ~1 | bit, zip(pixel, bits)))
     img = Image.open("%sng" % self.medium)
     bits = get_bits(biterator('%s%s' % (struct.pack(">i", len(data)), data)), len(img.getbands()), 0)
     img.putdata(list(its.starmap(magic, its.izip(img.getdata(), bits))))
     img.save("%sng" % self.medium)
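The `magic` lambda above overwrites the least-significant bit of each pixel component with a payload bit; the same bit-twiddle in isolation:

from itertools import starmap
pixel, bits = (200, 101, 54), (1, 0, 1)
tuple(starmap(lambda cpt, bit: cpt & ~1 | bit, zip(pixel, bits)))
# -> (201, 100, 55): each component's low bit replaced by the corresponding payload bit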
Example #28
def run(input='network.txt', makedot=False):
    lines = map(lambda s: s.rstrip().split(','), \
                open(input).readlines())

    def weight(i, j):
        t = lines[i][j]
        w = t != '-' and int(t) or 0
        return w

    # Represent the vertices with integers 0-39
    V = range(len(lines))

    # The "i > j" condition keeps us from using duplicate edges and the
    # "weight(i, j) > 0" lets us use only edges that exist in the graph.
    # (The weight function above returns 0 for "-" entries in the file.)
    E = filter(lambda (i, j): i > j and weight(i, j) > 0, product(V, V))

    # Run Kruskal's algorithm to find a minimum spanning tree.
    # Define an elementary cluster C(v) <- {v}
    C = dict((v, set([v])) for v in V)

    # Initialize a priority queue Q to contain all edges in G, using the
    # weights as keys.
    Q = map(lambda (u, v): (weight(u, v), u, v), E)
    heapify(Q)

    # Define a tree T <- {}
    # T will ultimately contain the edges of the MST
    T = set()

    # n is the total number of vertices
    n = len(V)

    while len(T) < n-1:
        # Edge (u, v) is the minimum weighted route from u to v
        w, u, v = heappop(Q)

        # Prevent cycles in T. add (u, v) only if T does not already contain
        # a path between u and v.
        if C[v] != C[u]:
            # Add edge (u, v) to T.
            T.add((u, v))
            # Merge C[u] and C[v].
            C[u].update(C[v])
            for x in C[u]:
                C[x] = C[u]

    # T now describes a minimum spanning tree on G.
    # Produce a DOT language file to visualize T.
    if makedot:
        dot(V, T, weight)

    # Print the savings, as to answer problem #107.
    Tweight = sum(starmap(weight, T))
    Gweight = sum(starmap(weight, E))
    print Gweight - Tweight
Example #29
File: mapping.py Project: lcrees/twoq
    def times(self, n=None):
        """
        repeat call with incoming things `n` times

        @param n: repeat call n times on incoming things (default: None)
        """
        with self._context():
            if n is None:
                return self._xtend(starmap(self._call, repeat(list(self._iterable))))
            return self._xtend(starmap(self._call, repeat(list(self._iterable), n)))
Example #30
	def from_dict(cls, d):
		from itertools import starmap
		dim = tuple(d.pop('dim'))
		if len(dim) != 2: raise ValueError
		if not all(isinstance(x,int) for x in dim): raise ValueError
		if not all(x>0 for x in dim): raise ValueError

		vacancies = starmap(Vacancy, d.pop('vacancies'))
		trefoils  = starmap(Trefoil, d.pop('trefoils'))
		return cls.from_entity_lists(dim, vacancies, trefoils)
Example #31
def form_blob_(seg_, root_fork):
    """
    Form blobs from given list of segments.
    Each blob is formed from a number of connected segments.
    """

    # Determine params type:
    if 'M' not in seg_[0]:  # No M.
        Dert_keys = (*aDERT_PARAMS[:2], *aDERT_PARAMS[3:], "S", "Ly")
    else:
        Dert_keys = (*aDERT_PARAMS, "S", "Ly") if nI != 1 \
            else (*gDERT_PARAMS, "S", "Ly")

    # Form blob:
    blob_ = []
    for blob_seg_ in cluster_segments(seg_):
        # Compute boundary box in batch:
        y0, yn, x0, xn = starmap(
            lambda func, x_: func(x_),
            zip(
                (min, max, min, max),
                zip(*[
                    (
                        seg['y0'],  # y0_ .
                        seg['y0'] + seg['Ly'],  # yn_ .
                        seg['x0'],  # x0_ .
                        seg['xn'],  # xn_ .
                    ) for seg in blob_seg_
                ]),
            ),
        )

        # Compute mask:
        mask = np.ones((yn - y0, xn - x0), dtype=bool)
        for blob_seg in blob_seg_:
            for y, P in enumerate(blob_seg['Py_'], start=blob_seg['y0']):
                x_start = P['x0'] - x0
                x_stop = x_start + P['L']
                mask[y - y0, x_start:x_stop] = False

        dert__ = root_fork['dert__'][:, y0:yn, x0:xn]
        dert__.mask[:] = mask

        blob = dict(
            Dert=dict(
                zip(
                    Dert_keys,
                    [
                        *map(sum,
                             zip(*map(op.itemgetter(*Dert_keys), blob_seg_)))
                    ],
                )),
            box=(y0, yn, x0, xn),
            seg_=blob_seg_,
            sign=blob_seg_[0].pop('sign'),  # Pop the remaining segment's sign.
            dert__=dert__,
            root_fork=root_fork,
            fork_=defaultdict(list),
        )
        blob_.append(blob)

        # feedback(blob)

    return blob_
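The bounding-box computation above pairs one reduction function per column; the same idiom in isolation, with made-up segment boxes:

from itertools import starmap
boxes = [(2, 5, 1, 4), (0, 3, 2, 6)]    # (y0, yn, x0, xn) per segment
y0, yn, x0, xn = starmap(lambda f, col: f(col), zip((min, max, min, max), zip(*boxes)))
# -> 0, 5, 1, 6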
Example #32
def get_console_script_specs(console):
    # type: (Dict[str, str]) -> List[str]
    """
    Given the mapping from entrypoint name to callable, return the relevant
    console script specs.
    """
    # Don't mutate caller's version
    console = console.copy()

    scripts_to_generate = []

    # Special case pip and setuptools to generate versioned wrappers
    #
    # The issue is that some projects (specifically, pip and setuptools) use
    # code in setup.py to create "versioned" entry points - pip2.7 on Python
    # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
    # the wheel metadata at build time, and so if the wheel is installed with
    # a *different* version of Python the entry points will be wrong. The
    # correct fix for this is to enhance the metadata to be able to describe
    # such versioned entry points, but that won't happen till Metadata 2.0 is
    # available.
    # In the meantime, projects using versioned entry points will either have
    # incorrect versioned entry points, or they will not be able to distribute
    # "universal" wheels (i.e., they will need a wheel per Python version).
    #
    # Because setuptools and pip are bundled with _ensurepip and virtualenv,
    # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
    # override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
    # is available.
    #
    # To add the level of hack in this section of code, in order to support
    # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which will control which version scripts get installed.
    #
    # ENSUREPIP_OPTIONS=altinstall
    #   - Only pipX.Y and easy_install-X.Y will be generated and installed
    # ENSUREPIP_OPTIONS=install
    #   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #     that this option is technically if ENSUREPIP_OPTIONS is set and is
    #     not altinstall
    # DEFAULT
    #   - The default behavior is to install pip, pipX, pipX.Y, easy_install
    #     and easy_install-X.Y.
    pip_script = console.pop('pip', None)
    if pip_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            scripts_to_generate.append('pip = ' + pip_script)

        if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
            scripts_to_generate.append('pip{} = {}'.format(
                sys.version_info[0], pip_script))

        scripts_to_generate.append(
            f'pip{get_major_minor_version()} = {pip_script}')
        # Delete any other versioned pip entry points
        pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
        for k in pip_ep:
            del console[k]
    easy_install_script = console.pop('easy_install', None)
    if easy_install_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            scripts_to_generate.append('easy_install = ' + easy_install_script)

        scripts_to_generate.append('easy_install-{} = {}'.format(
            get_major_minor_version(), easy_install_script))
        # Delete any other versioned easy_install entry points
        easy_install_ep = [
            k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
        ]
        for k in easy_install_ep:
            del console[k]

    # Generate the console entry points specified in the wheel
    scripts_to_generate.extend(starmap('{} = {}'.format, console.items()))

    return scripts_to_generate
Example #33
 def _map_no_thread(method, list_of_args):
     return list(itertools.starmap(method, list_of_args))
Example #34
"""
You are given a function `f(X) = X^2` and `K` lists, where the `i^th` list
contains `N_i` elements. Pick one element from each list so that the value 
for `S = (f(X_1) + f(X_2) + ... + f(X_K)) % M` is maximized.
"""
from itertools import product
from itertools import starmap


def f(X):
    return X * X


def compute(*L):
    res = 0
    for i in L:
        res += f(i)

    return res


if __name__ == '__main__':
    K, M = map(int, input().rsplit())
    N = list()

    for _ in range(K):
        N.append(list(map(int, input().rsplit()))[1:])

    res = list(product(*N))

    print(max([i % M for i in starmap(compute, res)]))
Example #35
from nyt import NYT
from multiprocessing import Pool
import itertools
import time


def apithread(yr, mo):
    data = nytapi.get_archive(yr, mo)
    print('Processing mo {}, {}.'.format(mo, yr))
    time.sleep(10)
    return data


if __name__ == '__main__':
    workers = None
    nytapi = NYT()
    years = range(1852, 2017)
    months = range(1, 12 + 1)
    yrmo = list(itertools.product(years, months))

    if workers is not None:
        with Pool(workers) as p:
            data = p.starmap(nytapi.get_archive, yrmo)
    else:
        data = itertools.starmap(apithread, yrmo)

    print(list(data))
Example #36
def map2x(func, *iterables):
    """map() function for Python 2/3 compatability"""
    zipped = zip_longest(*iterables)
    if func is None:
        return zipped
    return starmap(func, zipped)
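A quick check of the shim (assuming `from itertools import starmap` and `zip_longest` at module level):

list(map2x(pow, [2, 3], [5, 2]))    # -> [32, 9]
list(map2x(None, [1, 2], 'ab'))     # -> [(1, 'a'), (2, 'b')]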
Example #37
 def _get_simplification(spell_name):
     spellout = module_info.get(spell_name, {})
     iter_simplifications = filter(lambda item: item[0].isdigit(),
                                   spellout.items())
     yield from starmap(lambda k, v: {re.sub(r'\W+', '', v): k},
                        iter_simplifications)
Example #38
 def gini(arr):
     arr = sorted(arr, reverse=True)
     dividend = sum(starmap(mul, izip(arr, xrange(1, 2 * len(arr), 2))))
     divisor = len(arr) * sum(arr)
     return float(dividend) / divisor
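The same computation in Python 3 terms (`zip`/`range` for `izip`/`xrange`, true division), as a self-contained sketch:

from itertools import starmap
from operator import mul

def gini(arr):
    arr = sorted(arr, reverse=True)
    dividend = sum(starmap(mul, zip(arr, range(1, 2 * len(arr), 2))))
    return dividend / (len(arr) * sum(arr))

gini([1, 1, 1, 1])    # -> 1.0 for a uniform array under this particular weighting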
Example #39
def groups(iterable, n, step):
    """Make groups of 'n' elements from the iterable advancing
    'step' elements on each iteration"""
    itlist = tee(iterable, n)
    onestepit = izip(*(starmap(drop, enumerate(itlist))))
    return take_every(step, onestepit)
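`drop` and `take_every` come from elsewhere in the module; plausible Python 3 definitions that make `groups` behave as documented (assumptions, not the original source):

from itertools import islice, starmap, tee

def drop(n, iterable):
    # Skip the first n items.
    return islice(iterable, n, None)

def take_every(n, iterable):
    # Yield every n-th item, starting with the first.
    return islice(iterable, 0, None, n)

def groups(iterable, n, step):
    itlist = tee(iterable, n)
    return take_every(step, zip(*starmap(drop, enumerate(itlist))))

list(groups(range(6), 3, 2))    # -> [(0, 1, 2), (2, 3, 4)]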
Example #40
File: align.py Project: u3005466/psi4
def _plausible_atom_orderings(ref,
                              current,
                              rgeom,
                              cgeom,
                              algo='hunguno',
                              verbose=1,
                              uno_cutoff=1.e-3):
    """

    Parameters
    ----------
    ref : list
        Hashes encoding distinguishable non-coord characteristics of reference
        molecule. Namely, atomic symbol, mass, basis sets?.
    current : list
        Hashes encoding distinguishable non-coord characteristics of

    Returns
    -------
    iterator of tuples

    """
    if sorted(ref) != sorted(current):
        raise ValidationError(
            """ref and current can't map to each other.\n""" + 'R:  ' +
            str(ref) + '\nC:  ' + str(current))

    where = collections.defaultdict(list)
    for iuq, uq in enumerate(ref):
        where[uq].append(iuq)

    cwhere = collections.defaultdict(list)
    for iuq, uq in enumerate(current):
        cwhere[uq].append(iuq)

    connect = collections.OrderedDict()
    for k in where:
        connect[tuple(where[k])] = tuple(cwhere[k])

    def filter_permutative(rgp, cgp):
        """Original atom ordering generator for like subset of atoms (e.g., all carbons).
        Relies on permutation. Filtering depends on similarity of structure (see `atol` parameter).
        Only suitable for total system size up to about 20 atoms.

        """
        if verbose >= 1:
            print("""Space:     {} <--> {}""".format(rgp, cgp))
        bnbn = [
            rrdistmat[first, second] for first, second in zip(rgp, rgp[1:])
        ]
        for pm in itertools.permutations(cgp):
            cncn = [
                ccdistmat[first, second] for first, second in zip(pm, pm[1:])
            ]
            if np.allclose(bnbn, cncn, atol=0.5):
                if verbose >= 1:
                    print('Candidate:', rgp, '<--', pm)
                yield pm

    def filter_hungarian(rgp, cgp):
        if verbose >= 1:
            print("""Space:     {} <--> {}""".format(rgp, cgp))
        submatCR = crdistmat[np.ix_(
            cgp, rgp)]  # this one gets manipulated by hungarian call
        submatCRcopy = np.copy(submatCR)
        lapCR = hungarian.lap(submatCR)
        ptsCR = list(zip(lapCR[1], range(len(rgp))))
        sumCR = sum(submatCRcopy[lapCR[1], range(len(rgp))])

        subans = lapCR[1]
        npcgp = np.array(cgp)
        ans = tuple(npcgp[np.array(subans)])
        if verbose >= 1:
            print('Best Candidate ({:6.3}):'.format(sumCR), rgp, '<--', ans,
                  '     from', cgp, subans)
        yield ans

    def filter_hungarian_uno(rgp, cgp):
        """Hungarian algorithm on cost matrix based off headless (all Z same w/i space anyways) NRE.
        Having found _a_ solution and the reduced cost matrix, this still isn't likely to produce
        atom rearrangement fit for Kabsch b/c internal coordinate cost matrix doesn't nail down
        distance-equivalent atoms with different Cartesian coordinates like Cartesian-distance-matrix
        cost matrix does. So, form a bipartite graph from all essentially-zero connections between
        ref and concern and run Uno algorithm to enumerate them.

        """
        if verbose >= 1:
            print("""Space:     {} <--> {}""".format(rgp, cgp))

        # formulate cost matrix from internal (not Cartesian) layouts of R & C
        npcgp = np.array(cgp)
        submatCC = ccnremat[np.ix_(cgp, cgp)]
        submatRR = rrnremat[np.ix_(rgp, rgp)]
        sumCC = 100. * np.sum(
            submatCC,
            axis=0)  # cost mat small if not scaled, this way like Z=Neon
        sumRR = 100. * np.sum(submatRR, axis=0)
        cost = np.zeros((len(cgp), len(rgp)))
        for j in range(cost.shape[1]):
            for i in range(cost.shape[0]):
                cost[i, j] = (sumCC[i] - sumRR[j])**2
        if verbose >= 2:
            print('Cost:\n', cost)
        costcopy = np.copy(
            cost)  # other one gets manipulated by hungarian call

        # find _a_ best match btwn R & C atoms through Kuhn-Munkres (Hungarian) algorithm
        t00 = time.time()
        # exactly like `scipy.optimize.linear_sum_assignment(cost)` only with extra return
        #    import scipy.optimize
        #    raise ImportError("""Python module scipy >=0.17 not found. Solve by installing it: `conda install scipy` or `pip install scipy`""")
        (row_ind,
         col_ind), reducedcost = linear_sum_assignment(cost, return_cost=True)
        ptsCR = list(zip(row_ind, col_ind))
        ptsCR = sorted(ptsCR, key=lambda tup: tup[1])
        sumCR = costcopy[row_ind, col_ind].sum()
        t01 = time.time()
        if verbose >= 2:
            print('Reduced cost:\n', cost)
        if verbose >= 1:
            print('Hungarian time [s] for space:         {:.3}'.format(t01 -
                                                                       t00))

        # final _all_ best matches btwn R & C atoms through Uno algorithm, seeded from Hungarian sol'n
        edges = np.argwhere(reducedcost < uno_cutoff)
        gooduns = uno(edges, ptsCR)
        t02 = time.time()
        if verbose >= 1:
            print('Uno time [s] for space:               {:.3}'.format(t02 -
                                                                       t01))

        for gu in gooduns:
            gu2 = gu[:]
            gu2.sort(key=lambda x: x[1]
                     )  # resorts match into (r, c) = (info, range)
            subans = [p[0] for p in gu2]  # compacted to subans/lap format

            ans = tuple(npcgp[np.array(subans)])
            if verbose >= 3:
                print('Best Candidate ({:6.3}):'.format(sumCR), rgp, '<--',
                      ans, '     from', cgp, subans)
            yield ans

    if algo == 'perm':
        ccdistmat = qcel.util.distance_matrix(cgeom, cgeom)
        rrdistmat = qcel.util.distance_matrix(rgeom, rgeom)
        algofn = filter_permutative

    if algo == 'hung':
        crdistmat = qcel.util.distance_matrix(cgeom, rgeom)
        algofn = filter_hungarian

    if algo == 'hunguno':
        ccdistmat = qcel.util.distance_matrix(cgeom, cgeom)
        rrdistmat = qcel.util.distance_matrix(rgeom, rgeom)
        # TODO investigate soundness
        with np.errstate(divide='ignore'):
            ccnremat = np.reciprocal(ccdistmat)
            rrnremat = np.reciprocal(rrdistmat)
        ccnremat[ccnremat == np.inf] = 0.
        rrnremat[rrnremat == np.inf] = 0.
        algofn = filter_hungarian_uno
        from .util.scipy_hungarian import linear_sum_assignment
        from .util.gph_uno_bipartite import uno

    # collect candidate atom orderings from algofn for each of the atom classes,
    #   recombine the classes with each other in every permutation (could maybe
    #   add Hungarian here, too) as generator back to permutation_kabsch
    for cpmut in itertools.product(
            *itertools.starmap(algofn, connect.items())):
        atpat = [None] * len(ref)
        for igp, group in enumerate(cpmut):
            for iidx, idx in enumerate(list(connect.keys())[igp]):
                atpat[idx] = group[iidx]
        yield atpat
Example #41
    def prepare_gkp(self, state, epsilon, ampl_cutoff, representation="real", shape="square"):
        r"""Prepares the arrays of weights, means and covs for a finite energy GKP state.

        GKP states are qubits, with the qubit state defined by:

        :math:`\ket{\psi}_{gkp} = \cos\frac{\theta}{2}\ket{0}_{gkp} + e^{-i\phi}\sin\frac{\theta}{2}\ket{1}_{gkp}`

        where the computational basis states are :math:`\ket{\mu}_{gkp} = \sum_{n} \ket{(2n+\mu)\sqrt{\pi\hbar}}_{q}`.

        Args:
            state (list): ``[theta,phi]`` for qubit definition above
            epsilon (float): finite energy parameter of the state
            ampl_cutoff (float): this determines how many terms to keep
            representation (str): ``'real'`` or ``'complex'`` representation
            shape (str): shape of the lattice; default 'square'

        Returns:
            tuple: arrays of the weights, means and covariances for the state

        Raises:
            NotImplementedError: if the complex representation or a non-square lattice is attempted
        """

        if representation == "complex":
            raise NotImplementedError("The complex description of GKP is not implemented")

        if shape != "square":
            raise NotImplementedError("Only square GKP are implemented for now")

        theta, phi = state[0], state[1]

        def coeff(peak_loc):
            """Returns the value of the weight for a given peak.

            Args:
                peak_loc (array): location of the ideal peak in phase space

            Returns:
                float: weight of the peak
            """
            l, m = peak_loc[:, 0], peak_loc[:, 1]
            t = np.zeros(peak_loc.shape[0], dtype=complex)
            t += np.logical_and(l % 2 == 0, m % 2 == 0)
            t += np.logical_and(l % 4 == 0, m % 2 == 1) * (
                np.cos(0.5 * theta) ** 2 - np.sin(0.5 * theta) ** 2
            )
            t += np.logical_and(l % 4 == 2, m % 2 == 1) * (
                np.sin(0.5 * theta) ** 2 - np.cos(0.5 * theta) ** 2
            )
            t += np.logical_and(l % 4 % 2 == 1, m % 4 == 0) * np.sin(theta) * np.cos(phi)
            t -= np.logical_and(l % 4 % 2 == 1, m % 4 == 2) * np.sin(theta) * np.cos(phi)
            t -= (
                np.logical_or(
                    np.logical_and(l % 4 == 3, m % 4 == 3),
                    np.logical_and(l % 4 == 1, m % 4 == 1),
                )
                * np.sin(theta)
                * np.sin(phi)
            )
            t += (
                np.logical_or(
                    np.logical_and(l % 4 == 3, m % 4 == 1),
                    np.logical_and(l % 4 == 1, m % 4 == 3),
                )
                * np.sin(theta)
                * np.sin(phi)
            )
            prefactor = np.exp(
                -np.pi
                * 0.25
                * (l ** 2 + m ** 2)
                * (1 - np.exp(-2 * epsilon))
                / (1 + np.exp(-2 * epsilon))
            )
            weight = t * prefactor
            return weight

        # Set the max peak value
        z_max = int(
            np.ceil(
                np.sqrt(
                    -4
                    / np.pi
                    * np.log(ampl_cutoff)
                    * (1 + np.exp(-2 * epsilon))
                    / (1 - np.exp(-2 * epsilon))
                )
            )
        )
        damping = 2 * np.exp(-epsilon) / (1 + np.exp(-2 * epsilon))

        # Create set of means before finite energy effects
        means_gen = it.tee(
            it.starmap(lambda l, m: l + 1j * m, it.product(range(-z_max, z_max + 1), repeat=2)),
            2,
        )
        means = np.concatenate(
            (
                np.reshape(
                    np.fromiter(means_gen[0], complex, count=(2 * z_max + 1) ** 2), (-1, 1)
                ).real,
                np.reshape(
                    np.fromiter(means_gen[1], complex, count=(2 * z_max + 1) ** 2), (-1, 1)
                ).imag,
            ),
            axis=1,
        )

        # Calculate the weights for each peak
        weights = coeff(means)
        filt = abs(weights) > ampl_cutoff
        weights = weights[filt]

        weights /= np.sum(weights)
        # Apply finite energy effect to means
        means = means[filt]

        means *= 0.5 * damping * np.sqrt(np.pi * self.circuit.hbar)
        # Covariances all the same
        covs = (
            0.5
            * self.circuit.hbar
            * (1 - np.exp(-2 * epsilon))
            / (1 + np.exp(-2 * epsilon))
            * np.identity(2)
        )
        covs = np.repeat(covs[None, :], weights.size, axis=0)

        return weights, means, covs
Example #42
 def scan_egg_links(self, search_path):
     dirs = filter(os.path.isdir, search_path)
     egg_links = ((path, entry) for path in dirs
                  for entry in os.listdir(path)
                  if entry.endswith('.egg-link'))
     list(itertools.starmap(self.scan_egg_link, egg_links))
Example #43
### Groupby
###
from itertools import groupby
groupby(["hi", "nice", "hello", "cool"], key=lambda x: "i" in x)    #Group items by key; here, whether the word contains "i"
                                                                    # True ['hi', 'nice']
                                                                    # False ['hello', 'cool']

### Islice
###
from itertools import islice
islice(range(10), 2, 5, 2)      #Return an iterable with items of the given iterable from index '2' up to '5' with step '2' -> [2, 4]

### Starmap
###
from itertools import starmap
starmap(pow, [(2,5), (3,2), (10,3)])    #Return an iterable applying the given function to the arguments unpacked from each tuple
                                        # -> 32, 9, 1000

### Takewhile
###
from itertools import takewhile
takewhile(lambda x: x<5, [1,4,6,4,1])   #Return items from the iterable while the predicate is True -> [1,4]

### Tee
###
from itertools import tee
tee('abcdef', 4)                        #Return '4' independent iterators over the items of the given iterable

### Ziplongest
###
from itertools import zip_longest
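The `zip_longest` entry appears cut off; a standard usage line, for completeness:

zip_longest('ABCD', 'xy', fillvalue='-')    #Pad the shorter iterable with fillvalue -> ('A','x'), ('B','y'), ('C','-'), ('D','-')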
Example #44
 def __init__(self, data, row_index, unique=False):
     node_keys = map(tuple, data)
     self._nodes = SortedList(starmap(Node, zip(node_keys, row_index)))
     self._unique = unique
Example #45
def compositeIsp(*IspThrustPair):
    z = zip(*starmap(lambda Isp, th: (th, th / Isp), IspThrustPair))
    th, ff = map(sum, z)
    return th / ff
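A worked call with hypothetical engine data (`(Isp, thrust)` pairs): the composite Isp is total thrust over total propellant flow, each engine's flow being thrust/Isp:

compositeIsp((300.0, 100.0), (450.0, 300.0))
# flows: 100/300 + 300/450 = 1.0; total thrust: 400.0 -> composite Isp 400.0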
Example #46
File: ANARC05B.py Project: pohmelie/spoj
from itertools import starmap


def yoba(x, z):
    zi, s = 0, 0
    for n in x:
        if zi < len(z) and n == z[zi]:
            yield s
            yield z[zi]
            zi, s = zi + 1, 0
        else:
            s = s + n
    yield s

while True:
    x = tuple(map(int, input().split()[1:]))
    if x == ():
        break
    y = tuple(map(int, input().split()[1:]))
    z = tuple(sorted(set(x) & set(y)))
    print(sum(starmap(max, zip(yoba(x, z), yoba(y, z)))))
Example #47
 def test_t01(self):
     self.assertListEqual(
         list(itertools.starmap(booleanify_, self.iterable)),
         [("defaultalbums", "Artist, The", "1", "1", True, False, "A", "B"),
          ("defaultalbums", "Artist, The", "1", "2", "A", "B", "C", "D")])
Example #48
    def _load_data(data_dir: Path, script_dir: Path, **_: Any) -> None:
        """Load test data into an Impala backend instance.

        Parameters
        ----------
        data_dir
            Location of testdata
        script_dir
            Location of scripts defining schemas
        """
        fsspec = pytest.importorskip("fsspec")

        # without setting the pool size
        # connections are dropped from the urllib3
        # connection pool when the number of workers exceeds this value.
        # this doesn't appear to be configurable through fsspec
        URLLIB_DEFAULT_POOL_SIZE = 10

        env = IbisTestEnv()
        con = ibis.impala.connect(
            host=env.impala_host,
            port=env.impala_port,
            hdfs_client=fsspec.filesystem(
                env.hdfs_protocol,
                host=env.nn_host,
                port=env.hdfs_port,
                user=env.hdfs_user,
            ),
            pool_size=URLLIB_DEFAULT_POOL_SIZE,
        )

        fs = fsspec.filesystem("file")

        data_files = {
            data_file
            for data_file in fs.find(data_dir)
            # ignore sqlite databases and markdown files
            if not data_file.endswith((".db", ".md"))
            # ignore files in the test data .git directory
            if (
                # ignore .git
                os.path.relpath(data_file, data_dir).split(os.sep, 1)[0]
                != ".git"
            )
        }

        executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=int(
                os.environ.get(
                    "IBIS_DATA_MAX_WORKERS",
                    URLLIB_DEFAULT_POOL_SIZE,
                )
            )
        )

        hdfs = con.hdfs
        tasks = {
            # make the database
            executor.submit(impala_create_test_database, con, env),
            # build and upload UDFs
            *itertools.starmap(
                executor.submit,
                impala_build_and_upload_udfs(hdfs, env, fs=fs),
            ),
            # upload data files
            *(
                executor.submit(
                    hdfs_make_dir_and_put_file,
                    hdfs,
                    data_file,
                    os.path.join(
                        env.test_data_dir,
                        os.path.relpath(data_file, data_dir),
                    ),
                )
                for data_file in data_files
            ),
        }

        for future in concurrent.futures.as_completed(tasks):
            future.result()

        # create the tables and compute stats
        for future in concurrent.futures.as_completed(
            executor.submit(table_future.result().compute_stats)
            for table_future in concurrent.futures.as_completed(
                impala_create_tables(con, env, executor=executor)
            )
        ):
            future.result()
Example #49
def main():
    # Import vocabulary data
    with open("20newsgroups/vocabulary.txt", "r") as f:
        vocab_data = f.readlines()
        vocabulary = []
        word_ID = 1
        for word in vocab_data:
            vocabulary.append((word.strip(), word_ID))
            word_ID += 1

    #Import train label
    with open("20newsgroups/train_label.csv") as csvfile:
        readCSV = csv.reader(csvfile, delimiter=",")
        y_train = []
        document_ID = 1
        for item in readCSV:
            y_train.append((item[0], document_ID))
            document_ID += 1

    # train label in form of dictionary
    dictYtrain = {}
    for a, b in y_train:
        dictYtrain.setdefault(str(b), a)

    # Import test label
    with open("20newsgroups/test_label.csv") as csvfile:
        readCSV = csv.reader(csvfile, delimiter=",")
        y_test = []
        document_ID = 1
        for item in readCSV:
            y_test.append((item[0], document_ID))
            document_ID += 1

    # test label in form of dictionary
    dictYtest = {}
    for a, b in y_test:
        dictYtest.setdefault(str(b), a)

    # Import train data
    with open("20newsgroups/train_data.csv") as csvfile:
        readCSV = csv.reader(csvfile, delimiter=",")
        X_train = []
        for item in readCSV:
            X_train.append((item[0], item[1], item[2]))

    # Import test data
    with open("20newsgroups/test_data.csv") as csvfile:
        readCSV = csv.reader(csvfile, delimiter=",")
        X_test = []
        for item in readCSV:
            X_test.append((item[0], item[1], item[2]))

    # list of all word IDs
    listOfWords = []
    for item in vocabulary:
        word_ID = item[1]
        listOfWords.append(str(word_ID))

    # list of all categories
    listOfCategories = []
    for i in range(1, 21):
        listOfCategories.append(str(i))

    # total number of words in vocabulary
    number_of_words_vocabulary = len(vocabulary)

    # total number of training documents
    number_of_train_documents = len(y_train)

    # dictionary: key = category, values: number of documents
    dictCategoriesDocuments = dictOfCategories()
    dictCategoriesDocumentsTest = dictOfCategoriesTest()

    # dictionary: key=document ID, value=number of words
    dictDocumentsWords = dict_of_documents()

    # dictionary: key=category, value=number of words
    dictCategoryWords = wordsPerCategory(dictDocumentsWords)

    # dictionary, key=category, values=doc IDs
    dictCategoryDocumentIDs = dictOfDocsPerCat()

    # list of dictionaries: entry 0 is category 1, entry 1 is category 2, etc.;
    # each dictionary maps a word ID to its number of occurrences
    parallel = True
    iteritems = list(zip(range(1, 21), 20 * [dictCategoryDocumentIDs]))
    num_cpus = multiprocessing.cpu_count()
    if parallel:
        print('\nMultithreaded computations are using', num_cpus, 'cpus')
        with Pool(num_cpus) as p:
            listWordIDNumberOfOccurences = list(
                p.starmap(numberOfTimesWordK_inCatC, iteritems))
    else:
        listWordIDNumberOfOccurences = list(
            itertools.starmap(numberOfTimesWordK_inCatC, iteritems))

    # dictionary, key: document ID, values: word IDs
    dictDocumentIDWordIDs = dictOfDocIDs_WordIDs()

    # dictionary, key: document ID, values: word IDs
    dictDocumentIDWordIDsTest = dictOfDocIDs_WordIDsTest()

    listOfDocumentIDs = set()
    for i in X_train:
        doc_ID = str(i[0])
        if doc_ID not in listOfDocumentIDs:
            listOfDocumentIDs.add(doc_ID)

    listOfDocumentIDsTest = set()
    for i in X_test:
        doc_ID = str(i[0])
        if doc_ID not in listOfDocumentIDsTest:
            listOfDocumentIDsTest.add(doc_ID)

    # Output class priors for 20 categories
    storedPriors = {}
    print("\nClass priors:")
    for i in range(1, 21):
        storedPriors[i] = classPrior(str(i), dictCategoriesDocuments)
        print("P(Omega = ", i, ")", storedPriors[i])

    listPosteriors = [dict() for x in range(20)]
    for category in listOfCategories:
        for word in listOfWords:
            (word_ID, cat, P_MLE) = MLE(word, category, dictCategoryWords,
                                        listWordIDNumberOfOccurences)
            dcat = listPosteriors[int(category) - 1]
            if word_ID not in dcat:
                dcat[word_ID] = P_MLE
            else:
                dcat[word_ID] += P_MLE

    listPosteriorsBE = [dict() for x in range(20)]
    for category in listOfCategories:
        for word in listOfWords:
            (word_ID, cat, P_BE) = BE(word, category, dictCategoryWords,
                                      listWordIDNumberOfOccurences)
            dcat = listPosteriorsBE[int(category) - 1]
            if word_ID not in dcat:
                dcat[word_ID] = P_BE
            else:
                dcat[word_ID] += P_BE


############################################################################################
# MLE classification (Training Data)

    def gen_OmegaNB_MLE_args(listOfDocumentIDs, listOfCategories,
                             dictCategoriesDocuments, listOfWords,
                             dictCategoryWords, listWordIDNumberOfOccurences,
                             dictDocumentIDWordIDs):
        for doc_ID in listOfDocumentIDs:
            yield (doc_ID, listOfCategories, dictCategoriesDocuments,
                   listOfWords, dictCategoryWords,
                   listWordIDNumberOfOccurences, dictDocumentIDWordIDs)

    with Pool(num_cpus) as p:
        classifications = p.starmap(
            OmegaNB_MLE,
            gen_OmegaNB_MLE_args(listOfDocumentIDs,
                                 listOfCategories, dictCategoriesDocuments,
                                 set(listOfWords), dictCategoryWords,
                                 listWordIDNumberOfOccurences,
                                 dictDocumentIDWordIDs))

    trainClassification = {
        k: v
        for (k, v) in zip(listOfDocumentIDs, classifications)
    }

    # Accuracy
    numberOfCorrectlyClassifiedDocsTrainingData = 0
    dictCategoryAccuracy = {}
    listActualPredicted = []
    for key in trainClassification:
        actualValue = int(dictYtrain.get(key))
        predictedValue = trainClassification.get(key)
        if actualValue == predictedValue:
            numberOfCorrectlyClassifiedDocsTrainingData += 1
            if actualValue not in dictCategoryAccuracy:
                dictCategoryAccuracy[actualValue] = 1
            else:
                dictCategoryAccuracy[actualValue] += 1
        listActualPredicted.append((actualValue, predictedValue))

    # MLE Train Results
    print("\nOverall accuracy for MLE (Training)= ",
          numberOfCorrectlyClassifiedDocsTrainingData / len(listOfDocumentIDs))
    print("\nClass Accuracy for MLE (Training):")
    for i in listOfCategories:
        print("Group ", i, ":", (dictCategoryAccuracy.get(int(i), 0)) /
              (dictCategoriesDocuments.get(i)))
    print("\nMLE Confusion Matrix (Training):",
          confusionMatrix(listActualPredicted))

    ###########################################################################################
    # BE Classification (Training data)
    def gen_OmegaNB_BE_args(listOfDocumentIDs, listOfCategories,
                            dictCategoriesDocuments, listOfWords,
                            dictCategoryWords, listWordIDNumberOfOccurences,
                            dictDocumentIDWordIDs):
        for doc_ID in listOfDocumentIDs:
            yield (doc_ID, listOfCategories, dictCategoriesDocuments,
                   listOfWords, dictCategoryWords,
                   listWordIDNumberOfOccurences, dictDocumentIDWordIDs)

    with Pool(num_cpus) as p:
        classificationsBE = p.starmap(
            OmegaNB_BE,
            gen_OmegaNB_BE_args(listOfDocumentIDs,
                                listOfCategories, dictCategoriesDocuments,
                                set(listOfWords), dictCategoryWords,
                                listWordIDNumberOfOccurences,
                                dictDocumentIDWordIDs))

    trainClassificationBE = {
        k: v
        for (k, v) in zip(listOfDocumentIDs, classificationsBE)
    }

    # Accuracy
    numberOfCorrectlyClassifiedDocsTrainingDataBE = 0
    dictCategoryAccuracyBE = {}
    listActualPredictedBE = []
    for key in trainClassificationBE:
        actualValue = int(dictYtrain.get(key))
        predictedValueBE = trainClassificationBE.get(key)
        if actualValue == predictedValueBE:
            numberOfCorrectlyClassifiedDocsTrainingDataBE += 1
            if actualValue not in dictCategoryAccuracyBE:
                dictCategoryAccuracyBE[actualValue] = 1
            else:
                dictCategoryAccuracyBE[actualValue] += 1
        listActualPredictedBE.append((actualValue, predictedValueBE))

    # BE Train Results
    print(
        "\nOverall accuracy for BE (Training)= ",
        numberOfCorrectlyClassifiedDocsTrainingDataBE / len(listOfDocumentIDs))
    print("\nClass Accuracy for BE (Training):")
    for i in listOfCategories:
        print("Group ", i, ":", (dictCategoryAccuracyBE.get(int(i), 0)) /
              (dictCategoriesDocuments.get(i)))
    print("\nBE Confusion Matrix (Training):",
          confusionMatrix(listActualPredictedBE))

    ##################################################################################
    # MLE Classification (Testing data)
    def gen_Predict_MLE_args(listOfDocumentIDsTest, listOfCategories,
                             listOfWords, dictDocumentIDWordIDsTest,
                             storedPriors, listPosteriors):
        for doc_ID in listOfDocumentIDsTest:
            yield (doc_ID, listOfCategories, listOfWords,
                   dictDocumentIDWordIDsTest, storedPriors, listPosteriors)

    with Pool(num_cpus) as p:
        classificationsTestMLE = p.starmap(
            Predict_MLE,
            gen_Predict_MLE_args(listOfDocumentIDsTest, listOfCategories,
                                 set(listOfWords), dictDocumentIDWordIDsTest,
                                 storedPriors, listPosteriors))

    #print("check1")
    testMLEClassification = {
        k: v
        for (k, v) in zip(listOfDocumentIDsTest, classificationsTestMLE)
    }
    #print("check2")
    #print(testMLEClassification)

    # Accuracy
    numberOfCorrectlyClassifiedDocsTestingDataMLE = 0
    dictCategoryAccuracyTestMLE = {}
    listActualPredictedTestMLE = []
    for key in testMLEClassification:
        actualValueTest = int(dictYtest.get(key))
        predictedValueTestMLE = testMLEClassification.get(key)
        if actualValueTest == predictedValueTestMLE:
            numberOfCorrectlyClassifiedDocsTestingDataMLE += 1
            if actualValueTest not in dictCategoryAccuracyTestMLE:
                dictCategoryAccuracyTestMLE[actualValueTest] = 1
            else:
                dictCategoryAccuracyTestMLE[actualValueTest] += 1
        listActualPredictedTestMLE.append(
            (actualValueTest, predictedValueTestMLE))

    # MLE Test Results
    print(
        "\nOverall accuracy for MLE (Testing Data) = ",
        numberOfCorrectlyClassifiedDocsTestingDataMLE /
        len(listOfDocumentIDsTest))
    print("\nClass Accuracy for MLE (Testing Data):")
    for i in listOfCategories:
        print("Group ", i, ":", (dictCategoryAccuracyTestMLE.get(int(i), 0)) /
              (dictCategoriesDocumentsTest.get(i)))
    print("\nMLE Confusion Matrix (Testing Data):",
          confusionMatrix(listActualPredictedTestMLE))

    ##################################################################################
    # BE Classification (Testing data)

    def gen_Predict_BE_args(listOfDocumentIDsTest, listOfCategories,
                            listOfWords, dictDocumentIDWordIDsTest,
                            storedPriors, listPosteriorsBE):
        for doc_ID in listOfDocumentIDsTest:
            yield (doc_ID, listOfCategories, listOfWords,
                   dictDocumentIDWordIDsTest, storedPriors, listPosteriorsBE)

    with Pool(num_cpus) as p:
        classificationsTestBE = p.starmap(
            Predict_BE,
            gen_Predict_BE_args(listOfDocumentIDsTest, listOfCategories,
                                set(listOfWords), dictDocumentIDWordIDsTest,
                                storedPriors, listPosteriorsBE))

    testBEClassification = {
        k: v
        for (k, v) in zip(listOfDocumentIDsTest, classificationsTestBE)
    }

    # Accuracy
    numberOfCorrectlyClassifiedDocsTestingDataBE = 0
    dictCategoryAccuracyTestBE = {}
    listActualPredictedTestBE = []
    for key in testBEClassification:
        actualValueTest = int(dictYtest.get(key))
        predictedValueTestBE = testBEClassification.get(key)
        if actualValueTest == predictedValueTestBE:
            numberOfCorrectlyClassifiedDocsTestingDataBE += 1
            if actualValueTest not in dictCategoryAccuracyTestBE:
                dictCategoryAccuracyTestBE[actualValueTest] = 1
            else:
                dictCategoryAccuracyTestBE[actualValueTest] += 1
        listActualPredictedTestBE.append(
            (actualValueTest, predictedValueTestBE))

    # BE Test Results
    print(
        "\nOverall accuracy for BE (Testing Data) = ",
        numberOfCorrectlyClassifiedDocsTestingDataBE /
        len(listOfDocumentIDsTest))
    print("\nClass Accuracy for BE (Testing Data):")
    for i in listOfCategories:
        print("Group ", i, ":", (dictCategoryAccuracyTestBE.get(int(i), 0)) /
              (dictCategoriesDocumentsTest.get(i)))
    print("\nBE Confusion Matrix (Testing Data):",
          confusionMatrix(listActualPredictedTestBE))
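
Example #49 switches between serial and parallel evaluation of the same argument tuples: multiprocessing.Pool.starmap has the same call shape as itertools.starmap, so only the executor changes. A small sketch of that equivalence (weight is a hypothetical scoring function):

from itertools import starmap
from multiprocessing import Pool

def weight(count, total):
    # hypothetical scoring function
    return count / total

pairs = [(3, 10), (7, 10), (5, 10)]

if __name__ == "__main__":
    serial = list(starmap(weight, pairs))    # lazy, runs in this process
    with Pool(2) as p:
        parallel = p.starmap(weight, pairs)  # eager, runs in worker processes
    assert serial == parallel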
Example #50
0
    inputs = [experiments_repo, linux_repo,],
    documentation = "kernel binary for v4.19.83"
)


if __name__ == "__main__":
    benchmarks = ['blackscholes', 'bodytrack', 'canneal', 'dedup', 'facesim', 'ferret', 'fluidanimate', 'freqmine', 'raytrace', 'streamcluster', 'swaptions', 'vips', 'x264']

    sizes = ['simsmall', 'simlarge', 'native']
    cpus = ['kvm', 'timing']

    def createRun(bench, size, cpu):
        if cpu == 'timing' and size != 'simsmall':
            return 
        return gem5Run.createFSRun(
            'parsec classic memory tests with gem5-20',    
            'gem5/build/X86/gem5.opt',
            'configs-parsec-tests/run_parsec.py',
            f'''results/run_parsec/{bench}/{size}/{cpu}''',
            gem5_binary, gem5_repo, experiments_repo,
            'linux-stable/vmlinux-4.19.83',
            'disk-image/parsec/parsec-image/parsec',
            linux_binary, disk_image,
            cpu, bench, size, '1',
            timeout = 24*60*60 #24 hours
            )
    # For the cross product of tests, create a run object
    # (skip combinations where createRun returned None).
    runs = filter(None, starmap(createRun, product(benchmarks, sizes, cpus)))
    # Run all of these experiments in parallel
    try:
        for run in runs:
            run_gem5_instance.apply_async((run, os.getcwd(),))
    except StopIteration:
        print('Stop Iteration executed')
    finally:
        pass
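
The starmap(createRun, product(...)) idiom above creates one run per element of the benchmark x size x cpu cross product. A stripped-down sketch of the idiom (make_run is a hypothetical factory mirroring createRun):

from itertools import product, starmap

def make_run(bench, size, cpu):
    # hypothetical factory mirroring createRun above
    return f"results/run_parsec/{bench}/{size}/{cpu}"

runs = starmap(make_run, product(["blackscholes", "canneal"],
                                 ["simsmall"], ["kvm", "timing"]))
for run in runs:
    print(run)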


Example #51
0
# example of repeat
import itertools

squares = list(map(pow, range(10), itertools.repeat(2)))
# pow --> raises a number to a certain power
# range --> supplies the bases; repeat(2) supplies the constant exponent
print(squares)

#starmap --> takes arguments that are already paired together in tuples.

cubes = itertools.starmap(pow, [(0,2), (1,2), (2,2)])
print(list(cubes))


# Combination and Permutation Functions
# combinations --> order does not matter
# permutations --> order does matter
# neither repeats values from the input

letters = ['a', 'b', 'c', 'd']
numbers = [0, 1, 2, 3]
names = ['Corey', 'Nicole']

result = itertools.combinations(letters, 2)  # all 2-letter combinations

for item in result:
    print(item)
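
# the notes above also mention permutations(); a minimal sketch (not part of
# the original snippet): same input pool, but order matters
result = itertools.permutations(letters, 2)
print(len(list(result)))  # 12 ordered pairs, versus 6 combinations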
Example #52
0
def sum_two_hashed(hashed_a: List[int], hashed_b: List[int]) -> List[int]:
    mapping = zip(hashed_a, hashed_b)
    mapping = starmap(lambda a, b: (a + b) % 256, mapping)
    return list(mapping)
Example #53
0
def records2array(records, types, native=False, silent=False):
    """Converts records into either a numpy.recarray or a nested array.array

    Args:
        records (Iter[dict]): Rows of data whose keys are the field names.
            E.g., output from any `meza.io` read function.

        types (Iter[dict]): Field types; each dict has an 'id' and a 'type'
            key, e.g., {'id': 'alpha', 'type': 'text'}.

        native (bool): Return a native array (default: False).

        silent (bool): Suppress the warning message (default: False).

    Returns:
        numpy.recarray

    See also:
        `meza.convert.records2df`

    Examples:
        >>> records = [{'alpha': 'aa', 'beta': 2}, {'alpha': 'bee', 'beta': 3}]
        >>> types = [
        ...     {'id': 'alpha', 'type': 'text'}, {'id': 'beta', 'type': 'int'}]
        >>>
        >>> arr = records2array(records, types, silent=True)
        >>> u, i = 'u', 'i'
        >>> native_resp = [
        ...     [array(u, 'alpha'), array(u, 'beta')],
        ...     [array(u, 'aa'), array(u, 'bee')],
        ...     array(i, [2, 3])]
        >>>
        >>> if np:
        ...     arr.alpha.tolist() == ['aa', 'bee']
        ...     arr.beta.tolist() == [2, 3]
        ... else:
        ...     True
        ...     True
        True
        True
        >>> True if np else arr == native_resp
        True
        >>> records2array(records, types, native=True) == native_resp
        True
    """
    numpy = np and not native
    dialect = "numpy" if numpy else "array"
    zipped = [(ft.get_dtype(t1["type"], dialect), t1["id"]) for t1 in types]
    dtype, ids = list(zip(*zipped))

    if numpy:
        data = [tuple(r.get(id_) for id_ in ids) for r in records]
        ndtype = [tuple(z) for z in zip(ids, dtype)]
        ndarray = np.array(data, dtype=ndtype)
        converted = ndarray.view(np.recarray)
    else:
        if not (native or silent):
            msg = (
                "It looks like you don't have numpy installed. This function"
                " will return a native array instead.")

            logger.warning(msg)

        header = [array("u", t2["id"]) for t2 in types]
        data = zip_longest(*([r.get(i) for i in ids] for r in records))

        # array.array can't have nulls, so convert to an appropriate equivalent
        clean = lambda t, d: (x if x else ft.ARRAY_NULL_TYPE[t] for x in d)
        cleaned = it.starmap(clean, zip(dtype, data))

        values = [[array(d, x) for x in c] if d in {"c", "u"} else array(d, c)
                  for d, c in zip(dtype, cleaned)]

        converted = [header] + values

    return converted
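
The line it.starmap(clean, zip(dtype, data)) above pairs each column's type code with that column's values, so the null substitution runs once per column. A toy sketch of the same per-column pattern (the null table here is hypothetical, not meza's ft.ARRAY_NULL_TYPE):

from itertools import starmap

null_for_type = {"i": 0, "u": ""}  # hypothetical per-type null replacements

def clean(type_code, column):
    # substitute the type's null replacement for missing values
    return [x if x is not None else null_for_type[type_code] for x in column]

columns = zip(["i", "u"], [[1, None, 3], ["a", None, "c"]])
print(list(starmap(clean, columns)))  # [[1, 0, 3], ['a', '', 'c']]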
Example #54
0
def _install_wheel(
        name,  # type: str
        wheel_zip,  # type: ZipFile
        wheel_path,  # type: str
        scheme,  # type: Scheme
        pycompile=True,  # type: bool
        warn_script_location=True,  # type: bool
        direct_url=None,  # type: Optional[DirectUrl]
        requested=False,  # type: bool
):
    # type: (...) -> None
    """Install a wheel.

    :param name: Name of the project to install
    :param wheel_zip: open ZipFile for wheel being installed
    :param scheme: Distutils scheme dictating the install directories
    :param wheel_path: path to the wheel archive on disk, used in error
        messages
    :param pycompile: Whether to byte-compile installed Python files
    :param warn_script_location: Whether to check that scripts are installed
        into a directory on PATH
    :raises UnsupportedWheel:
        * when the directory holds an unpacked wheel with incompatible
          Wheel-Version
        * when the .dist-info dir does not match the wheel
    """
    info_dir, metadata = parse_wheel(wheel_zip, name)

    if wheel_root_is_purelib(metadata):
        lib_dir = scheme.purelib
    else:
        lib_dir = scheme.platlib

    # Record details of the files moved
    #   installed = files copied from the wheel to the destination
    #   changed = files changed while installing (scripts #! line typically)
    #   generated = files newly generated during the install (script wrappers)
    installed = {}  # type: Dict[RecordPath, RecordPath]
    changed = set()  # type: Set[RecordPath]
    generated = []  # type: List[str]

    def record_installed(srcfile, destfile, modified=False):
        # type: (RecordPath, str, bool) -> None
        """Map archive RECORD paths to installation RECORD paths."""
        newpath = _fs_to_record_path(destfile, lib_dir)
        installed[srcfile] = newpath
        if modified:
            changed.add(_fs_to_record_path(destfile))

    def all_paths():
        # type: () -> Iterable[RecordPath]
        names = wheel_zip.namelist()
        # If a flag is set, names may be unicode in Python 2. We convert to
        # text explicitly so these are valid for lookup in RECORD.
        decoded_names = map(ensure_text, names)
        for name in decoded_names:
            yield cast("RecordPath", name)

    def is_dir_path(path):
        # type: (RecordPath) -> bool
        return path.endswith("/")

    def assert_no_path_traversal(dest_dir_path, target_path):
        # type: (str, str) -> None
        if not is_within_directory(dest_dir_path, target_path):
            message = ("The wheel {!r} has a file {!r} trying to install"
                       " outside the target directory {!r}")
            raise InstallationError(
                message.format(wheel_path, target_path, dest_dir_path))

    def root_scheme_file_maker(zip_file, dest):
        # type: (ZipFile, str) -> Callable[[RecordPath], File]
        def make_root_scheme_file(record_path):
            # type: (RecordPath) -> File
            normed_path = os.path.normpath(record_path)
            dest_path = os.path.join(dest, normed_path)
            assert_no_path_traversal(dest, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)

        return make_root_scheme_file

    def data_scheme_file_maker(zip_file, scheme):
        # type: (ZipFile, Scheme) -> Callable[[RecordPath], File]
        scheme_paths = {}
        for key in SCHEME_KEYS:
            encoded_key = ensure_text(key)
            scheme_paths[encoded_key] = ensure_text(
                getattr(scheme, key), encoding=sys.getfilesystemencoding())

        def make_data_scheme_file(record_path):
            # type: (RecordPath) -> File
            normed_path = os.path.normpath(record_path)
            try:
                _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)
            except ValueError:
                message = (
                    "Unexpected file in {}: {!r}. .data directory contents"
                    " should be named like: '<scheme key>/<path>'.").format(
                        wheel_path, record_path)
                raise InstallationError(message)

            try:
                scheme_path = scheme_paths[scheme_key]
            except KeyError:
                valid_scheme_keys = ", ".join(sorted(scheme_paths))
                message = (
                    "Unknown scheme key used in {}: {} (for file {!r}). .data"
                    " directory contents should be in subdirectories named"
                    " with a valid scheme key ({})").format(
                        wheel_path, scheme_key, record_path, valid_scheme_keys)
                raise InstallationError(message)

            dest_path = os.path.join(scheme_path, dest_subpath)
            assert_no_path_traversal(scheme_path, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)

        return make_data_scheme_file

    def is_data_scheme_path(path):
        # type: (RecordPath) -> bool
        return path.split("/", 1)[0].endswith(".data")

    paths = all_paths()
    file_paths = filterfalse(is_dir_path, paths)
    root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path,
                                                     file_paths)

    make_root_scheme_file = root_scheme_file_maker(
        wheel_zip,
        ensure_text(lib_dir, encoding=sys.getfilesystemencoding()),
    )
    files = map(make_root_scheme_file, root_scheme_paths)

    def is_script_scheme_path(path):
        # type: (RecordPath) -> bool
        parts = path.split("/", 2)
        return (len(parts) > 2 and parts[0].endswith(".data")
                and parts[1] == "scripts")

    other_scheme_paths, script_scheme_paths = partition(
        is_script_scheme_path, data_scheme_paths)

    make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
    other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
    files = chain(files, other_scheme_files)

    # Get the defined entry points
    distribution = pkg_resources_distribution_for_wheel(
        wheel_zip, name, wheel_path)
    console, gui = get_entrypoints(distribution)

    def is_entrypoint_wrapper(file):
        # type: (File) -> bool
        # EP, EP.exe and EP-script.py are scripts generated for
        # entry point EP by setuptools
        path = file.dest_path
        name = os.path.basename(path)
        if name.lower().endswith('.exe'):
            matchname = name[:-4]
        elif name.lower().endswith('-script.py'):
            matchname = name[:-10]
        elif name.lower().endswith(".pya"):
            matchname = name[:-4]
        else:
            matchname = name
        # Ignore setuptools-generated scripts
        return (matchname in console or matchname in gui)

    script_scheme_files = map(make_data_scheme_file, script_scheme_paths)
    script_scheme_files = filterfalse(is_entrypoint_wrapper,
                                      script_scheme_files)
    script_scheme_files = map(ScriptFile, script_scheme_files)
    files = chain(files, script_scheme_files)

    for file in files:
        file.save()
        record_installed(file.src_record_path, file.dest_path, file.changed)

    def pyc_source_file_paths():
        # type: () -> Iterator[str]
        # We de-duplicate installation paths, since there can be overlap (e.g.
        # file in .data maps to same location as file in wheel root).
        # Sorting installation paths makes it easier to reproduce and debug
        # issues related to permissions on existing files.
        for installed_path in sorted(set(installed.values())):
            full_installed_path = os.path.join(lib_dir, installed_path)
            if not os.path.isfile(full_installed_path):
                continue
            if not full_installed_path.endswith('.py'):
                continue
            yield full_installed_path

    def pyc_output_path(path):
        # type: (str) -> str
        """Return the path the pyc file would have been written to.
        """
        return importlib.util.cache_from_source(path)

    # Compile all of the pyc files for the installed files
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                for path in pyc_source_file_paths():
                    # Python 2's `compileall.compile_file` requires a str in
                    # error cases, so we must convert to the native type.
                    path_arg = ensure_str(path,
                                          encoding=sys.getfilesystemencoding())
                    success = compileall.compile_file(path_arg,
                                                      force=True,
                                                      quiet=True)
                    if success:
                        pyc_path = pyc_output_path(path)
                        assert os.path.exists(pyc_path)
                        pyc_record_path = cast(
                            "RecordPath", pyc_path.replace(os.path.sep, "/"))
                        record_installed(pyc_record_path, pyc_path)
        logger.debug(stdout.getvalue())

    maker = PipScriptMaker(None, scheme.scripts)

    # Ensure old scripts are overwritten.
    # See https://github.com/pypa/pip/issues/1800
    maker.clobber = True

    # Ensure we don't generate any variants for scripts because this is almost
    # never what somebody wants.
    # See https://bitbucket.org/pypa/distlib/issue/35/
    maker.variants = {''}

    # This is required because otherwise distlib creates scripts that are not
    # executable.
    # See https://bitbucket.org/pypa/distlib/issue/32/
    maker.set_mode = True

    # Generate the console and GUI entry points specified in the wheel
    scripts_to_generate = get_console_script_specs(console)

    gui_scripts_to_generate = list(starmap('{} = {}'.format, gui.items()))

    generated_console_scripts = maker.make_multiple(scripts_to_generate)
    generated.extend(generated_console_scripts)

    generated.extend(
        maker.make_multiple(gui_scripts_to_generate, {'gui': True}))

    if warn_script_location:
        msg = message_about_scripts_not_on_PATH(generated_console_scripts)
        if msg is not None:
            logger.warning(msg)

    generated_file_mode = 0o666 & ~current_umask()

    @contextlib.contextmanager
    def _generate_file(path, **kwargs):
        # type: (str, **Any) -> Iterator[BinaryIO]
        with adjacent_tmp_file(path, **kwargs) as f:
            yield f
        os.chmod(f.name, generated_file_mode)
        replace(f.name, path)

    dest_info_dir = os.path.join(lib_dir, info_dir)

    # Record pip as the installer
    installer_path = os.path.join(dest_info_dir, 'INSTALLER')
    with _generate_file(installer_path) as installer_file:
        installer_file.write(b'pip\n')
    generated.append(installer_path)

    # Record the PEP 610 direct URL reference
    if direct_url is not None:
        direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
        with _generate_file(direct_url_path) as direct_url_file:
            direct_url_file.write(direct_url.to_json().encode("utf-8"))
        generated.append(direct_url_path)

    # Record the REQUESTED file
    if requested:
        requested_path = os.path.join(dest_info_dir, 'REQUESTED')
        with open(requested_path, "w"):
            pass
        generated.append(requested_path)

    record_text = distribution.get_metadata('RECORD')
    record_rows = list(csv.reader(record_text.splitlines()))

    rows = get_csv_rows_for_installed(record_rows,
                                      installed=installed,
                                      changed=changed,
                                      generated=generated,
                                      lib_dir=lib_dir)

    # Record details of all files installed
    record_path = os.path.join(dest_info_dir, 'RECORD')

    with _generate_file(record_path, **csv_io_kwargs('w')) as record_file:
        # The type mypy infers for record_file is different for Python 3
        # (typing.IO[Any]) and Python 2 (typing.BinaryIO). We explicitly
        # cast to typing.IO[str] as a workaround.
        writer = csv.writer(cast('IO[str]', record_file))
        writer.writerows(_normalized_outrows(rows))
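
One small starmap detail above: starmap('{} = {}'.format, gui.items()) turns a mapping of entry-point names to callable references into the "name = module:func" spec strings the script maker consumes. In isolation (the entry point shown is hypothetical):

from itertools import starmap

gui = {"mytool-gui": "mytool.gui:main"}  # hypothetical entry point mapping
print(list(starmap('{} = {}'.format, gui.items())))
# ['mytool-gui = mytool.gui:main']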
Example #55
0
File: meter.py  Project: KevinZ1992/Jacinle
def format(self, caption, values, kv_format, glue):
    meters_kv = self._canonize_values(values)
    log_str = [caption]
    log_str.extend(
        itertools.starmap(kv_format.format, sorted(meters_kv.items())))
    return glue.join(log_str)
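
The method renders sorted (name, value) meter pairs through a caller-supplied format string and joins them with a glue string. A standalone illustration of that pattern (not the Jacinle API itself):

import itertools

meters_kv = {"loss": 0.25, "acc": 0.91}
log_str = ["epoch 1"]
log_str.extend(itertools.starmap("{}={:.2f}".format, sorted(meters_kv.items())))
print(" ".join(log_str))  # epoch 1 acc=0.91 loss=0.25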
Example #56
0
def clamp(self, to):
    start = tuple(starmap(clamp, zip(self.start, to.start, to.end)))
    end = tuple(starmap(clamp, zip(self.end, to.start, to.end)))
    return Cuboid(self.on, start, end)
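
This snippet assumes a scalar clamp(value, low, high) helper; zipping each coordinate with the target cuboid's bounds lets starmap clamp every axis independently. A sketch under that assumption:

from itertools import starmap

def clamp(value, low, high):
    # pin value into the closed interval [low, high]
    return max(low, min(value, high))

start = (-5, 12)
bounds_lo, bounds_hi = (0, 0), (10, 10)
print(tuple(starmap(clamp, zip(start, bounds_lo, bounds_hi))))  # (0, 10)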
Example #57
0
# initializing list
li = [2, 4, 5, 7, 8, 10, 20]

# initializing tuple list
li1 = [(1, 10, 5), (8, 4, 1), (5, 4, 9), (11, 10, 1)]

# using islice() to slice the list according to need
# prints elements from index 1 up to (but not including) index 6, in steps of 2
print("The sliced list values are : ", end="")
print(list(itertools.islice(li, 1, 6, 2)))

# using starmap() to select a value from each tuple according to a function
# selects min of all tuple values
print("The values acc. to function are : ", end="")
print(list(itertools.starmap(min, li1)))

# Python code to demonstrate the working of
# takewhile() and tee()

# importing "itertools" for iterator operations
import itertools

# initializing list
li = [2, 4, 6, 7, 8, 10, 20]

# storing list in iterator
iti = iter(li)

# using takewhile() to print values till condition is false.
print("The list values till 1st false value are : ", end="")
Example #58
0
    def ValidateAndMeasurePage(self, page, tab, results):
        with tab.action_runner.CreateInteraction('wait-for-quiescence'):
            tab.ExecuteJavaScript2('console.time("");')
            try:
                util.WaitFor(tab.HasReachedQuiescence, 15)
            except py_utils.TimeoutException:
                # Some sites never reach quiescence. As this benchmark
                # normalizes/categorizes results, it shouldn't be necessary
                # to reach the same state on every run.
                pass

        tab.ExecuteJavaScript2('''
        for (var i = 0; i < 11; i++) {
          var cold = i % 2 == 0;
          var name = "update_style";
          if (cold) name += "_cold";
          console.time(name);
          // Occasionally documents will break the APIs we need
          try {
            // On cold runs, force a new StyleResolver
            if (cold) {
              var style = document.createElement("style");
              document.head.appendChild(style);
              style.remove();
            }
            // Invalidate style for the whole document
            document.documentElement.lang += "z";
            // Force a style update (but not layout)
            getComputedStyle(document.documentElement).color;
          } catch (e) {}
          console.timeEnd(name);
        }''')

        self._controller.Stop(tab, results)
        renderer = self._controller.model.GetRendererThreadFromTabId(tab.id)
        markers = [
            event for event in renderer.async_slices
            if event.name.startswith('update_style')
            and event.category == 'blink.console'
        ]
        # Drop the first run.
        markers = markers[1:]
        assert len(markers) == 10

        def duration(event):
            if event.has_thread_timestamps:
                return event.thread_duration
            else:
                return event.duration

        for marker in markers:
            for event in renderer.all_slices:
                if (event.name == 'Document::updateStyle'
                        and event.start >= marker.start
                        and event.end <= marker.end):
                    access_count = event.args.get('resolverAccessCount')
                    if access_count is None:
                        # absent in earlier versions
                        continue
                    min_access_count = 50

                    if access_count >= min_access_count:
                        result = 1000 * (duration(event) / access_count)
                        results.AddValue(
                            scalar.ScalarValue(page, marker.name,
                                               'ms/1000 elements', result))

        class ParserEvent(object):
            def __init__(self, summary_event, tokenize_event, parse_event):
                min_sheet_length = 1000
                ua_sheet_mode = 5
                enormous_token_threshold = 100
                large_token_threshold = 5

                self.mode = summary_event.args.get('mode')
                self.length = summary_event.args.get('length')
                self.tokens = summary_event.args.get('tokenCount')
                self.tokenize_duration = duration(tokenize_event)
                self.parse_duration = duration(parse_event)
                self.chars_per_token = 0
                if self.tokens:
                    self.chars_per_token = self.length / float(self.tokens)
                if self.mode == ua_sheet_mode or self.length < min_sheet_length:
                    self.category = 'ignored'
                elif self.chars_per_token > enormous_token_threshold:
                    self.category = 'enormous_tokens'
                elif self.chars_per_token > large_token_threshold:
                    self.category = 'large_tokens'
                else:
                    self.category = 'regular'

        parser_events = [
            event for event in renderer.all_slices
            if event.name == 'CSSParserImpl::parseStyleSheet'
            or event.name == 'CSSParserImpl::parseStyleSheet.tokenize'
            or event.name == 'CSSParserImpl::parseStyleSheet.parse'
        ]

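        # zip(*[iter(parser_events)] * 3) groups consecutive events into
        # (summary, tokenize, parse) triples; starmap then unpacks each
        # triple into ParserEvent's three positional arguments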
        merged_events = starmap(ParserEvent, zip(*[iter(parser_events)] * 3))

        events_by_category = defaultdict(list)
        for event in merged_events:
            if event.category != 'ignored':
                events_by_category[event.category].append(event)

        for category, events in events_by_category.items():
            parse_duration = sum(event.parse_duration for event in events)
            tokenize_duration = sum(event.tokenize_duration
                                    for event in events)
            tokens = sum(event.tokens for event in events)
            length = sum(event.length for event in events)

            results.AddValue(
                scalar.ScalarValue(page, ('parse_css_%s' % category),
                                   'tokens/s',
                                   1000 / (parse_duration / tokens)))

            results.AddValue(
                scalar.ScalarValue(page, ('tokenize_css_%s' % category),
                                   'char/s',
                                   1000 / (tokenize_duration / length)))
Example #59
0
    return ap_gen

# filtering
itertools.compress(it, selector_it)
itertools.dropwhile(predicate, it)
filter(predicate, it)
itertools.filterfalse(predicate, it)
itertools.islice(it, stop)
itertools.islice(it, start, stop, step=1)
itertools.takewhile(predicate, it)

# mapping
itertools.accumulate(it, [func])
enumerate(it, start=0)
map(func, it1, [it2, ..., itN])
itertools.starmap(func, it)

# merging
itertools.chain(it1, ..., itN)
itertools.chain.from_iterable(it)
itertools.product(it1, ..., itN, repeat=1)
zip(it1, ..., itN)
itertools.zip_longest(it1, ..., itN, fillvalue=None)

# expansion
itertools.combinations(it, out_len)
itertools.combinations_with_replacement(it, out_len)
itertools.count(start=0, step=1)
itertools.cycle(it)
itertools.permutations(it, out_len=None)
itertools.repeat(item, [times])
Example #60
0
def __call__(self, *args):
    for arg_types, func in self.env.items():
        if all(starmap(isinstance, zip(args, arg_types))):
            return func(*args)
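
The dispatch loop above checks each registered signature by starmapping isinstance over (argument, expected type) pairs. A self-contained sketch of the same idea (the registry is hypothetical; the added arity check guards against zip silently truncating when fewer arguments than types are given):

from itertools import starmap

registry = {
    (int, int): lambda a, b: a + b,
    (str, str): lambda a, b: a + " " + b,
}

def dispatch(*args):
    for arg_types, func in registry.items():
        # each (arg, expected_type) pair is fed to isinstance
        if len(args) == len(arg_types) and all(starmap(isinstance, zip(args, arg_types))):
            return func(*args)
    raise TypeError("no matching signature")

print(dispatch(1, 2))      # 3
print(dispatch("a", "b"))  # a b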