def __call__(self, name, widget=None, getter=None, setter=None, prefs=None):
        prefs = prefs or tprefs
        defval = prefs.defaults[name]
        inval = prefs[name]
        if widget is None:
            if isinstance(defval, bool):
                widget = QCheckBox(self)
                getter = getter or methodcaller('isChecked')
                setter = setter or (lambda x, v: x.setChecked(v))
                widget.toggled.connect(self.emit_changed)
            elif isinstance(defval, (int, float)):
                widget = (QSpinBox if isinstance(defval, int) else QDoubleSpinBox)(self)
                getter = getter or methodcaller('value')
                setter = setter or (lambda x, v:x.setValue(v))
                widget.valueChanged.connect(self.emit_changed)
            else:
                raise TypeError('Unknown setting type for setting: %s' % name)
        else:
            if getter is None or setter is None:
                raise ValueError("getter or setter not provided for: %s" % name)
        self._prevent_changed = True
        setter(widget, inval)
        self._prevent_changed = False

        self.settings[name] = self.Setting(name, prefs, widget, getter, setter, inval)
        return widget
Example #2
def test_convert_logfiles_to_bag():
    with filetexts({'a1.log': 'Hello\nWorld', 'a2.log': 'Hola\nMundo'}) as fns:
        logs = chunks(TextFile)(list(map(TextFile, fns)))
        b = odo(logs, Bag)
        assert isinstance(b, Bag)
        assert (list(map(methodcaller('strip'), odo(b, list))) ==
                list(map(methodcaller('strip'), odo(logs, list))))
Example #3
    def __init__(self, language_info, settings=None):
        dictionary = {}
        self._settings = settings
        self.info = language_info

        if 'skip' in language_info:
            skip = map(methodcaller('lower'), language_info['skip'])
            dictionary.update(zip_longest(skip, [], fillvalue=None))
        if 'pertain' in language_info:
            pertain = map(methodcaller('lower'), language_info['pertain'])
            dictionary.update(zip_longest(pertain, [], fillvalue=None))
        for word in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday',
                     'january', 'february', 'march', 'april', 'may', 'june', 'july',
                     'august', 'september', 'october', 'november', 'december',
                     'year', 'month', 'week', 'day', 'hour', 'minute', 'second',
                     'ago']:
            translations = map(methodcaller('lower'), language_info[word])
            dictionary.update(zip_longest(translations, [], fillvalue=word))
        dictionary.update(zip_longest(ALWAYS_KEEP_TOKENS, ALWAYS_KEEP_TOKENS))
        dictionary.update(zip_longest(map(methodcaller('lower'),
                                          DATEUTIL_PARSERINFO_KNOWN_TOKENS),
                                      DATEUTIL_PARSERINFO_KNOWN_TOKENS))

        self._dictionary = dictionary
        self._no_word_spacing = language_info.get('no_word_spacing', False)
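        # Note on the pattern above (illustrative comment, not part of the original code):
        # zip_longest(words, [], fillvalue=token) pairs every word with the same canonical
        # token, e.g. zip_longest(['lun', 'lundi'], [], fillvalue='monday') yields
        # ('lun', 'monday'), ('lundi', 'monday').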
    def _generate_forward_declarations(self, domains):
        sections = []

        for domain in domains:
            declaration_types = [decl.type for decl in self.type_declarations_for_domain(domain)]
            object_types = [_type for _type in declaration_types if isinstance(_type, ObjectType)]
            enum_types = [_type for _type in declaration_types if isinstance(_type, EnumType)]
            object_types = sorted(object_types, key=methodcaller('raw_name'))
            enum_types = sorted(enum_types, key=methodcaller('raw_name'))

            if len(object_types) + len(enum_types) == 0:
                continue

            domain_lines = []
            domain_lines.append('namespace %s {' % domain.domain_name)

            # Forward-declare all classes so the type builders won't break if rearranged.
            domain_lines.extend('class %s;' % object_type.raw_name() for object_type in object_types)
            domain_lines.extend('enum class %s;' % enum_type.raw_name() for enum_type in enum_types)
            domain_lines.append('} // %s' % domain.domain_name)
            sections.append(self.wrap_with_guard_for_domain(domain, '\n'.join(domain_lines)))

        if len(sections) == 0:
            return ''
        else:
            return """// Forward declarations.
%s
// End of forward declarations.
""" % '\n\n'.join(sections)
Example #5
    def __init__(self, locale_info, settings=None):
        dictionary = {}
        self._settings = settings
        self.info = locale_info

        if 'skip' in locale_info:
            skip = map(methodcaller('lower'), locale_info['skip'])
            dictionary.update(zip_longest(skip, [], fillvalue=None))
        if 'pertain' in locale_info:
            pertain = map(methodcaller('lower'), locale_info['pertain'])
            dictionary.update(zip_longest(pertain, [], fillvalue=None))
        for word in KNOWN_WORD_TOKENS:
            if word in locale_info:
                translations = map(methodcaller('lower'), locale_info[word])
                dictionary.update(zip_longest(translations, [], fillvalue=word))
        dictionary.update(zip_longest(ALWAYS_KEEP_TOKENS, ALWAYS_KEEP_TOKENS))
        dictionary.update(zip_longest(map(methodcaller('lower'),
                                          PARSER_KNOWN_TOKENS),
                                          PARSER_KNOWN_TOKENS))

        relative_type = locale_info.get('relative-type', {})
        for key, value in relative_type.items():
            relative_translations = map(methodcaller('lower'), value)
            dictionary.update(zip_longest(relative_translations, [], fillvalue=key))

        self._dictionary = dictionary

        no_word_spacing = locale_info.get('no_word_spacing', 'False')
        self._no_word_spacing = bool(eval(no_word_spacing))

        relative_type_regex = locale_info.get("relative-type-regex", {})
        self._relative_strings = list(chain(*relative_type_regex.values()))
Example #6
 def searchPopularVideos(self, tag, count):
     videos = []
     if count == 0:
         return videos
     tag = self.searchstring(tag)
     api = InstagramAPI(client_id= '7d428aff533f40e1b3d5f919882576d2', client_secret= 'bfbe419a758141fbaf352c89b579a0a5')
     recent_media, next_ = api.tag_recent_media(count=50, tag_name=tag)
     for media in recent_media:
         if media.type == 'video':
             videos.append(Post(media.videos['standard_resolution'], media.type, media.like_count, media.comment_count))
             if len(videos) >= count:
                 popularvideos = sorted(videos, key=methodcaller('weighted_popularity'), reverse=True)
                 return popularvideos
     i = 0
     if len(videos) >= count:
         popularvideos = sorted(videos, key=methodcaller('weighted_popularity'), reverse=True)
         return popularvideos
     else:
         while i < 10 and next_:
             recent_media, next_ = api.tag_recent_media(count=50, tag_name=self.searchstring(tag), with_next_url=next_)
             for media in recent_media:
                 if media.type == 'video':
                     videos.append(Post(media.videos['standard_resolution'], media.type, media.like_count, media.comment_count))
                     if len(videos) >= count:
                         popularvideos = sorted(videos, key=methodcaller('weighted_popularity'), reverse=True)
                         return popularvideos
             i += 1
     popularvideos = sorted(videos, key=methodcaller('weighted_popularity'), reverse=True)
     return popularvideos
Example #7
def _get_processes_in_screen(screen_pid, with_cmdline=False):
    if psutil is None:
        _log_error("No module named 'psutil'")
        return
    screen_proc = psutil.Process(screen_pid)
    if psutil.version_info[0] >= 2:
        # psutil >= 2.0
        get_name = operator.methodcaller('name')
        get_cmdline = operator.methodcaller('cmdline')
        get_children = operator.methodcaller('children')
    else:
        get_name = operator.attrgetter('name')
        get_cmdline = operator.attrgetter('cmdline')
        get_children = operator.methodcaller('get_children')
    for level3_proc in get_children(screen_proc):
        if get_name(level3_proc) == 'login':
            # pstree: screen -- login -- sh
            level2_proc_list = get_children(level3_proc)
        else:
            # pstree: screen -- sh
            level2_proc_list = [level3_proc]
        for level2_proc in level2_proc_list:
            for level1_proc in get_children(level2_proc):
                if with_cmdline:
                    yield level1_proc.pid, get_cmdline(level1_proc)
                else:
                    yield level1_proc.pid
Example #8
def _test_round_trip_source(self, source, parser, leave_file=False, *args, **kwds):
    src_name = getattr(source,'name',None)
    src_encoding = getattr(source, 'encoding', None)
    source = list(source)
#    import pdb; pdb.set_trace()
    rt_src = sfm.pprint(parser(source, *args, **kwds)).splitlines(True)
    
    # Try for perfect match first
    if source == rt_src:
        self.assert_(True)
        return
    
    # Normalise line endings
    source = map(operator.methodcaller('rstrip'), source)
    rt_src = map(operator.methodcaller('rstrip'), rt_src)
    if source == rt_src:
        self.assert_(True)
        return
    
    # Normalise the \f ..\f* marker forms in the source
    source = map(operator.methodcaller('replace', u'\\ft ',u'\\fr*'), source)
    rt_src = map(operator.methodcaller('replace', u'\\ft ',u'\\fr*'), rt_src)
    
    if leave_file and src_name:
        codecs.open(src_name+'.normalised','w', 
                    encoding=src_encoding).writelines(l+'\n' for l in source)
        codecs.open(src_name+'.roundtrip','w', 
                    encoding=src_encoding).writelines(l+'\n' for l in rt_src)
    
    self.assertEqual(source, rt_src, 'roundtripped source not equal')
Example #9
 def run(self):
     try:
         while(True):
             cmd = ""
             args = []
             raw = raw_input(">")
             try:
                 cmd_and_args = raw.split()
                 if len(cmd_and_args) == 0: continue
                 cmd, args = (cmd_and_args[0], cmd_and_args[1:])
                 if cmd not in self.whiteListedMethods():
                     print "No command %s" % cmd
                     cmd = "help"
                 f = operator.methodcaller(cmd, *args)
             except ValueError:
                 cmd = raw
                 if cmd not in self.whiteListedMethods():
                     print "No command %s" % cmd
                     cmd = "help"
                 f = operator.methodcaller(cmd)
             try:
                 f(self)
             except TypeError as e:
                 # TODO print help doc string for method in cmd.
                 print "Wrong arguments for %s" % cmd
                 self.help(cmd)
     except (UIQuitException, EOFError, KeyboardInterrupt) as e:
         print
         return
Example #10
 def make_tests():
     for pos, stream in enumerate(streams):
         if v2_avail:
             # Calls StreamResult API.
             case = subunit.ByteStreamToStreamResult(
                 stream, non_subunit_name='stdout')
         else:
             # Calls TestResult API.
             case = subunit.ProtocolTestCase(stream)
             def wrap_result(result):
                 # Wrap in a router to mask out startTestRun/stopTestRun from the
                 # ExtendedToStreamDecorator.
                 result = testtools.StreamResultRouter(
                     result, do_start_stop_run=False)
                 # Wrap that in ExtendedToStreamDecorator to convert v1 calls to
                 # StreamResult.
                 return testtools.ExtendedToStreamDecorator(result)
             # Now calls StreamResult API :).
             case = testtools.DecorateTestCaseResult(case, wrap_result,
                 methodcaller('startTestRun'),
                 methodcaller('stopTestRun'))
         case = testtools.DecorateTestCaseResult(case,
             lambda result:testtools.StreamTagger(
                 [result], add=['worker-%d' % pos]))
         yield (case, str(pos))
Example #11
 def update(i):
     operator.methodcaller(method)(model)
     # print "ll: ", model.log_likelihood()
     return model.log_likelihood(), \
            model.perplexity(test_data), \
            model.copy_sample(), \
            time.time()
    def test_visible_fields_in_forms(self, browser):
        """Some fields should only be displayed when the word feature is
        enabled.
        Therefore we test the appearance of all fields.
        """
        fields = ['Title',
                  'Committeeresponsible',
                  'Protocol header template',
                  'Protocol suffix template',
                  'Agenda item header template for the protocol',
                  'Agenda item suffix template for the protocol',
                  'Excerpt header template',
                  'Excerpt suffix template',
                  'Agendaitem list template',
                  'Table of contents template',
                  'Linked repository folder',
                  'Ad hoc agenda item template',
                  'Paragraph template',
                  'Allowed proposal templates']
        with self.login(self.administrator, browser):
            browser.open(self.committee_container)
            factoriesmenu.add('Committee')
            self.assertEquals(
                fields,
                map(methodcaller('normalized_text', recursive=False),
                    browser.css('form#form > div.field > label')))

        with self.login(self.committee_responsible, browser):
            browser.open(self.committee, view='edit')
            self.assertEquals(
                fields,
                map(methodcaller('normalized_text', recursive=False),
                    browser.css('form#form > div.field > label')))
Example #13
def download_node_src(node_url, src_dir, opt, prefix):
    """
    Download source code
    """
    logger.info('.', extra=dict(continued=True))
    dl_contents = io.BytesIO(urlopen(node_url).read())
    logger.info('.', extra=dict(continued=True))

    if is_WIN or is_CYGWIN:
        ctx = zipfile.ZipFile(dl_contents)
        members = operator.methodcaller('namelist')
        member_name = lambda s: s  # noqa: E731
    else:
        ctx = tarfile_open(fileobj=dl_contents)
        members = operator.methodcaller('getmembers')
        member_name = operator.attrgetter('name')

    with ctx as archive:
        node_ver = re.escape(opt.node)
        rexp_string = r"%s-v%s[^/]*/(README\.md|CHANGELOG\.md|LICENSE)"\
            % (prefix, node_ver)
        extract_list = [
            member
            for member in members(archive)
            if re.match(rexp_string, member_name(member)) is None
        ]
        archive.extractall(src_dir, extract_list)
Example #14
    def test_lazy_orcall(self):
        def from_mimetype(request):
            # you can return both value or Option
            return request.get("mimetype", None)

        def from_extension(request):
            # you can return both value or Option
            return monad.Option(request.get("url", None))\
                        .map(lambda s: s.split(".")[-1])

        # extract value from extension
        r = dict(url="myfile.png")
        self.assertEqual("PNG", monad.Option(r.get("type", None)) \
                                     .or_call(from_mimetype, r) \
                                     .or_call(from_extension, r) \
                                     .map(operator.methodcaller("upper")) \
                                     .get_or(""))

        # extract value from mimetype
        r = dict(url="myfile.svg", mimetype="png")
        self.assertEqual("PNG", monad.Option(r.get("type", None)) \
                                     .or_call(from_mimetype, r) \
                                     .or_call(from_extension, r) \
                                     .map(operator.methodcaller("upper")) \
                                     .get_or(""))

        # type is set directly
        r = dict(url="myfile.jpeg", mimetype="svg", type="png")
        self.assertEqual("PNG", monad.Option(r.get("type", None)) \
                                     .or_call(from_mimetype, r) \
                                     .or_call(from_extension, r) \
                                     .map(operator.methodcaller("upper")) \
                                     .get_or(""))
Example #15
    def delete_children_matching(self, linespec):
        """Delete any child :class:`~models_cisco.IOSCfgLine` objects which 
        match ``linespec``.

        Parameters
        ----------

        linespec : str, required
             A string or python regular expression, which should be matched.  

        Returns
        -------

        retval : list
            A list of :class:`~models_cisco.IOSCfgLine` objects which were 
            deleted.

        Examples
        --------

        This example illustrates how you can use 
        :func:`~ccp_abc.delete_children_matching` to delete any description 
        on an interface.

        .. code-block:: python
           :emphasize-lines: 15

           >>> config = [
           ...     '!',
           ...     'interface Serial1/0',
           ...     ' description Some lame description',
           ...     ' ip address 1.1.1.1 255.255.255.252',
           ...     '!',
           ...     'interface Serial1/1',
           ...     ' description Another lame description',
           ...     ' ip address 1.1.1.5 255.255.255.252',
           ...     '!',
           ...     ]
           >>> parse = CiscoConfParse(config)
           >>>
           >>> for obj in parse.find_objects(r'^interface'):
           ...     obj.delete_children_matching(r'description')
           >>>
           >>> for line in parse.ioscfg:
           ...     print line
           ...
           !
           interface Serial1/0
            ip address 1.1.1.1 255.255.255.252
           !
           interface Serial1/1
            ip address 1.1.1.5 255.255.255.252
           !
           >>>
        """
        cobjs = filter(methodcaller('re_search', linespec), self.children)
        retval = map(attrgetter('text'), cobjs)
        # Delete the children
        map(methodcaller('delete'), cobjs)
        return retval
def main():
    args = command_line()

    index = 'user_id' if 'user' in args.file.lower() else 'business_id'

    directory = os.path.dirname(args.file) if os.path.dirname(args.file) else '.'

    print('Creating %s file in %s' % (args.factory, directory))
    df = to_df(args.file, index)

    if args.head:
        head = operator.methodcaller('head', args.head)
        df = head(df)

    if args.factory in ['csv', 'dict'] and not hasattr(args, 'dry_run'):

        original = args.file
        filename = original.replace(
            original.rpartition('.')[-1], args.factory
        )

        factory = operator.methodcaller(
            'to_'+args.factory,
            filename,
            chunksize=1028,
            encoding='utf-8',
        )
        factory(df)

    print(df.head())
def html_print(scan_time,
               scan_id,
               invalid_host_count,
               vulnerable_hosts=[],
               vulnerabilities=[],
               credentials=[]):
    TEMPLATE_DIR = os.path.abspath(os.path.dirname(__file__))
    TEMPLATE_DIR = os.path.join(TEMPLATE_DIR, 'templates')
    templateLoader = FileSystemLoader(searchpath=TEMPLATE_DIR)
    env = Environment(loader=templateLoader)
    env.filters['print_it'] = print_it
    report = env.get_template('html.tmpl')
    return report.render(title=CONF.email.report_subject,
                         scan_time=scan_time,
                         scan_id=scan_id,
                         vulnerablehosts=sorted([h for h in vulnerable_hosts if h.printable_host()], key=lambda host: canonicalise_ip(host.ip)),
                         dump=CONF.dump,
                         vulnerabilities=sorted(vulnerabilities,
                                                key=methodcaller('sort_key')),
                         credentials=sorted(credentials,
                                            key=methodcaller('sort_key')),
                         total=len(vulnerable_hosts),
                         list_boring=CONF.list_boring,
                         white_listed=len([h for h in vulnerable_hosts if h.whitelisted()]),
                         invalid_hosts=CONF.disable_host_validation,
                         invalid_host_count=invalid_host_count)
Example #18
    def UpdateStats(self, dataset):
        self.dataset = dataset
        self._fetch_hitters()
        self._fetch_pitchers()
        # First hitters
        for i in xrange(20):
            prev_list = self._hitters[:]
            self.calculate_hitter_mean()
            self.calculate_hitter_stddev()
            self._hitters.sort(key=operator.methodcaller("zscore", self), reverse=True)
            diff = [x for x, y in zip(prev_list, self._hitters) if x.id != y.id]
            if len(diff) == 0:
                print "Successfully found ideal hitter pool in %d iterations" % (i + 1)
                break
        else:
            print "Couldn't find ideal hitter pool in %d iterations" % (i + 1)

            # Now do pitchers
        for i in xrange(20):
            prev_list = self._pitchers[:]
            self.calculate_pitcher_mean()
            self.calculate_pitcher_stddev()
            self._pitchers.sort(key=operator.methodcaller("zscore", self), reverse=True)
            diff = [x for x, y in zip(prev_list, self._pitchers) if x.id != y.id]
            if len(diff) == 0:
                print "Successfully found ideal pitcher pool in %d iterations" % (i + 1)
                break
        else:
            print "Couldn't find ideal pitcher pool in %d iterations" % (i + 1)

        self.calculate_replacement_values()
        self._hitters.sort(key=operator.methodcaller("adjusted_zscore", self), reverse=True)
        self._pitchers.sort(key=operator.methodcaller("adjusted_zscore", self), reverse=True)
        self.calculate_sum_of_draft()
        self._initialize_free_agent_data()
        self.use_normal_dollar_values()

        print " ##### Hitting Stats #####"
        print "BA Mean: %.3f" % (self.ba_mean)
        print "BA STDDev: %.3f" % (self.h_sd)
        print "R Mean: %.3f" % (self.r_mean)
        print "R STDDev: %.3f" % (self.r_sd)
        print "HR Mean: %.3f" % (self.hr_mean)
        print "HR STDDev: %.3f" % (self.hr_sd)
        print "RBI Mean: %.3f" % (self.rbi_mean)
        print "RBI STDDev: %.3f" % (self.rbi_sd)
        print "SB Mean: %.3f" % (self.sb_mean)
        print "SB STDDev: %.3f" % (self.sb_sd)
        print " ##### Pitching Stats #####"
        print "ERA Mean: %.3f" % (self.era_mean)
        print "ERA STDDev: %.3f" % (self.er_sd)
        print "WHIP Mean: %.3f" % (self.whip_mean)
        print "WHIP STDDev: %.3f" % (self.wh_sd)
        print "W Mean: %.3f" % (self.w_mean)
        print "W STDDev: %.3f" % (self.w_sd)
        print "K Mean: %.3f" % (self.k_mean)
        print "K STDDev: %.3f" % (self.k_sd)
        print "S Mean: %.3f" % (self.s_mean)
        print "S STDDev: %.3f" % (self.s_sd)
Example #19
def main():
    s = "Hello, world! Hello, MOTO! Hello, world! Hello, MOTO! Hello, world! Hello, MOTO! "
    print groupby(s, key=methodcaller("isalnum"))

    for k, g in groupby(s, key=methodcaller("isalnum")):
        if k:
            word = ''.join(g)
            print k, g, word
 def parse_thread(self):
     """
     Supplies parameters for Polymath-blog
     to call parse_thread of superclass.
     """
     self.parse_thread_generic(
         methodcaller("find", "ol", {"id": "commentlist"}),
         methodcaller("find_all", "li"))
 def parse_thread(self):
     """
     Supplies parameters for Tao blog
     to call parse_thread of superclass.
     """
     self.parse_thread_generic(
         methodcaller("find", "div", {"class": "commentlist"}),
         methodcaller("find_all", "div", {"class": "comment"}))
Example #22
def test_methodcaller():
    with BufferingNodeExecutionContext(methodcaller('swapcase')) as context:
        context.write_sync('aaa', 'bBb', 'CcC')
    assert context.get_buffer() == list(map(ensure_tuple, ['AAA', 'BbB', 'cCc']))

    with BufferingNodeExecutionContext(methodcaller('zfill', 5)) as context:
        context.write_sync('a', 'bb', 'ccc')
    assert context.get_buffer() == list(map(ensure_tuple, ['0000a', '000bb', '00ccc']))
 def bestDiLepton(self, diLeptons):
     """Returns the best diLepton (1st precedence opposite-sign,
     2nd precedence highest pt1 + pt2)."""
     osDiLeptons = [dl for dl in diLeptons if dl.leg1().charge() != dl.leg2().charge()]
     if osDiLeptons:
         return max(osDiLeptons, key=operator.methodcaller("sumPt"))
     else:
         return max(diLeptons, key=operator.methodcaller("sumPt"))
Example #24
 def _parse_type(segment):
     # Discard leading digits (the 0 in 0a1)
     isdigit = operator.methodcaller('isdigit')
     segment = ''.join(itertools.dropwhile(isdigit, segment))
     isalpha = operator.methodcaller('isalpha')
     prerelease_type = ''.join(itertools.takewhile(isalpha, segment))
     prerelease = segment[len(prerelease_type)::]
     return prerelease_type, int(prerelease)
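 # For illustration (not part of the original snippet), the helper above maps
 # pre-release segments as follows:
 #   _parse_type('0a1') -> ('a', 1)
 #   _parse_type('rc2') -> ('rc', 2)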
Example #25
def generate_value_processor(type_, collectionFormat=None, items=None, **kwargs):
    """
    Create a callable that will take the string value of a header and cast it
    to the appropriate type.  This can involve:

    - splitting a header of type 'array' by its delimiters.
    - type casting the internal elements of the array.
    """
    processors = []
    if is_non_string_iterable(type_):
        assert False, "This should not be possible"
    else:
        if type_ == ARRAY and collectionFormat:
            delimeter = DELIMETERS[collectionFormat]
            # split the string based on the delimeter specified by the
            # `collectionFormat`
            processors.append(operator.methodcaller('split', delimeter))
            # remove any Falsy values like empty strings.
            processors.append(functools.partial(filter, bool))
            # strip off any whitespace
            processors.append(functools.partial(map, operator.methodcaller('strip')))
            if items is not None:
                if isinstance(items, collections.Mapping):
                    items_processors = itertools.repeat(
                        generate_value_processor(**items)
                    )
                elif isinstance(items, collections.Sequence):
                    items_processors = itertools.chain(
                        (generate_value_processor(**item) for item in items),
                        itertools.repeat(lambda v: v),
                    )
                elif isinstance(items, six.string_types):
                    raise NotImplementedError("Not implemented")
                else:
                    assert False, "Should not be possible"
                # 1. zip the processor and the array items together
                # 2. apply the processor to each array item.
                # 3. cast the starmap generator to a list.
                processors.append(
                    chain_reduce_partial(
                        functools.partial(zip, items_processors),
                        functools.partial(itertools.starmap, lambda fn, v: fn(v)),
                        list,
                    )
                )
        else:
            processors.append(
                functools.partial(cast_value_to_type, type_=type_)
            )

    def processor(value, **kwargs):
        try:
            return chain_reduce_partial(*processors)(value)
        except (ValueError, TypeError):
            return value

    return processor
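# A minimal usage sketch (assumptions: ARRAY == 'array', DELIMETERS maps 'csv'
# to ',', and cast_value_to_type can cast 'integer'; the item-schema key is
# taken from this snippet's own recursive call and is otherwise an assumption):
#
#   process = generate_value_processor(ARRAY, collectionFormat='csv',
#                                      items={'type_': 'integer'})
#   process('1, 2, 3')   # -> [1, 2, 3]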
Example #26
def list_parsers_by_type(cps=None):
    """
    :return: List (generator) of (config_type, [config_parser])
    """
    if cps is None:
        cps = PARSERS

    return ((t, sorted(p, key=operator.methodcaller("priority"))) for t, p
            in groupby_key(cps, operator.methodcaller("type")))
Example #27
def approvalqueue(context, request):
    if context.user.isAdmin:
        result = GetSubmittedProfiles(request, context.user)
        submitted= ifilter(methodcaller("isSubmitted"), result)
        approved = ifilter(methodcaller("isReviewed"), result)
    else:
        submitted, approved = [], []
    return {"success": True
            , "html": {"submitted":render("larryslist:admin/templates/ajax/approvalqueue.html", {'profiles': submitted}, request).strip()
                     ,  "approved":render("larryslist:admin/templates/ajax/approvalqueue.html", {'profiles': approved}, request).strip()}}
Example #28
    def get_variables(self):
        """
        Get a list of all variables in this flow graph namespace.
        Exclude parameterized variables.

        Returns:
            a sorted list of variable blocks in order of dependency (indep -> dep)
        """
        variables = filter(lambda b: _variable_matcher.match(b.get_key()), self.iter_enabled_blocks())
        return expr_utils.sort_objects(variables, methodcaller('get_id'), methodcaller('get_var_make'))
Example #29
 def test_methodcaller(self):
     from operator import methodcaller
     class X(object):
         def method(self, arg1=2, arg2=3):
             return arg1, arg2
     x = X()
     assert methodcaller("method")(x) == (2, 3)
     assert methodcaller("method", 4)(x) == (4, 3)
     assert methodcaller("method", 4, 5)(x) == (4, 5)
     assert methodcaller("method", 4, arg2=42)(x) == (4, 42)
Example #30
    def stop_trace(self):
        """Stop tracing."""

        print "Terminating ftcat processes..."
        map(methodcaller('terminate'), self.ftcats)

        print "Waiting for ftcats to finish..."
        map(methodcaller('wait'), self.ftcats)

        print "FeatherTrace done."
        index=pd.MultiIndex.from_product([["A"], ["a", "b"]]),
    )
    tm.assert_frame_equal(result, expected)


def test_dataframe_insert_raises():
    df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)
    msg = "Cannot specify"
    with pytest.raises(ValueError, match=msg):
        df.insert(0, "A", [3, 4], allow_duplicates=True)


@pytest.mark.parametrize(
    "method, frame_only",
    [
        (operator.methodcaller("set_index", "A", inplace=True), True),
        (operator.methodcaller("set_axis", ["A", "B"], inplace=True), False),
        (operator.methodcaller("reset_index", inplace=True), True),
        (operator.methodcaller("rename", lambda x: x, inplace=True), False),
    ],
)
def test_inplace_raises(method, frame_only):
    df = pd.DataFrame({
        "A": [0, 0],
        "B": [1, 2]
    }).set_flags(allows_duplicate_labels=False)
    s = df["A"]
    s.flags.allows_duplicate_labels = False
    msg = "Cannot specify"

    with pytest.raises(ValueError, match=msg):
        method(df)

    if not frame_only:
        with pytest.raises(ValueError, match=msg):
            method(s)
Example #32
 def nodes_by_distance_to(self, id: int) -> List[Node]:
     return sorted(self.nodes, key=operator.methodcaller('distance_to', id))
 def test_frozendict_method_viewitems(self):
     self._items_proxy(methodcaller('items'))
Example #34
 def get_min_weight(self):
     """
     Return the minimum weight in the rows
     """
     return min(
         map(operator.methodcaller('get_min_weight'), self._synaptic_rows))
    def __populateSeasonsData(self):
        seasons = []
        pqType = self.__navInfo.selectedPQType
        for seasonID, season in _getQuestsCache().getSeasons().iteritems():
            tiles = []
            for tile in sorted(season.getTiles().values(),
                               key=operator.methodcaller('getID')):
                isCompleted, isUnlocked = tile.isAwardAchieved(
                ), tile.isUnlocked()
                iconID = tile.getIconID()
                if isCompleted:
                    bgImgUp = event_items.getTileNormalUpIconPath(iconID)
                    bgImgOver = event_items.getTileNormalOverIconPath(iconID)
                else:
                    bgImgUp = event_items.getTileGrayUpIconPath(iconID)
                    bgImgOver = event_items.getTileGrayOverIconPath(iconID)
                vehicleBonus = tile.getVehicleBonus()
                if vehicleBonus is not None:
                    vehLevelStr = icons.makeImageTag(
                        Vehicle.getLevelSmallIconPath(vehicleBonus.level), 16,
                        16, -3, 0)
                    vehTypeStr = icons.makeImageTag(
                        Vehicle.getTypeSmallIconPath(vehicleBonus.type), 16,
                        16, -3, 0)
                    vehicleBonusLabel = i18n.makeString(
                        QUESTS.PERSONAL_SEASONS_TILELABEL,
                        type=vehTypeStr,
                        level=vehLevelStr,
                        name=vehicleBonus.userName)
                else:
                    vehicleBonusLabel = ''
                tokenIcon = icons.makeImageTag(
                    RES_ICONS.MAPS_ICONS_QUESTS_TOKEN16, 16, 16, -3, 0)
                if isUnlocked and not isCompleted and pqType == QUESTS_ALIASES.SEASON_VIEW_TAB_RANDOM:
                    gottenTokensCount, totalTokensCount = tile.getTokensCount()
                    progress = text_styles.standard(
                        i18n.makeString(QUESTS.PERSONAL_SEASONS_TILEPROGRESS,
                                        count=text_styles.gold(
                                            str(gottenTokensCount)),
                                        total=str(totalTokensCount),
                                        icon=tokenIcon))
                else:
                    progress = ''
                if tile.isFullCompleted():
                    animation = event_items.getTileAnimationPath(iconID)
                else:
                    animation = None
                if pqType == QUESTS_ALIASES.SEASON_VIEW_TAB_RANDOM:
                    tooltipType = TOOLTIPS_CONSTANTS.PRIVATE_QUESTS_TILE
                else:
                    tooltipType = TOOLTIPS_CONSTANTS.PRIVATE_QUESTS_FALLOUT_TILE
                tiles.append({
                    'id':
                    tile.getID(),
                    'isNew':
                    isUnlocked and quest_settings.isPQTileNew(tile.getID()),
                    'label':
                    text_styles.standard(vehicleBonusLabel),
                    'progress':
                    progress,
                    'isCompleted':
                    isUnlocked and isCompleted,
                    'enabled':
                    isUnlocked,
                    'image':
                    bgImgUp,
                    'imageOver':
                    bgImgOver,
                    'animation':
                    animation,
                    'tooltipType':
                    tooltipType
                })

            seasons.append({
                'id': seasonID,
                'title': quests_fmts.getFullSeasonUserName(season),
                'tiles': tiles
            })

        self.as_setSeasonsDataS({'seasons': seasons})
        return
Example #36
 def nodes_by_id_distance(self, id):
     assert isinstance(id, (int, long))
     return sorted(self.nodes, key=operator.methodcaller('id_distance', id))
from operator import methodcaller
s = 'The time has come'
upcase = methodcaller('upper')
upcase(s)

hyphenate = methodcaller('replace', ' ', '-')
hyphenate(s)
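
# methodcaller also works well as a sort key; a small sketch (not part of the
# original snippet):
words = ['banana', 'Apple', 'cherry']
sorted(words, key=methodcaller('lower'))   # -> ['Apple', 'banana', 'cherry']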
Example #38
def getdata(filename,
            *args,
            header=None,
            lower=None,
            upper=None,
            view=None,
            **kwargs):
    """
    Get the data from an extension of a FITS file (and optionally the
    header).

    Parameters
    ----------
    filename : file path, file object, or file like object
        File to get data from.  If opened, mode must be one of the
        following: ``rb``, ``rb+``, or ``ab+``.

    ext
        The rest of the arguments are for extension specification.
        They are flexible and are best illustrated by examples.

        No extra arguments implies the primary header::

            getdata('in.fits')

        By extension number::

            getdata('in.fits', 0)      # the primary header
            getdata('in.fits', 2)      # the second extension
            getdata('in.fits', ext=2)  # the second extension

        By name, i.e., ``EXTNAME`` value (if unique)::

            getdata('in.fits', 'sci')
            getdata('in.fits', extname='sci')  # equivalent

        Note ``EXTNAME`` values are not case sensitive

        By combination of ``EXTNAME`` and ``EXTVER`` as separate
        arguments or as a tuple::

            getdata('in.fits', 'sci', 2)  # EXTNAME='SCI' & EXTVER=2
            getdata('in.fits', extname='sci', extver=2)  # equivalent
            getdata('in.fits', ('sci', 2))  # equivalent

        Ambiguous or conflicting specifications will raise an exception::

            getdata('in.fits', ext=('sci',1), extname='err', extver=2)

    header : bool, optional
        If `True`, return the data and the header of the specified HDU as a
        tuple.

    lower, upper : bool, optional
        If ``lower`` or ``upper`` are `True`, the field names in the
        returned data object will be converted to lower or upper case,
        respectively.

    view : ndarray, optional
        When given, the data will be returned wrapped in the given ndarray
        subclass by calling::

           data.view(view)

    kwargs
        Any additional keyword arguments to be passed to
        `astropy.io.fits.open`.

    Returns
    -------
    array : array, record array or groups data object
        Type depends on the type of the extension being referenced.

        If the optional keyword ``header`` is set to `True`, this
        function will return a (``data``, ``header``) tuple.
    """

    mode, closed = _get_file_mode(filename)

    hdulist, extidx = _getext(filename, mode, *args, **kwargs)
    try:
        hdu = hdulist[extidx]
        data = hdu.data
        if data is None and extidx == 0:
            try:
                hdu = hdulist[1]
                data = hdu.data
            except IndexError:
                raise IndexError('No data in this HDU.')
        if data is None:
            raise IndexError('No data in this HDU.')
        if header:
            hdr = hdu.header
    finally:
        hdulist.close(closed=closed)

    # Change case of names if requested
    trans = None
    if lower:
        trans = operator.methodcaller('lower')
    elif upper:
        trans = operator.methodcaller('upper')
    if trans:
        if data.dtype.names is None:
            # this data does not have fields
            return
        if data.dtype.descr[0][0] == '':
            # this data does not have fields
            return
        data.dtype.names = [trans(n) for n in data.dtype.names]

    # allow different views into the underlying ndarray.  Keep the original
    # view just in case there is a problem
    if isinstance(view, type) and issubclass(view, np.ndarray):
        data = data.view(view)

    if header:
        return data, hdr
    else:
        return data
Example #39
print(l)
del l[3:5]
print(l)

# We can do the same thing this way:
l = [1, 2, 3, 4]
operator.getitem(l, slice(0, 2))
operator.setitem(l, slice(0, 2), ['a', 'b', 'c'])
print(l)
operator.delitem(l, slice(3, 5))
print(l)

# #### Calling another Callable
x = 'python'
x.upper()
operator.methodcaller('upper')('python')

# Of course, since **upper** is just an attribute of the string object **x**, we could also have used:
operator.attrgetter('upper')(x)()


# If the callable takes in more than one parameter, they can be specified as additional arguments in **methodcaller**:
class MyClass:
    def __init__(self):
        self.a = 10
        self.b = 20

    def do_something(self, c):
        print(self.a, self.b, c)

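# For example (a minimal sketch completing the thought above), the extra
# positional argument is passed through to the bound method:
obj = MyClass()
operator.methodcaller('do_something', 100)(obj)  # prints: 10 20 100
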
Example #40
 def encode(self):
     import operator
     return bytearray().join(
         map(operator.methodcaller("encode"), self._lower()))
Example #41
 def flip_weights(self):
     """
     Flips the weights of each row from positive to negative and vice versa.
     """
     flip = operator.methodcaller('flip_weights')
     for row in self._synaptic_rows:
         flip(row)
 def test_frozendict_method_viewvalues(self):
     self._values_proxy(methodcaller('values'))
Example #43
        return unichr(codepoint)
    else:
        return '\N{REPLACEMENT CHARACTER}'  # U+FFFD

UNICODE_UNESCAPE = functools.partial(
    re.compile(COMPILED_MACROS['unicode'], re.I).sub,
    _unicode_replace)

NEWLINE_UNESCAPE = functools.partial(
    re.compile(r'()\\' + COMPILED_MACROS['nl']).sub,
    '')

SIMPLE_UNESCAPE = functools.partial(
    re.compile(r'\\(%s)' % COMPILED_MACROS['simple_escape'] , re.I).sub,
    # Same as r'\1', but faster on CPython
    operator.methodcaller('group', 1))

FIND_NEWLINES = lambda x : list(re.compile(COMPILED_MACROS['nl']).finditer(x))


class Token(object):
    r"""A single atomic token.

    .. attribute:: is_container

        Always ``False``.
        Helps to tell :class:`Token` apart from :class:`ContainerToken`.

    .. attribute:: type

        The type of token as a string:
 def assertFirst(self, expected, templates):
     templates = sorted(templates,
                        key=methodcaller('sharingKey'),
                        reverse=True)
     self.assertEqual(expected, templates[0])
Example #45
import operator
import logging
import re

from django.conf import settings
from django.utils.cache import patch_vary_headers

from subdomains.utils import get_domain

logger = logging.getLogger(__name__)
lower = operator.methodcaller('lower')

UNSET = object()


class SubdomainMiddleware:
    """
    A middleware class that adds a ``subdomain`` attribute to the current request.
    """
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        self.process_request(request)
        return self.process_response(request, self.get_response(request))

    def process_request(self, request):
        """
        Adds a ``subdomain`` attribute to the ``request`` parameter.
        """
        domain, host = map(lower, (get_domain(), request.get_host()))
 def _sortTemplates(self, templates=None):
     """Order templates by precedence."""
     if templates is None:
         templates = self.templates
     return sorted(templates, key=methodcaller('sharingKey'), reverse=True)
import json

from datetime import datetime
from operator import methodcaller

from sqlalchemy import func

from ckan.lib.search import rebuild, commit, clear
import ckan.model as model
import ckan.plugins.toolkit as tk
import ckan.logic as logic
import ckan.lib.helpers as h
from ckan.common import response, request, config, g, c
from ckan.controllers.package import PackageController

ascii = methodcaller('encode', 'ascii', 'ignore')


def set_attachment(response, filename):
    response.headers["Content-Disposition"
                     ] = "attachment; filename=" + filename


def get_key(container, key, default=''):
    try:
        return container[key]
    except (AttributeError, KeyError, TypeError):
        return default


class NSWController(PackageController):
Example #48

if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)

    def itervalues(d, **kw):
        return d.itervalues(**kw)

    def iteritems(d, **kw):
        return d.iteritems(**kw)

    def iterlists(d, **kw):
Example #49
 def format_help(self, formatter=None):
     self.option_list.sort(key=operator.methodcaller('get_opt_string'))
     return optparse.OptionParser.format_help(self, formatter=formatter)
Example #50
 def has_child_with(self, linespec):
     return bool(filter(methodcaller('re_search', linespec), self.children))
Example #51
if PY3:

    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s

    if sys.version_info[1] <= 1:

        def int2byte(i):
            return bytes((i, ))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:

    def b(s):
        return s

    def u(s):
        return unicode(s, "unicode_escape")

    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
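# For illustration (not part of the original snippet): both branches satisfy
# int2byte(65) == b'A'; methodcaller("to_bytes", 1, "big") simply calls
# (65).to_bytes(1, "big") on the integer it receives.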
Example #52
 def buckets_by_distance_to(self, id: int) -> List[KBucket]:
     return sorted(self.buckets,
                   key=operator.methodcaller('distance_to', id))
Example #53
 def test_transform_method_name(self, method):
     # GH 19760
     df = pd.DataFrame({"A": [-1, 2]})
     result = df.transform(method)
     expected = operator.methodcaller(method)(df)
     tm.assert_frame_equal(result, expected)
Example #54
	def sorted(self, attribute):
		self.replace_values(attribute, None, "")
		try:
			return sorted(self, key=methodcaller(attribute))
		except TypeError:
			return sorted(self, key=attrgetter(attribute))
class TestRaises:
    @pytest.mark.parametrize(
        "cls, axes",
        [
            (pd.Series, {
                "index": ["a", "a"],
                "dtype": float
            }),
            (pd.DataFrame, {
                "index": ["a", "a"]
            }),
            (pd.DataFrame, {
                "index": ["a", "a"],
                "columns": ["b", "b"]
            }),
            (pd.DataFrame, {
                "columns": ["b", "b"]
            }),
        ],
    )
    def test_set_flags_with_duplicates(self, cls, axes):
        result = cls(**axes)
        assert result.flags.allows_duplicate_labels is True

        msg = "Index has duplicates."
        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
            cls(**axes).set_flags(allows_duplicate_labels=False)

    @pytest.mark.parametrize(
        "data",
        [
            pd.Series(index=[0, 0], dtype=float),
            pd.DataFrame(index=[0, 0]),
            pd.DataFrame(columns=[0, 0]),
        ],
    )
    def test_setting_allows_duplicate_labels_raises(self, data):
        msg = "Index has duplicates."
        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
            data.flags.allows_duplicate_labels = False

        assert data.flags.allows_duplicate_labels is True

    @pytest.mark.parametrize(
        "func",
        [operator.methodcaller("append", pd.Series(0, index=["a", "b"]))])
    def test_series_raises(self, func):
        s = pd.Series([0, 1],
                      index=["a",
                             "b"]).set_flags(allows_duplicate_labels=False)
        msg = "Index has duplicates."
        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
            func(s)

    @pytest.mark.parametrize(
        "getter, target",
        [
            (operator.itemgetter(["A", "A"]), None),
            # loc
            (operator.itemgetter(["a", "a"]), "loc"),
            pytest.param(operator.itemgetter(("a", ["A", "A"])),
                         "loc",
                         marks=not_implemented),
            (operator.itemgetter((["a", "a"], "A")), "loc"),
            # iloc
            (operator.itemgetter([0, 0]), "iloc"),
            pytest.param(operator.itemgetter((0, [0, 0])),
                         "iloc",
                         marks=not_implemented),
            pytest.param(operator.itemgetter(([0, 0], 0)),
                         "iloc",
                         marks=not_implemented),
        ],
    )
    def test_getitem_raises(self, getter, target):
        df = pd.DataFrame({
            "A": [1, 2],
            "B": [3, 4]
        }, index=["a", "b"]).set_flags(allows_duplicate_labels=False)
        if target:
            # df, df.loc, or df.iloc
            target = getattr(df, target)
        else:
            target = df

        msg = "Index has duplicates."
        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
            getter(target)

    @pytest.mark.parametrize(
        "objs, kwargs",
        [(
            [
                pd.Series(1, index=[0, 1],
                          name="a").set_flags(allows_duplicate_labels=False),
                pd.Series(2, index=[0, 1],
                          name="a").set_flags(allows_duplicate_labels=False),
            ],
            {
                "axis": 1
            },
        )],
    )
    def test_concat_raises(self, objs, kwargs):
        msg = "Index has duplicates."
        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
            pd.concat(objs, **kwargs)

    @not_implemented
    def test_merge_raises(self):
        a = pd.DataFrame({
            "A": [0, 1, 2]
        }, index=["a", "b", "c"]).set_flags(allows_duplicate_labels=False)
        b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"])
        msg = "Index has duplicates."
        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
            pd.merge(a, b, left_index=True, right_index=True)
class TestPreserves:
    @pytest.mark.parametrize(
        "cls, data",
        [
            (pd.Series, np.array([])),
            (pd.Series, [1, 2]),
            (pd.DataFrame, {}),
            (pd.DataFrame, {
                "A": [1, 2]
            }),
        ],
    )
    def test_construction_ok(self, cls, data):
        result = cls(data)
        assert result.flags.allows_duplicate_labels is True

        result = cls(data).set_flags(allows_duplicate_labels=False)
        assert result.flags.allows_duplicate_labels is False

    @pytest.mark.parametrize(
        "func",
        [
            operator.itemgetter(["a"]),
            operator.methodcaller("add", 1),
            operator.methodcaller("rename", str.upper),
            operator.methodcaller("rename", "name"),
            operator.methodcaller("abs"),
            np.abs,
        ],
    )
    def test_preserved_series(self, func):
        s = pd.Series([0, 1],
                      index=["a",
                             "b"]).set_flags(allows_duplicate_labels=False)
        assert func(s).flags.allows_duplicate_labels is False

    @pytest.mark.parametrize(
        "other",
        [pd.Series(0, index=["a", "b", "c"]),
         pd.Series(0, index=["a", "b"])])
    # TODO: frame
    @not_implemented
    def test_align(self, other):
        s = pd.Series([0, 1],
                      index=["a",
                             "b"]).set_flags(allows_duplicate_labels=False)
        a, b = s.align(other)
        assert a.flags.allows_duplicate_labels is False
        assert b.flags.allows_duplicate_labels is False

    def test_preserved_frame(self):
        df = pd.DataFrame({
            "A": [1, 2],
            "B": [3, 4]
        }, index=["a", "b"]).set_flags(allows_duplicate_labels=False)
        assert df.loc[["a"]].flags.allows_duplicate_labels is False
        assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False

    @not_implemented
    def test_to_frame(self):
        s = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False)
        assert s.to_frame().flags.allows_duplicate_labels is False

    @pytest.mark.parametrize("func", ["add", "sub"])
    @pytest.mark.parametrize(
        "frame", [False, pytest.param(True, marks=not_implemented)])
    @pytest.mark.parametrize("other", [1, pd.Series([1, 2], name="A")])
    def test_binops(self, func, other, frame):
        df = pd.Series([1, 2], name="A",
                       index=["a",
                              "b"]).set_flags(allows_duplicate_labels=False)
        if frame:
            df = df.to_frame()
        if isinstance(other, pd.Series) and frame:
            other = other.to_frame()
        func = operator.methodcaller(func, other)
        assert df.flags.allows_duplicate_labels is False
        assert func(df).flags.allows_duplicate_labels is False

    @not_implemented
    def test_preserve_getitem(self):
        df = pd.DataFrame({
            "A": [1, 2]
        }).set_flags(allows_duplicate_labels=False)
        assert df[["A"]].flags.allows_duplicate_labels is False
        assert df["A"].flags.allows_duplicate_labels is False
        assert df.loc[0].flags.allows_duplicate_labels is False
        assert df.loc[[0]].flags.allows_duplicate_labels is False
        assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False

    @pytest.mark.xfail(reason="Unclear behavior.")
    def test_ndframe_getitem_caching_issue(self):
        # NDFrame.__getitem__ will cache the first df['A']. May need to
        # invalidate that cache? Update the cached entries?
        df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False)
        assert df["A"].flags.allows_duplicate_labels is False
        df.flags.allows_duplicate_labels = True
        assert df["A"].flags.allows_duplicate_labels is True

    @pytest.mark.parametrize(
        "objs, kwargs",
        [
            # Series
            (
                [
                    pd.Series(1, index=["a", "b"]).set_flags(
                        allows_duplicate_labels=False),
                    pd.Series(2, index=["c", "d"]).set_flags(
                        allows_duplicate_labels=False),
                ],
                {},
            ),
            (
                [
                    pd.Series(1, index=["a", "b"]).set_flags(
                        allows_duplicate_labels=False),
                    pd.Series(2, index=["a", "b"]).set_flags(
                        allows_duplicate_labels=False),
                ],
                {
                    "ignore_index": True
                },
            ),
            (
                [
                    pd.Series(1, index=["a", "b"]).set_flags(
                        allows_duplicate_labels=False),
                    pd.Series(2, index=["a", "b"]).set_flags(
                        allows_duplicate_labels=False),
                ],
                {
                    "axis": 1
                },
            ),
            # Frame
            (
                [
                    pd.DataFrame({
                        "A": [1, 2]
                    }, index=["a", "b"
                              ]).set_flags(allows_duplicate_labels=False),
                    pd.DataFrame({
                        "A": [1, 2]
                    }, index=["c", "d"
                              ]).set_flags(allows_duplicate_labels=False),
                ],
                {},
            ),
            (
                [
                    pd.DataFrame({
                        "A": [1, 2]
                    }, index=["a", "b"
                              ]).set_flags(allows_duplicate_labels=False),
                    pd.DataFrame({
                        "A": [1, 2]
                    }, index=["a", "b"
                              ]).set_flags(allows_duplicate_labels=False),
                ],
                {
                    "ignore_index": True
                },
            ),
            (
                [
                    pd.DataFrame({
                        "A": [1, 2]
                    }, index=["a", "b"
                              ]).set_flags(allows_duplicate_labels=False),
                    pd.DataFrame({
                        "B": [1, 2]
                    }, index=["a", "b"
                              ]).set_flags(allows_duplicate_labels=False),
                ],
                {
                    "axis": 1
                },
            ),
            # Series / Frame
            (
                [
                    pd.DataFrame({
                        "A": [1, 2]
                    }, index=["a", "b"
                              ]).set_flags(allows_duplicate_labels=False),
                    pd.Series(
                        [1, 2],
                        index=["a", "b"],
                        name="B",
                    ).set_flags(allows_duplicate_labels=False, ),
                ],
                {
                    "axis": 1
                },
            ),
        ],
    )
    def test_concat(self, objs, kwargs):
        result = pd.concat(objs, **kwargs)
        assert result.flags.allows_duplicate_labels is False

    @pytest.mark.parametrize(
        "left, right, kwargs, expected",
        [
            # false false false
            pytest.param(
                pd.DataFrame({
                    "A": [0, 1]
                }, index=["a", "b"]).set_flags(allows_duplicate_labels=False),
                pd.DataFrame({
                    "B": [0, 1]
                }, index=["a", "d"]).set_flags(allows_duplicate_labels=False),
                {
                    "left_index": True,
                    "right_index": True
                },
                False,
                marks=not_implemented,
            ),
            # false true false
            pytest.param(
                pd.DataFrame({
                    "A": [0, 1]
                }, index=["a", "b"]).set_flags(allows_duplicate_labels=False),
                pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),
                {
                    "left_index": True,
                    "right_index": True
                },
                False,
                marks=not_implemented,
            ),
            # true true true
            (
                pd.DataFrame({"A": [0, 1]}, index=["a", "b"]),
                pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),
                {
                    "left_index": True,
                    "right_index": True
                },
                True,
            ),
        ],
    )
    def test_merge(self, left, right, kwargs, expected):
        result = pd.merge(left, right, **kwargs)
        assert result.flags.allows_duplicate_labels is expected

    @not_implemented
    def test_groupby(self):
        # XXX: This is under tested
        # TODO:
        #  - apply
        #  - transform
        #  - Should passing a grouper that disallows duplicates propagate?
        df = pd.DataFrame({
            "A": [1, 2, 3]
        }).set_flags(allows_duplicate_labels=False)
        result = df.groupby([0, 0, 1]).agg("count")
        assert result.flags.allows_duplicate_labels is False

    @pytest.mark.parametrize("frame", [True, False])
    @not_implemented
    def test_window(self, frame):
        df = pd.Series(
            1,
            index=pd.date_range("2000", periods=12),
            name="A",
            allows_duplicate_labels=False,
        )
        if frame:
            df = df.to_frame()
        assert df.rolling(3).mean().flags.allows_duplicate_labels is False
        assert df.ewm(3).mean().flags.allows_duplicate_labels is False
        assert df.expanding(3).mean().flags.allows_duplicate_labels is False
 class Test_BaseSet_Python2(Test_BaseSet, unittest.TestCase):
     itemcaller = methodcaller('items')
Example #58
 def _invoke(self, method, *args):
     forEach(operator.methodcaller(method, *args), self.__plugins.itervalues())
Example #59
def sort_by_distance(nodes: List[Node], target_id: int) -> List[Node]:
    return sorted(nodes, key=operator.methodcaller('distance_to', target_id))
 def test_frozendict_method_viewkeys(self):
     self._keys_proxy(methodcaller('keys'))