Example #1
1
    def _argcheck(*args, **kwargs):
        """
        Check that arguments are consistent with spark array construction.

        Returns True when any of the following holds:
        (1) a positional argument is a SparkContext
        (2) keyword arg 'context' is a SparkContext
        (3) an argument is a BoltArraySpark, or
        (4) an argument is a nested list containing a BoltArraySpark
        """
        # Without pyspark installed, a spark-backed array can never apply.
        try:
            from pyspark import SparkContext
        except ImportError:
            return False

        def contains_spark_array(value):
            # True when value is a list/tuple holding a BoltArraySpark.
            if not isinstance(value, (tuple, list)):
                return False
            return any(isinstance(item, BoltArraySpark) for item in value)

        if any(isinstance(arg, SparkContext) for arg in args):
            return True
        if isinstance(kwargs.get("context", None), SparkContext):
            return True
        if any(isinstance(arg, BoltArraySpark) for arg in args):
            return True
        return any(contains_spark_array(arg) for arg in args)
Example #2
1
    def test_smart_strings(self):
        """Lxml smart strings return values"""

        class SmartStringsSelector(Selector):
            _lxml_smart_strings = True

        body = """<body>
                    <div class='one'>
                      <ul>
                        <li>one</li><li>two</li>
                      </ul>
                    </div>
                    <div class='two'>
                      <ul>
                        <li>four</li><li>five</li><li>six</li>
                      </ul>
                    </div>
                  </body>"""

        response = HtmlResponse(url="http://example.com", body=body)

        def parent_links(results):
            # Whether each extracted node exposes lxml's .getparent()
            return [hasattr(result._root, "getparent") for result in results]

        # Plain selector: .getparent() is NOT available for text nodes
        # and attributes when smart strings are off.
        plain = self.sscls(response)
        self.assertFalse(any(parent_links(plain.xpath("//li/text()"))))
        self.assertFalse(any(parent_links(plain.xpath("//div/@class"))))

        # Smart-string selector: every extracted value keeps a parent link.
        smart = SmartStringsSelector(response)
        self.assertTrue(all(parent_links(smart.xpath("//li/text()"))))
        self.assertTrue(all(parent_links(smart.xpath("//div/@class"))))
 def _copy_configuration(self):
     """Copy configuration flags from the derivation parents.

     A flag is enabled on this distroseries when it is enabled on at
     least one parent (logical OR across ``self.derivation_parents``).
     """
     self.distroseries.backports_not_automatic = any(
         parent.backports_not_automatic for parent in self.derivation_parents
     )
     self.distroseries.include_long_descriptions = any(
         parent.include_long_descriptions for parent in self.derivation_parents
     )
Example #4
1
def word_count_dict_to_html(word_count_dict, type, ignores, includes):
    """Render a word -> frequency mapping as an HTML word cloud.

    NOTE(review): Python 2 only -- the tuple-unpacking lambdas and the
    ``unicode`` builtin below do not exist in Python 3.

    :param word_count_dict: mapping of word -> occurrence count.
    :param type: cloud kind; short-word filtering only applies to "word"
        (the name shadows the ``type`` builtin -- kept for API compat).
    :param ignores: compiled regex patterns; matching words are dropped.
    :param includes: compiled regex patterns; short words matching one of
        these are kept anyway.
    :returns: ``(cloud_words, html_doc)`` -- the (word, freq) pairs used
        and the full HTML document; ``([], "")`` when nothing survives
        filtering.
    """
    # Ascending sort by frequency, so slicing from the end below picks
    # the most frequent words.
    sorted_dict = sorted(word_count_dict.items(), key=lambda (word, freq): freq)

    if type == "word":
        # filter short words
        sorted_dict = [
            (word, freq)
            for (word, freq) in sorted_dict
            if len(word) > 4 or any(pattern.match(word) for pattern in includes)
        ]

    # filter words in ignore_list
    sorted_dict = [
        (word, freq)
        for (word, freq) in sorted_dict
        # if word.lower() not in ignore_list]
        if not any(pattern.match(word) for pattern in ignores)
    ]

    # Cloud size cap.
    number_of_words = 42

    # only take the longest words. If there are less words than n,
    # len(sorted_dict) words are returned
    # NOTE(review): "longest" actually means "most frequent" here -- the
    # list is sorted by frequency, not by word length.
    cloud_words = sorted_dict[-number_of_words:]

    if not cloud_words:
        return [], ""

    # Frequency range drives the font scaling below.
    min_count = cloud_words[0][1]
    max_count = cloud_words[-1][1]

    delta_count = max_count - min_count
    if delta_count == 0:
        delta_count = 1

    min_font_size = 10
    max_font_size = 50

    font_delta = max_font_size - min_font_size

    # sort words with unicode sort function
    # NOTE(review): stock Python has no ``unicode.coll``; presumably a
    # monkey-patched, locale-aware collation helper -- confirm.
    cloud_words.sort(key=lambda (word, count): unicode.coll(word))

    html_elements = []

    for index, (word, count) in enumerate(cloud_words):
        # NOTE(review): with int counts this is Python 2 floor division, so
        # font_factor is 0 for all but top-frequency words -- confirm whether
        # a float ratio was intended.
        font_factor = (count - min_count) / delta_count
        font_size = int(min_font_size + font_factor * font_delta)

        html_elements.append(
            '<a href="search/%s">'
            '<span style="font-size:%spx">%s</span></a>' % (index, font_size, word) +
            # Add some whitespace
            "&#xA0;"
        )

    html_body = "".join(["<body>", "\n".join(html_elements), "\n</body>\n"])
    html_doc = "".join(["<html><head>", CLOUD_CSS, "</head>", html_body, "</html>"])

    return (cloud_words, html_doc)
Example #5
1
    def indent_code(self, code):
        """Accepts a string of code or a list of code lines"""

        # A single string: split into lines, indent them, and re-join.
        if isinstance(code, string_types):
            return "".join(self.indent_code(code.splitlines(True)))

        tab = "  "
        inc_regex = ("^function ", "^if ", "^elseif ", "^else$", "^for ")
        dec_regex = ("^end$", "^elseif ", "^else$")

        # Work on lines with their original left whitespace removed.
        stripped = [line.lstrip(" \t") for line in code]

        def level_change(patterns, line):
            # 1 when any pattern matches the line, else 0.
            return int(any(search(pattern, line) for pattern in patterns))

        indented = []
        depth = 0
        for line in stripped:
            # Blank lines keep the current depth and get no indentation.
            if line in ("", "\n"):
                indented.append(line)
                continue
            depth -= level_change(dec_regex, line)
            indented.append("%s%s" % (tab * depth, line))
            depth += level_change(inc_regex, line)
        return indented
Example #6
1
 def test_publish_as_user(self):
     """Publishing t1a as a specific user creates droplets for both targets.

     Expects exactly two droplets whose publications are t3a and t3b, with
     ``published_by`` recorded as the acting user.
     """
     Droplet.objects.publish(self.t1a, as_user=self.user)
     droplets = Droplet.objects.all()
     self.assertEqual(len(droplets), 2)
     self.assertTrue(any(d.publication == self.t3a for d in droplets))
     self.assertTrue(any(d.publication == self.t3b for d in droplets))
     self.assertEqual(droplets[0].published_by, self.user)
Example #7
0
def parse_enmedia(raw, auth_token, guid, note_store):
    """Rewrite <en-media> tags in Evernote note markup as plain HTML.

    Image resources (jpeg/gif/png) are fetched from the note store by
    content hash, copied into the blobstore, and replaced with an <img>
    tag; all other <en-media> tags are stripped.

    :param raw: note ENML/HTML markup as a string
    :param auth_token: Evernote auth token used for resource lookup
    :param guid: GUID of the note owning the resources
    :param note_store: Evernote NoteStore client
    :returns: the rewritten markup. (BUG FIX: the original computed the
        result but never returned it, so callers always received None.)
    """
    # find all <en-media> attribute strings
    sub_medias = re.findall("<en-media(.*?)>", raw, re.M)
    for subm in sub_medias:
        split = re.split("[' =\"]", subm)
        # `or` short-circuits; the original's bitwise `|` forced all three
        # scans but yielded the same truth value.
        if (
            any("image/jpeg" in s for s in split)
            or any("image/gif" in s for s in split)
            or any("image/png" in s for s in split)
        ):
            hash_str = re.search("hash=[\"'](.*?)[\"']", subm)
            # NOTE: .decode("hex") is Python 2 only -- matches the rest of
            # this (GAE files API era) module.
            resource = note_store.getResourceByHash(
                auth_token, guid, hash_str.group(1).decode("hex"), True, False, False
            )
            file_name = files.blobstore.create(mime_type=resource.mime)
            with files.open(file_name, "ab") as f:
                f.write(resource.data.body)
            files.finalize(file_name)
            blob_key = files.blobstore.get_blob_key(file_name)
            raw = raw.replace(
                "<en-media" + subm + ">", '<img src="http://www.knonce.com/resource/XXX" width="123" height="123"/>'
            )
        else:
            raw = raw.replace("<en-media" + subm + ">", "")

    # BUG FIX: str.replace treated this regex as a literal string, so the
    # closing tags were never removed; use re.sub instead.
    raw = re.sub("</en-media(.*?)>", "", raw)
    return raw
Example #8
0
def satisfies_search_terms(task, search_terms):
    """Return True when *task* matches any of the provided search terms.

    Recognised keys in *search_terms*: "any" (substring search over
    several task fields), "result", "args", "kwargs" and "state".
    An empty set of terms matches every task.
    """
    any_term = search_terms.get("any")
    result_term = search_terms.get("result")
    args_terms = search_terms.get("args")
    kwargs_terms = search_terms.get("kwargs")
    state_terms = search_terms.get("state")

    # No criteria supplied -> everything matches.
    if not (any_term or result_term or args_terms or kwargs_terms or state_terms):
        return True

    def matches_any_field():
        # Substring search across the task's printable fields.
        haystack = "|".join(
            filter(
                None, [task.name, task.uuid, task.state, task.worker.hostname, task.args, task.kwargs, str(task.result)]
            )
        )
        return any_term in haystack

    checks = [
        state_terms and task.state in state_terms,
        any_term and matches_any_field(),
        result_term and result_term in task.result,
        kwargs_terms
        and all(stringified_dict_contains_value(k, v, task.kwargs) for k, v in kwargs_terms.items()),
        args_terms and task_args_contains_search_args(task.args, args_terms),
    ]
    return any(checks)
Example #9
0
def diff_states(start, end, ignore=None):
    """
    Diff two "filesystem states" given as dictionaries of FoundFile and
    FoundDir objects.

    Returns a dictionary with the keys:

    ``deleted``
        Entries present only in the start state.

    ``created``
        Entries present only in the end state.

    ``updated``
        Files whose size changed (FIXME not entirely reliable: contents
        cannot be compared because FoundFile.bytes is lazy, and mtime does
        not reveal a file restored to an earlier state).

    Only presence/absence and size are considered; mtime and other file
    attributes are ignored.
    """
    ignore = ignore or []

    # FIXME: this ignores too much, e.g. foo/bar when only foo/b is specified
    def keep(name):
        return not any(name.startswith(prefix) for prefix in ignore)

    start_keys = {name for name in start if keep(name)}
    end_keys = {name for name in end if keep(name)}

    deleted = {name: start[name] for name in start_keys - end_keys}
    created = {name: end[name] for name in end_keys - start_keys}
    updated = {
        name: end[name]
        for name in start_keys & end_keys
        if start[name].size != end[name].size
    }
    return dict(deleted=deleted, created=created, updated=updated)
Example #10
0
def callback(kwargs):
    """Handle an incoming chat message and dispatch it to plugins.

    Strips the trigger word and @-mentions, detects the "private" and
    "attachment" markers embedded in the text, then hands the cleaned
    message to the first plugin whose test() accepts it.
    """
    s = convert2str(kwargs["text"])
    trigger_word = convert2str(kwargs["trigger_word"])
    # remove trigger_words
    if trigger_word is not None:
        s = replaced(s, trigger_word.split(","))
    # remove metion block
    s = re.sub(r"(@.*?)\W", "", s)

    # Flags requested by the user inside the message text.
    private = any(marker in s for marker in ["private", "私聊"])
    attachmented = any(marker in s for marker in ["带图", "附件"])
    data = {"message": replaced(s, ["private", "私聊", "带图", "附件"]).strip()}

    if not data["message"]:
        return {"text": ""}

    # First plugin that accepts the message wins.
    for plugin_module in plugin_modules:
        if not plugin_module.test(data):
            continue
        ret = plugin_module.handle(data)
        if isinstance(ret, tuple):
            text, attaches = ret
        else:
            text, attaches = ret, None
        if trigger_word is None:
            text = "!" + text
        if attachmented and attaches:
            return {"text": " ", "private": private, "attachments": attaches}
        return {"text": text, "private": private}

    return {"text": "!呵呵"}
Example #11
0
def numerical_grad(f, inputs, grad_outputs, eps=1e-3):
    """Computes numerical gradient by finite differences.

    This function is used to implement gradient check. For usage example, see
    unit tests of :mod:`chainer.functions`.

    Args:
        f (function): Python function with no arguments that runs forward
            computation and returns the result.
        inputs (tuple of arrays): Tuple of arrays that should be treated as
            inputs. Each element of them is slightly modified to realize
            numerical gradient by finite differences.
        grad_outputs (tuple of arrays): Tuple of arrays that are treated as
            output gradients.
        eps (float): Epsilon value of finite differences.

    Returns:
        tuple: Numerical gradient arrays corresponding to ``inputs``.

    """
    assert eps > 0
    inputs = tuple(inputs)
    grad_outputs = tuple(grad_outputs)

    # Decide the device by inspecting every array involved.
    arrays = inputs + grad_outputs
    on_gpu = any(isinstance(x, cuda.ndarray) for x in arrays)
    on_cpu = any(isinstance(x, numpy.ndarray) for x in arrays)

    if on_gpu and on_cpu:
        raise RuntimeError("Do not mix GPU and CPU arrays in `numerical_grad`")
    if on_gpu:
        return numerical_grad_gpu(f, inputs, grad_outputs, eps)
    return numerical_grad_cpu(f, inputs, grad_outputs, eps)
Example #12
0
def marknodes_rclick(c, p, menu):
    """ Mark selected nodes """

    selected = c.getSelectedPositions()

    # Offer "Unmark" when at least one node is marked, and "Mark" when
    # at least one node is unmarked.
    have_unmark = any(pos.isMarked() for pos in selected)
    have_mark = any(not pos.isMarked() for pos in selected)

    def mark_all_cb():
        for pos in selected:
            pos.setMarked()
        c.redraw_after_icons_changed()

    def unmark_all_cb():
        for pos in selected:
            pos.v.clearMarked()
        c.redraw_after_icons_changed()

    if have_mark:
        markaction = menu.addAction("Mark")
        markaction.connect(markaction, QtCore.SIGNAL("triggered()"), mark_all_cb)

    if have_unmark:
        unmarkaction = menu.addAction("Unmark")
        unmarkaction.connect(unmarkaction, QtCore.SIGNAL("triggered()"), unmark_all_cb)
Example #13
0
def remove_actions(prefix, specs, index=None, pinned=True):
    """Build the action plan for removing *specs* from an environment.

    :param prefix: path of the environment to remove from.
    :param specs: iterable of spec strings matched against linked dists.
    :param index: optional package index; enables tracked-feature cleanup.
    :param pinned: when True, refuse to remove pinned packages.
    :returns: defaultdict(list) of actions keyed by action name.
    :raises RuntimeError: when a matched package is pinned and pinned=True.
    """
    linked = install.linked(prefix)

    mss = [MatchSpec(spec) for spec in specs]

    if index:
        r = Resolve(index)
    else:
        r = None

    pinned_specs = get_pinned_specs(prefix)

    actions = defaultdict(list)
    actions[inst.PREFIX] = prefix
    for dist in sorted(linked):
        fn = dist + ".tar.bz2"
        if any(ms.match(fn) for ms in mss):
            # Pinned packages may only be removed with --no-pin.
            if pinned and any(MatchSpec(spec).match("%s.tar.bz2" % dist) for spec in pinned_specs):
                raise RuntimeError("Cannot remove %s because it is pinned. Use --no-pin " "to override." % dist)

            add_unlink(actions, dist)
            # With an index available, also queue removal actions for any
            # features tracked by this package, merging them into ours.
            if r and fn in index and r.track_features(fn):
                features_actions = remove_features_actions(prefix, index, r.track_features(fn))
                for action in features_actions:
                    if isinstance(actions[action], list):
                        # Merge list-valued actions without duplicates.
                        for item in features_actions[action]:
                            if item not in actions[action]:
                                actions[action].append(item)
                    else:
                        # Scalar entries (e.g. the prefix) must agree.
                        assert actions[action] == features_actions[action]

    return actions
Example #14
0
    def __str__(self):
        """Pretty map."""
        # Header: turn counter, then the top border (2 chars per tile).
        s = "Turn {0}/{1}\n".format(self.turn, self.max_turns)
        s += " "
        s += "-" * 2 * self.map.size + "\n"
        for y in range(self.map.size):
            s += "|"
            for x in range(self.map.size):
                tile = self.map[x, y]
                # Heroes standing on / spawning at this tile.
                hero = [h for h in self.heroes if h.pos == (x, y)]
                spawn = [h for h in self.heroes if h.spawn == (x, y)]

                if tile == Tile.wall:
                    s += "##"
                elif any(hero):
                    # "@<id>" marks a hero.
                    # NOTE(review): any() tests the truthiness of the hero
                    # objects themselves, not just list emptiness --
                    # presumably heroes are always truthy, so this behaves
                    # like `if hero:`; confirm.
                    s += "@"
                    s += str(hero[0].id)
                elif any(spawn):
                    s += ".."
                elif (x, y) in self.mine_owner:
                    # "$-" for an unowned mine, "$<owner id>" otherwise.
                    owner = self.mine_owner[(x, y)]
                    s += "$"
                    s += "-" if owner is None else str(owner)
                elif tile == Tile.tavern:
                    s += "[]"
                else:
                    s += "  "
            s += "|\n"
        s += " " + "-" * 2 * self.map.size
        # Append each of the four heroes' own string representations.
        for i in range(4):
            s += "\n"
            s += str(self.heroes[i])
        return s
Example #15
0
    def delta(self, MGen, algGens):
        """Compute the delta action of this DA-structure on (MGen, algGens).

        Returns E0 (zero) as soon as any compatibility test fails;
        otherwise delegates to the local DA-structure and joins its
        output terms back into the ambient tensor module.
        """
        # Preliminary tests
        # Idempotents must chain: MGen.idem2 -> alg[0] -> alg[1] -> ...
        if len(algGens) > 0 and algGens[0].left_idem != MGen.idem2:
            return E0
        if any([algGens[i].right_idem != algGens[i + 1].left_idem for i in range(len(algGens) - 1)]):
            return E0
        # Idempotent algebra inputs contribute nothing.
        if any([alg.isIdempotent() for alg in algGens]):
            return E0

        # Split the inputs into the local part and the outer product part.
        assignment, algs_local, prod_d = self.getAssignments(MGen, algGens)
        if assignment is None:
            return E0

        local_MGen = MGen.local_gen
        if len(algs_local) > 0:
            local_MGen = self.adjustLocalMGen(local_MGen, algs_local[0])
            if local_MGen is None:
                return E0

        local_delta = self.local_da.delta(local_MGen, tuple(algs_local))
        if local_delta == 0:
            return E0

        # Re-assemble each local output term into the ambient tensor module.
        result = E0
        for (local_d, local_y), ring_coeff in local_delta.items():
            alg_d, y = self.joinOutput(local_d, local_y, prod_d)
            result += 1 * TensorGenerator((alg_d, y), self.AtensorM)
        return result
Example #16
0
 def test1(self):
     """A and B compare equal element-wise in every direction.

     NOTE(review): with array operands, `all`/`any` rely on the element-wise
     comparisons yielding iterables of booleans -- these may be the builtins
     or numpy's versions depending on this module's imports; confirm.
     """
     assert_(all(self.A == self.B))
     assert_(all(self.A >= self.B))
     assert_(all(self.A <= self.B))
     assert_(not any(self.A > self.B))
     assert_(not any(self.A < self.B))
     assert_(not any(self.A != self.B))
def normalize_attributes(input_dir, output_dir, target_tag, attribute, wrong_value_list, normal_value, remove=False):
    """Normalize (or strip) *attribute* on every <target_tag> of each EAD file.

    :param input_dir: directory scanned for ``*.xml`` EAD files.
    :param output_dir: directory the rewritten files are saved to.
    :param target_tag: element name whose attribute is normalized.
    :param attribute: the attribute to rewrite or delete.
    :param wrong_value_list: values to replace; empty means "all values".
    :param normal_value: replacement value when not removing.
    :param remove: when True, delete the attribute instead of rewriting it.
    """
    eads = [ead for ead in os.listdir(input_dir) if ead.endswith(".xml")]

    for ead in tqdm(eads):
        tree = etree.parse(os.path.join(input_dir, ead))
        tags = tree.xpath("//{0}".format(target_tag))

        if target_tag == "physloc" and attribute == "label":
            # Special case: only drop the label on online/server locations.
            for tag in tags:
                # BUG FIX: guard against empty elements -- tag.text is None
                # when <physloc> has no text, and None.lower() raises.
                if tag.text is not None and tag.text.lower() in ("online", "server"):
                    if tag.attrib.get(attribute, ""):
                        del tag.attrib[attribute]
        else:
            for tag in tags:
                if len(wrong_value_list) > 0:
                    # Only touch attributes carrying one of the wrong values.
                    if any(tag.attrib.get(attribute, "") == wrong for wrong in wrong_value_list):
                        if remove:
                            del tag.attrib[attribute]
                        else:
                            tag.attrib[attribute] = normal_value
                elif remove:
                    if tag.attrib.get(attribute, ""):
                        del tag.attrib[attribute]
                else:
                    tag.attrib[attribute] = normal_value

        # NOTE(review): etree.tostring with encoding= returns bytes under
        # Python 3, which a text-mode file rejects -- presumably this module
        # runs on Python 2; confirm before porting.
        with open(os.path.join(output_dir, ead), mode="w") as f:
            f.write(etree.tostring(tree, pretty_print=True, xml_declaration=True, encoding="utf-8"))
    def divide(self):
        """ Run the algorithm to perform a suggested
        division.

        :returns: A dictionary of divisions of {user: piece}
        """
        slices = defaultdict(list)  # we will return N pieces per cutter
        pieces = self.cake.as_collection()  # flatten the collection into choices
        cutters = self.strategy(self.users, pieces)  # create our alternation strategy
        contest = []  # initialize the contested pieces

        # Phase 1: pieces wanted by only one user are assigned directly;
        # pieces chosen by several users at once are set aside in `contest`.
        while any(pieces):  # distribute the un-contested pieces
            choices = list_worst_pieces(self.users, pieces)  # find each user's worst piece
            settled = all_unique(choices.values())  # are any choices the same
            for cutter, piece in choices.items():  # check all the chosen items
                if settled:
                    slices[cutter].append(piece)  # if not contested, give each user that piece
                elif piece not in pieces:
                    continue  # this piece has already been contested
                else:
                    contest.append(piece)  # both users want this piece
                pieces.remove(piece)  # remove these from the choosing

        # Phase 2: contested pieces are handed out by alternating cutters
        # according to the chosen strategy.
        while any(contest):  # distribute the contested pieces
            for cutter in cutters():  # change users based on our strategy
                piece = choose_worst_piece(cutter, contest)
                slices[cutter].append(piece)  # give that user their next worst piece
                if not any(contest):
                    break  # exit early in case of odd pieces
        return slices
Example #19
0
    def _fields_sync(self, cr, uid, partner, update_values, context=None):
        """ Sync commercial fields and address fields from company and to children after create/update,
        just as if those were all modeled as fields.related to the parent """
        # 1. From UPSTREAM: sync from parent
        if update_values.get("parent_id") or update_values.get("use_parent_address"):
            # 1a. Commercial fields: sync if parent changed
            if update_values.get("parent_id"):
                self._commercial_sync_from_company(cr, uid, partner, context=context)
            # 1b. Address fields: sync if parent or use_parent changed *and* both are now set
            if partner.parent_id and partner.use_parent_address:
                # Pull the parent's address via the onchange handler so the
                # same field mapping applies as in the form view.
                onchange_vals = self.onchange_address(
                    cr,
                    uid,
                    [partner.id],
                    use_parent_address=partner.use_parent_address,
                    parent_id=partner.parent_id.id,
                    context=context,
                ).get("value", {})
                partner.update_address(onchange_vals)

        # 2. To DOWNSTREAM: sync children
        if partner.child_ids:
            # 2a. Commercial Fields: sync if commercial entity
            if partner.commercial_partner_id == partner:
                commercial_fields = self._commercial_fields(cr, uid, context=context)
                # Only propagate when a commercial field actually changed.
                if any(field in update_values for field in commercial_fields):
                    self._commercial_sync_to_children(cr, uid, partner, context=context)
            # 2b. Address fields: sync if address changed
            address_fields = self._address_fields(cr, uid, context=context)
            if any(field in update_values for field in address_fields):
                # Only children that mirror the parent's address are updated.
                domain_children = [("parent_id", "=", partner.id), ("use_parent_address", "=", True)]
                update_ids = self.search(cr, uid, domain_children, context=context)
                self.update_address(cr, uid, update_ids, update_values, context=context)
Example #20
0
    def _fields_sync(self, values):
        """ Sync commercial fields and address fields from company and to children after create/update,
        just as if those were all modeled as fields.related to the parent """
        # 1. From UPSTREAM: sync from parent
        # NOTE(review): `values.get("type", "contact")` is truthy for any
        # write that does not explicitly set type to an empty value, so this
        # outer branch is entered on almost every call -- confirm this is
        # the intended trigger and not a typo for `values.get("type") == ...`.
        if values.get("parent_id") or values.get("type", "contact"):
            # 1a. Commercial fields: sync if parent changed
            if values.get("parent_id"):
                self._commercial_sync_from_company()
            # 1b. Address fields: sync if parent or use_parent changed *and* both are now set
            if self.parent_id and self.type == "contact":
                onchange_vals = self.onchange_parent_id().get("value", {})
                self.update_address(onchange_vals)

        # 2. To DOWNSTREAM: sync children
        if self.child_ids:
            # 2a. Commercial Fields: sync if commercial entity
            if self.commercial_partner_id == self:
                commercial_fields = self._commercial_fields()
                # Only propagate when a commercial field actually changed.
                if any(field in values for field in commercial_fields):
                    self._commercial_sync_to_children()
            # 2b. Address fields: sync if address changed
            address_fields = self._address_fields()
            if any(field in values for field in address_fields):
                # Only "contact"-type children mirror the parent's address.
                contacts = self.child_ids.filtered(lambda c: c.type == "contact")
                contacts.update_address(values)
Example #21
0
def test_objects_with_cached_properties_can_be_garbage_collected():
    import gc

    obj = Object()
    ident = id(obj)
    dealloc_flag = obj.deallocation_flag

    # Invoke the cached_property so its caching machinery kicks in.
    obj.some_property

    def gc_tracks_object():
        # Whether the gc currently knows an object with our identity.
        return any(id(tracked) == ident for tracked in gc.get_objects())

    # While alive: tracked by the gc and not yet deallocated.
    assert gc_tracks_object(), "The object is not being tracked by the garbage collector"
    assert not dealloc_flag[0], "The object was already deallocated"

    # Delete the object and run a full garbage collection.
    del obj
    gc.collect()

    # After collection: no longer tracked, and deallocated.
    assert not gc_tracks_object(), "The object is still being tracked by the garbage collector"
    assert dealloc_flag[0], "The object was not deallocated"
Example #22
0
def itersearch(table, pattern, field, flags, complement):
    """Yield the header row, then every data row that matches (or, with
    complement=True, fails to match) *pattern* in the selected field(s)."""
    prog = re.compile(pattern, flags)
    it = iter(table)
    hdr = next(it)
    flds = list(map(text_type, hdr))
    yield tuple(hdr)

    # Build the per-row predicate once, outside the row loop.
    if field is None:
        # search whole row
        def test(row):
            return any(prog.search(text_type(v)) for v in row)
    else:
        indices = asindices(hdr, field)
        if len(indices) == 1:
            index = indices[0]

            def test(row):
                return prog.search(text_type(row[index]))
        else:
            getvals = operator.itemgetter(*indices)

            def test(row):
                return any(prog.search(text_type(v)) for v in getvals(row))

    # complement=False yields matching rows; complement=True the others.
    wanted = not complement
    for row in it:
        if bool(test(row)) == wanted:
            yield tuple(row)
Example #23
0
def lib(names, sources=[], requirements=[], default_build=[], usage_requirements=[]):
    """The implementation of the 'lib' rule. Beyond standard syntax that rule allows
    simplified: 'lib a b c ;'."""
    # NOTE(review): mutable default arguments -- safe only as long as they
    # are never mutated in place (requirements is copied below); confirm.

    # With several names, per-target <name>/sources make no sense.
    if len(names) > 1:
        if any(r.startswith("<name>") for r in requirements):
            get_manager().errors()(
                "When several names are given to the 'lib' rule\n" + "it is not allowed to specify the <name> feature."
            )

        if sources:
            get_manager().errors()(
                "When several names are given to the 'lib' rule\n" + "it is not allowed to specify sources."
            )

    # NOTE(review): `project` is unused below -- presumably kept for the
    # side effects of current(); confirm before removing.
    project = get_manager().projects().current()
    result = []

    for name in names:
        r = requirements[:]

        # Support " lib a ; " and " lib a b c ; " syntax.
        if (
            not sources
            and not any(r.startswith("<name>") for r in requirements)
            and not any(r.startswith("<file") for r in requirements)
        ):
            r.append("<name>" + name)

        result.append(targets.create_typed_metatarget(name, "LIB", sources, r, default_build, usage_requirements))
    return result
Example #24
0
def _file_configs_paths(osname, agentConfig):
    """ Retrieve all the file configs and return their paths

    :param osname: OS name used to locate the conf.d directory.
    :param agentConfig: parsed datadog.conf mapping (legacy nagios keys).
    :returns: list of config file paths (may include the sentinel
        "deprecated/nagios" for legacy nagios configuration).
    """
    try:
        confd_path = get_confd_path(osname)
        all_file_configs = glob.glob(os.path.join(confd_path, "*.yaml"))
        all_default_configs = glob.glob(os.path.join(confd_path, "*.yaml.default"))
    except PathNotFound as e:
        log.error(
            "No conf.d folder found at '%s' or in the directory where the Agent is currently deployed.\n" % e.args[0]
        )
        sys.exit(3)

    # A .yaml.default config only applies when no regular .yaml config
    # exists for the same check.
    if all_default_configs:
        current_configs = set([_conf_path_to_check_name(conf) for conf in all_file_configs])
        for default_config in all_default_configs:
            if not _conf_path_to_check_name(default_config) in current_configs:
                all_file_configs.append(default_config)

    # Compatibility code for the Nagios checks if it's still configured
    # in datadog.conf
    # FIXME: 6.x, should be removed
    # BUG FIX: the original iterated itertools.chain(*all_file_configs),
    # i.e. the individual *characters* of every path string, so the
    # "nagios" substring test could never succeed; compare each path.
    if not any("nagios" in config for config in all_file_configs):
        # check if it's configured in datadog.conf the old way
        if any([nagios_key in agentConfig for nagios_key in NAGIOS_OLD_CONF_KEYS]):
            all_file_configs.append("deprecated/nagios")

    return all_file_configs
 def inventor_list(self):
     """
     Returns list of lists of applicant dictionary and location dictionary
     applicant:
       name_last
       name_first
       sequence
     location:
       id
       city
       state
       country
     """
     # First named inventor is listed ahead of the remaining inventors.
     applicants = self.xml.first_named_inventor + self.xml.inventors.inventor
     if not applicants:
         return []
     res = []
     for i, applicant in enumerate(applicants):
         # add applicant data
         app = {}
         app.update(self._name_helper_dict(applicant.name))
         app["nationality"] = applicant.contents_of("country_code", as_string=True)
         # add location data for applicant
         loc = {}
         for tag in ["city", "state"]:
             loc[tag] = applicant.residence.contents_of(tag, as_string=True, upper=False)
         loc["country"] = app["nationality"]
         # this is created because of MySQL foreign key case sensitivities
         loc["id"] = unidecode("|".join([loc["city"], loc["state"], loc["country"]]).lower())
         # nationality was only needed to build the location record
         del app["nationality"]
         # Skip entries where both dicts are entirely empty.
         if any(app.values()) or any(loc.values()):
             app["sequence"] = i
             app["uuid"] = str(uuid.uuid4())
             res.append([app, loc])
     return res
Example #26
0
    def indent_code(self, code):
        """Accepts a string of code or a list of code lines"""

        # A plain string is split into lines, indented, and re-joined.
        if isinstance(code, string_types):
            return "".join(self.indent_code(code.splitlines(True)))

        tab = "   "
        inc_token = ("{", "(", "{\n", "(\n")
        dec_token = ("}", ")")

        # Drop pre-existing left whitespace; we re-indent from scratch.
        stripped = [line.lstrip(" \t") for line in code]

        result = []
        depth = 0
        for line in stripped:
            # Blank lines are kept verbatim at the current depth.
            if line in ("", "\n"):
                result.append(line)
                continue
            # Closers outdent before the line; openers indent after it.
            if line.startswith(dec_token):
                depth -= 1
            result.append("%s%s" % (tab * depth, line))
            if line.endswith(inc_token):
                depth += 1
        return result
    def test_resized_server_instance_actions(self):
        """Verify the correct actions are logged during a confirmed resize."""

        actions = self.servers_client.get_instance_actions(self.server.id).entity

        # Verify the resize action is listed
        self.assertTrue(any(a.action == "resize" for a in actions))
        filtered_actions = [a for a in actions if a.action == "resize"]
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(len(filtered_actions), 1)

        # The single resize action must carry the expected identifiers and
        # the request id of the resize call.
        resize_action = filtered_actions[0]
        self.validate_instance_action(
            resize_action,
            self.server.id,
            self.user_config.user_id,
            self.user_config.project_id,
            self.resize_resp.headers["x-compute-request-id"],
        )

        # Verify the confirm resize action is listed
        self.assertTrue(any(a.action == "confirmResize" for a in actions))
        filtered_actions = [a for a in actions if a.action == "confirmResize"]
        self.assertEquals(len(filtered_actions), 1)

        # Same validation for the confirm step, against its own request id.
        resize_action = filtered_actions[0]
        self.validate_instance_action(
            resize_action,
            self.server.id,
            self.user_config.user_id,
            self.user_config.project_id,
            self.confirm_resize_resp.headers["x-compute-request-id"],
        )
    def config_args(self, section="default"):
        """Loop through the configuration file and set all of our values.

        :param section: ``str``
        :return: ``dict``
        """
        # SafeConfigParser only grew allow_no_value in Python 2.7.
        if sys.version_info >= (2, 7, 0):
            parser = ConfigParser.SafeConfigParser(allow_no_value=True)
        else:
            parser = ConfigParser.SafeConfigParser()

        # Set to preserve Case
        parser.optionxform = str

        try:
            parser.read(self.config_file)
            loaded = {}
            for name, value in parser.items(section):
                key = name.encode("utf8")
                # Map string booleans onto real ones; keep the rest as-is.
                if value in ("False", "false"):
                    loaded[key] = False
                elif value in ("True", "true"):
                    loaded[key] = True
                else:
                    loaded[key] = value
        except Exception:
            # Any read/parse failure yields an empty mapping.
            return {}
        else:
            return loaded
Example #29
0
def has_people(nlp, s):
    """Heuristically decide whether sentence *s* mentions a person."""
    NNP = nlp.vocab.strings["NNP"]
    # Four independent signals, evaluated in the original order.
    proper_noun = any(word.tag == NNP for word in s)
    capitalized = any(word.string[0].isupper() for word in s[1:])
    person_noun = any(lemma_is_person(noun.lemma_) for noun in get_nouns(nlp, s))
    non_it_pronoun = any(pronoun.lemma_ not in ("it", "its") for pronoun in get_pronouns(nlp, s))
    return person_noun or non_it_pronoun or proper_noun or capitalized
Example #30
0
def negate_sequence(text):
    """
    Detects negations and transforms negated words into "not_" form.

    Emits the transformed unigrams plus their bigrams and trigrams.
    """
    delims = "?.,!:;"
    negating = False
    grams = []
    prev = None
    pprev = None
    for token in text.split():
        cleaned = token.strip(delims).lower()
        current = "not_" + cleaned if negating else cleaned
        grams.append(current)
        # Emit the bigram/trigram ending at this token.
        if prev:
            bigram = prev + " " + current
            grams.append(bigram)
            if pprev:
                grams.append(pprev + " " + bigram)
            pprev = prev
        prev = current

        # A negation token flips the state; a clause delimiter ends it.
        if any(neg in token for neg in ["not", "n't", "no"]):
            negating = not negating
        if any(c in token for c in delims):
            negating = False

    return grams