Example #1
def Rconverter(Robj, dataframe=False):
    """
    Convert an object in R's namespace to one suitable
    for IPython's namespace.

    For a data.frame, it tries to return a structured array.
    It first checks for colnames, then names.
    If all are NULL, it returns np.asarray(Robj); otherwise
    it tries to construct a recarray.

    Parameters
    ----------
    Robj : an R object returned from rpy2
    """
    is_data_frame = ro.r("is.data.frame")
    colnames = ro.r("colnames")
    rownames = ro.r("rownames")  # with pandas, these could be used for the index
    names = ro.r("names")

    if dataframe:
        as_data_frame = ro.r("as.data.frame")
        cols = colnames(Robj)
        _names = names(Robj)
        if cols != ri.NULL:
            Robj = as_data_frame(Robj)
            names = tuple(np.array(cols))
        elif _names != ri.NULL:
            names = tuple(np.array(_names))
        else:  # failed to find names
            return np.asarray(Robj)
        Robj = np.rec.fromarrays(Robj, names=names)
    return np.asarray(Robj)
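
Setting the rpy2 plumbing aside, the step that actually builds the structured array is np.rec.fromarrays with a tuple of names. A minimal sketch of just that step, with made-up column data and no R objects involved:

import numpy as np

# hypothetical column data standing in for an R data.frame's columns
cols = [np.array([1, 2, 3]), np.array([4.0, 5.0, 6.0])]
names = tuple(["a", "b"])  # what Rconverter derives from colnames()/names()

rec = np.rec.fromarrays(cols, names=names)
print(rec["a"])   # -> [1 2 3]
print(rec.dtype)  # structured dtype with fields 'a' and 'b'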
Example #2
def get_phrase_table(weight, phrase_penalty, word_penalty, path):
    sys.stderr.write("loading phrase table\n")

    f = open(path)
    d = {}
    seenwords = {}
    for line in f:
        ll = line.split(" ||| ")
        f_phrase = tuple(ll[0].split())
        e_phrase = tuple(ll[1].split())
        features = [float(x) for x in ll[2].split()]

        score = sum([x[0] * log(x[1]) for x in zip(weight, features)])
        score += phrase_penalty - len(e_phrase) * word_penalty

        partial_score = [log(x) for x in features]
        partial_score.append(1)  # phrase penalty
        partial_score.append(-len(e_phrase))  # word penalty

        item = (score, e_phrase, partial_score)
        if f_phrase not in d:
            d[f_phrase] = []
        d[f_phrase].append(item)
        for f_word in f_phrase:
            seenwords[f_word] = 1
    for f_phrase in d:
        items = d[f_phrase]
        d[f_phrase] = sorted(items, key=lambda x: -x[0])

    f.close()
    sys.stderr.write("Phrase table: %d\n" % len(d))
    return d, seenwords
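
The parser above expects a Moses-style phrase table, one entry per line in the form `source ||| target ||| feature scores`. A small sketch of feeding it such a file; the phrases, features and weights here are made up:

import sys
import tempfile
from math import log  # sys and log are what get_phrase_table itself relies on

# two made-up entries in "source ||| target ||| features" form
table = "das haus ||| the house ||| 0.8 0.7\ndas ||| the ||| 0.9 0.5\n"
with tempfile.NamedTemporaryFile("w", suffix=".pt", delete=False) as tmp:
    tmp.write(table)

d, seen = get_phrase_table(weight=[1.0, 1.0], phrase_penalty=0.0, word_penalty=0.0, path=tmp.name)
print(d[("das", "haus")][0][1])  # -> ('the', 'house'), the best-scoring target phrase
print(sorted(seen))              # -> ['das', 'haus']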
Example #3
def iterunique(source, key):
    # assume source is already sorted by the key
    it = iter(source)

    hdr = next(it)
    yield tuple(hdr)

    # convert field selection into field indices
    if key is None:
        indices = range(len(hdr))
    else:
        indices = asindices(hdr, key)

    # now use field indices to construct a _getkey function
    # N.B., this may raise an exception on short rows, depending on
    # the field selection
    getkey = operator.itemgetter(*indices)

    prev = next(it)
    prev_key = getkey(prev)
    prev_comp_ne = True

    for curr in it:
        curr_key = getkey(curr)
        curr_comp_ne = curr_key != prev_key
        if prev_comp_ne and curr_comp_ne:
            yield tuple(prev)
        prev = curr
        prev_key = curr_key
        prev_comp_ne = curr_comp_ne

    # last one?
    if prev_comp_ne:
        yield prev
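
A quick way to see what survives: only rows whose key occurs exactly once are yielded, and duplicated rows are dropped entirely. The sketch below assumes `key=None`, which sidesteps the external `asindices` helper; the table data is made up:

import operator  # iterunique itself uses operator.itemgetter

# pre-sorted table: header row first, then data rows
table = [("foo", "bar"), ("a", 1), ("a", 1), ("b", 2), ("c", 3)]
print(list(iterunique(table, key=None)))
# -> [('foo', 'bar'), ('b', 2), ('c', 3)]  (the duplicated ('a', 1) rows are gone)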
Example #4
def main(argv):
    rows = []

    test_case = []
    for (distr_params, num_values, num_clients, num_reports_per_client) in DISTRIBUTION_PARAMS:
        for distribution in DISTRIBUTIONS:
            for (config_name, bloom_name, privacy_params, fr_extra, regex_missing) in TEST_CONFIGS:
                test_name = "r-{}-{}-{}".format(distribution, distr_params, config_name)

                params = (
                    BLOOMFILTER_PARAMS[bloom_name]
                    + PRIVACY_PARAMS[privacy_params]
                    + tuple([int(num_values * fr_extra)])
                    + tuple([MAP_REGEX_MISSING[regex_missing]])
                )

                test_case = (test_name, distribution, num_values, num_clients, num_reports_per_client) + params
                row_str = [str(element) for element in test_case]
                rows.append(row_str)

    for params in DEMO:
        rows.append(params)

    for row in rows:
        print " ".join(row)
Example #5
def rectilinear(vertices, periodic=(), name="rect"):
    "rectilinear mesh"

    ndims = len(vertices)
    indices = numpy.ogrid[tuple(slice(len(n) - 1) for n in vertices)]
    domainelem = element.Element(ndims=ndims, vertices=[])

    vertexfmt = name + "(" + ",".join("%%%dd" % len(str(len(n) - 1)) for n in vertices) + ")"
    vertexobjs = util.objmap(
        lambda *index: element.PrimaryVertex(vertexfmt % index), *numpy.ogrid[tuple(slice(len(n)) for n in vertices)]
    )
    for idim in periodic:
        tmp = numeric.bringforward(vertexobjs, idim)
        tmp[-1] = tmp[0]

    structure = util.objmap(
        lambda *index: element.QuadElement(
            ndims=ndims,
            parent=(
                domainelem,
                element.AffineTransformation(
                    offset=[n[i] for n, i in zip(vertices, index)],
                    transform=numpy.diag([n[i + 1] - n[i] for n, i in zip(vertices, index)]),
                ),
            ),
            vertices=vertexobjs[tuple(slice(i, i + 2) for i in index)].ravel(),
        ),
        *indices
    )
    topo = topology.StructuredTopology(structure)
    coords = GridFunc(domainelem, structure, vertices)
    if periodic:
        topo = topo.make_periodic(periodic)
    return topo, coords
Example #6
 def _print_pgf_path(self, gc, path, transform):
     f = 1.0 / self.dpi
     # check for clip box
     bbox = gc.get_clip_rectangle() if gc else None
     if bbox:
         p1, p2 = bbox.get_points()
         clip = (p1[0], p1[1], p2[0], p2[1])
     else:
         clip = None
     # build path
     for points, code in path.iter_segments(transform, clip=clip):
         if code == Path.MOVETO:
             x, y = tuple(points)
             writeln(self.fh, r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" % (f * x, f * y))
         elif code == Path.CLOSEPOLY:
             writeln(self.fh, r"\pgfpathclose")
         elif code == Path.LINETO:
             x, y = tuple(points)
             writeln(self.fh, r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" % (f * x, f * y))
         elif code == Path.CURVE3:
             cx, cy, px, py = tuple(points)
             coords = cx * f, cy * f, px * f, py * f
             writeln(self.fh, r"\pgfpathquadraticcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
         elif code == Path.CURVE4:
             c1x, c1y, c2x, c2y, px, py = tuple(points)
             coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
             writeln(
                 self.fh,
                 r"\pgfpathcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords,
             )
Example #7
 def executemany(self, query, params=None):
     # cx_Oracle doesn't support iterators, convert them to lists
     if params is not None and not isinstance(params, (list, tuple)):
         params = list(params)
     try:
         args = [(":arg%d" % i) for i in range(len(params[0]))]
     except (IndexError, TypeError):
         # No params given, nothing to do
         return None
     # cx_Oracle wants no trailing ';' for SQL statements.  For PL/SQL,
     # it does want a trailing ';' but not a trailing '/'.  However, these
     # characters must be included in the original query in case the query
     # is being passed to SQL*Plus.
     if query.endswith(";") or query.endswith("/"):
         query = query[:-1]
     query = convert_unicode(query % tuple(args), self.charset)
     formatted = [self._format_params(i) for i in params]
     self._guess_input_sizes(formatted)
     try:
         return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
     except Database.IntegrityError as e:
         six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
     except Database.DatabaseError as e:
         # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
         if hasattr(e.args[0], "code") and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
             six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
         six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
Example #8
 def pipeOpen(self, cmd, *args, **flags):
     l = tuple(cmd.split(" "))
     for (k, v) in flags.items():
         if v is not None:
             # short flags become ("-k", "value"); long flags become ("--key=value",)
             l += ("-%s" % k, str(v)) if len(k) == 1 else ("--%s=%s" % (k, v),)
     l += tuple(args)
     return sp.Popen(tuple(a for a in l if a), stdout=sp.PIPE, stderr=sp.PIPE)
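
A hypothetical use, assuming `sp` is the `subprocess` module and that the function is attached to some wrapper class (a throwaway `Runner` here): single-letter flags expand to a ('-k', 'value') pair, longer flags to one '--key=value' argument, and flags whose value is None are dropped.

import subprocess as sp

class Runner:
    pass

Runner.pipeOpen = pipeOpen  # sketch only: borrow the function above as a method

proc = Runner().pipeOpen("echo hello", "world", n=None, color="auto")
# argv handed to sp.Popen: ("echo", "hello", "--color=auto", "world")
print(proc.communicate()[0])  # b'hello --color=auto world\n' with a POSIX echo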
Example #9
def write_FORCES_FOURTH(
    vaspruns, disp_dataset, forces_fourth="FORCES_FOURTH", forces_third="FORCES_THIRD", forces_second="FORCES_SECOND"
):

    count = 0
    for disp1 in disp_dataset["first_atoms"]:
        count += 1
        for disp2 in disp1["second_atoms"]:
            count += 1
    write_FORCES_THIRD(vaspruns[:count], disp_dataset, forces_third=forces_third, forces_second=forces_second)
    natom = disp_dataset["natom"]
    set_of_forces = get_forces_from_vasprun_xmls(vaspruns[count:], natom, index_shift=count)
    count_begin = count
    w4 = open(forces_fourth, "w")
    for disp1 in disp_dataset["first_atoms"]:
        atom1 = disp1["number"]
        for disp2 in disp1["second_atoms"]:
            atom2 = disp2["number"]
            for disp3 in disp2["third_atoms"]:
                atom3 = disp3["number"]
                d = disp3["displacement"]
                w4.write("# File: %-5d\n" % (count + 1))
                w4.write("# %-5d " % (atom1 + 1))
                w4.write("%20.16f %20.16f %20.16f\n" % tuple(disp1["displacement"]))
                w4.write("# %-5d " % (atom2 + 1))
                w4.write("%20.16f %20.16f %20.16f\n" % tuple(disp2["displacement"]))
                w4.write("# %-5d " % (atom3 + 1))
                w4.write("%20.16f %20.16f %20.16f\n" % tuple(d))
                for forces in set_of_forces[count - count_begin]:
                    w4.write("%15.10f %15.10f %15.10f\n" % tuple(forces))
                count += 1
Example #10
def write_FORCES_FC4(disp_dataset, forces_fc4, fp=None, filename="FORCES_FC4"):
    if fp is None:
        w = open(filename, "w")
    else:
        w = fp

    natom = disp_dataset["natom"]
    num_disp1 = len(disp_dataset["first_atoms"])
    num_disp2 = 0
    for disp1 in disp_dataset["first_atoms"]:
        num_disp2 += len(disp1["second_atoms"])
    count = num_disp1 + num_disp2

    write_FORCES_FC3(disp_dataset, forces_fc3=forces_fc4, fp=w)

    for i, disp1 in enumerate(disp_dataset["first_atoms"]):
        atom1 = disp1["number"]
        for disp2 in disp1["second_atoms"]:
            atom2 = disp2["number"]
            for disp3 in disp2["third_atoms"]:
                atom3 = disp3["number"]
                w.write("# File: %-5d\n" % (count + 1))
                w.write("# %-5d " % (atom1 + 1))
                w.write("%20.16f %20.16f %20.16f\n" % tuple(disp1["displacement"]))
                w.write("# %-5d " % (atom2 + 1))
                w.write("%20.16f %20.16f %20.16f\n" % tuple(disp2["displacement"]))
                w.write("# %-5d " % (atom3 + 1))
                w.write("%20.16f %20.16f %20.16f\n" % tuple(disp3["displacement"]))
                for forces in forces_fc4[count]:
                    w.write("%15.10f %15.10f %15.10f\n" % tuple(forces))
                count += 1
Example #11
File: Deps.py Project: rcuza/bcfg2
    def validate_structures(self, metadata, structures):
        """Examine the passed structures and append any additional
        prerequisite entries as defined by the files in Deps.
        """
        entries = []
        for structure in structures:
            for entry in structure.getchildren():
                tag = entry.tag
                if tag.startswith("Bound"):
                    tag = tag[5:]
                if (tag, entry.get("name")) not in entries and not isinstance(entry, lxml.etree._Comment):
                    entries.append((tag, entry.get("name")))
        entries.sort()
        entries = tuple(entries)
        groups = list(metadata.groups)
        groups.sort()
        groups = tuple(groups)

        # Check to see if we have cached the prereqs already
        if (entries, groups) in self.cache:
            prereqs = self.cache[(entries, groups)]
        else:
            prereqs = self.calculate_prereqs(metadata, entries)
            self.cache[(entries, groups)] = prereqs

        newstruct = lxml.etree.Element("Independent")
        for tag, name in prereqs:
            lxml.etree.SubElement(newstruct, tag, name=name)
        structures.append(newstruct)
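
The sorted lists are frozen into tuples so that `(entries, groups)` can serve as a dictionary key for the cache; lists are unhashable. A minimal illustration of that caching pattern (all names below are made up):

cache = {}

def expensive_calculation(entries, groups):
    # stand-in for calculate_prereqs(); anything derived from the inputs will do
    return len(entries) + len(groups)

def cached_lookup(entries, groups):
    # sort for a canonical order, then freeze into tuples so the pair is hashable
    key = (tuple(sorted(entries)), tuple(sorted(groups)))
    if key not in cache:
        cache[key] = expensive_calculation(entries, groups)
    return cache[key]

print(cached_lookup([("Package", "vim")], ["linux", "server"]))  # computed
print(cached_lookup([("Package", "vim")], ["server", "linux"]))  # cache hit: same key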
Example #12
    def __init__(self, name, n=None, reverse=False, randomize_order=False, seed=None):

        # get the colors from a color brewer scheme
        try:
            scheme = colorbrewer.schemes[name]
        except KeyError:
            message = "'%s' is not a valid colorbrewer scheme name" % name
            raise KeyError(message)
        else:
            nColors = n or scheme.max_number()
            rgbList = scheme.get_colors(nColors)

        # reverse the rgb list?
        if reverse:
            rgbList = list(rgbList)
            rgbList.reverse()
            rgbList = tuple(rgbList)

        # randomize the rgb list?
        if randomize_order:
            if seed is None:
                message = "For randomizing order, you have to specify a seed"
                raise ValueError(message)
            random.seed(seed)
            rgbList = list(rgbList)
            random.shuffle(rgbList)
            rgbList = tuple(rgbList)

        # make color instance from the rgb values
        colors = [Color(0, 255, 255, 255, "white"), Color(1, 0, 0, 0, "black")]
        colors.extend([Color(index + 2, r, g, b, "%s-%i" % (name, index)) for index, (r, g, b) in enumerate(rgbList)])

        ColorScheme.__init__(self, colors)
Example #13
    def __init__(self, seed, n, reverse=False, maxstep=25):

        # create a list of rgb values
        rgbList = []
        random.seed(seed)
        if n > 0:
            rgbList.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
        while len(rgbList) < n:
            rgb = list(rgbList[-1])
            drgb = [
                random.randint(-maxstep, maxstep),
                random.randint(-maxstep, maxstep),
                random.randint(-maxstep, maxstep),
            ]
            rgb = [(rgb[i] + drgb[i]) % 256 for i in range(3)]
            rgbList.append(tuple(rgb))
        rgbList = tuple(rgbList)

        # reverse the rgb list?
        if reverse:
            rgbList = list(rgbList)
            rgbList.reverse()
            rgbList = tuple(rgbList)

        # make color instance from the rgb values
        name = "MC-%d-%d" % (maxstep, seed)
        colors = [Color(0, 255, 255, 255, "white"), Color(1, 0, 0, 0, "black")]
        colors.extend([Color(index + 2, r, g, b, "%s-%i" % (name, index)) for index, (r, g, b) in enumerate(rgbList)])

        ColorScheme.__init__(self, colors)
Example #14
    def _set_address(self, new_address):
        """ Change copter radio address.
            This function works only with the Crazyradio CRTP link.
        """

        logging.debug("Setting bootloader radio address to" " {}".format(new_address))

        if len(new_address) != 5:
            raise Exception("Radio address should be 5 bytes long")

        self.link.pause()

        for _ in range(10):
            logging.debug("Trying to set new radio address")
            self.link.cradio.set_address((0xE7,) * 5)
            pkdata = (0xFF, 0xFF, 0x11) + tuple(new_address)
            self.link.cradio.send_packet(pkdata)
            self.link.cradio.set_address(tuple(new_address))
            if self.link.cradio.send_packet((0xFF,)).ack:
                logging.info("Bootloader set to radio address" " {}".format(new_address))
                self.link.restart()
                return True

        self.link.restart()
        return False
Example #15
def forward_one(x, target, label, hidden, train_flag):
    # make input window vector
    distance = window // 2
    char_vecs = list()
    char_type_vecs = list()
    x = list(x)
    for i in range(distance):
        x.append("</s>")
        x.insert(0, "<s>")
    for i in range(-distance + 1, distance + 2):
        char = x[target + i]
        char_id = char2id[char]
        char_vec = model.embed(get_onehot(char_id))
        char_vecs.append(char_vec)
    char_concat = F.concat(tuple(char_vecs))
    for i in range(-distance + 1, distance + 2):
        char = x[target + i]
        char_type = make_char_type(char)
        char_type_id = char_type2id[char_type]
        char_type_vec = model.char_type_embed(get_onehot(char_type_id))
        char_type_vecs.append(char_type_vec)
    char_type_concat = F.concat(tuple(char_type_vecs))
    # dropout_concat = F.dropout(concat, ratio=dropout_rate, train=train_flag)
    concat = F.concat((char_concat, char_type_concat))
    concat = F.concat((concat, hidden))
    hidden = F.sigmoid(model.hidden1(concat))
    output = model.output(hidden)
    dist = F.softmax(output)
    # print(dist.data, label, np.argmax(dist.data))
    correct = get_onehot(label)
    # print(output.data, correct.data)
    return np.argmax(dist.data), F.softmax_cross_entropy(output, correct)
Example #16
    def create(self, transaction, prec, succs=(), flag=0, parents=None, date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers, you want to use
        the `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if "date" in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop("date"))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_("in-marker cycle with %s") % node.hex(prec))

        metadata = tuple(sorted(metadata.items()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))
Example #17
def add_padding_polygon(polygon, bitmap, padding=10):
    draw_detections([polygon], bitmap, fill=True, color=1)
    # get contours
    contours, hierarchy = cv2.findContours(bitmap, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # get the min bounding rect for the rects
    min_area_rect = cv2.minAreaRect(contours[0])  # rect = ((center_x,center_y),(width,height),angle)
    # convert to list so you can change the value of the width and height
    min_area_rect_list = [list(x) if type(x) is tuple else x for x in min_area_rect]

    height = min_area_rect_list[1][1]
    width = min_area_rect_list[1][0]
    center_x = min_area_rect_list[0][0]
    center_y = min_area_rect_list[0][1]
    bitmap_height, bitmap_width = bitmap.shape

    # only add padding if it doesn't go off the image
    if (
        center_x + (width / 2) + padding < bitmap_width
        and center_x - (width / 2) - padding >= 0
        and center_y + (height / 2) + padding < bitmap_height
        and center_y - (height / 2) - padding >= 0
    ):
        min_area_rect_list[1][0] += padding
        min_area_rect_list[1][1] += padding

    # convert back to tuple
    cnt = tuple(tuple(x) if type(x) is list else x for x in min_area_rect_list)  # , dtype=np.int32)
    min_poly_padded = np.int0(cv2.cv.BoxPoints(cnt))  # cv2.boxPoints(cnt) in OpenCV 3+
    return min_poly_padded
Example #18
def write_kappa(kappa, temperatures, mesh, mesh_divisors=None, grid_point=None, sigma=None, filename=None):
    kappa_filename = "kappa"
    suffix = "-m%d%d%d" % tuple(mesh)
    if mesh_divisors is not None:
        if (np.array(mesh_divisors, dtype=int) != 1).any():
            suffix += "-d%d%d%d" % tuple(mesh_divisors)
    sigma_str = ("%f" % sigma).rstrip("0").rstrip("\.")
    if grid_point is not None:
        suffix += "-g%d" % grid_point
    if sigma is not None:
        suffix += "-s" + sigma_str
    if filename is not None:
        suffix += "." + filename
    suffix += ".dat"
    kappa_filename += suffix
    print "Kappa",
    if grid_point is not None:
        print "at grid adress %d" % grid_point,
    if sigma is not None:
        if grid_point is not None:
            print "and",
        else:
            print "at",
        print "sigma %s" % sigma_str,
    print "were written into",
    if grid_point is not None:
        print ""
    print '"%s"' % kappa_filename
    w = open(kappa_filename, "w")
    w.write("# temp   kappa\n")
    for t, k in zip(temperatures, kappa):
        w.write("%6.1f %.5f\n" % (t, k))
    w.close()
Example #19
def test_exception_compat_v1():
    endpoint = random_ipc_endpoint()

    class MySrv(zerorpc.Server):
        pass

    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)

    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)

    rpccall = client.channel()
    rpccall.emit("donotexist", tuple())
    event = rpccall.recv()
    print(event)
    assert event.name == "ERR"
    (name, msg, tb) = event.args
    print "detailed error", name, msg, tb
    assert name == "NameError"
    assert msg == "donotexist"

    rpccall = client.channel()
    rpccall.emit("donotexist", tuple(), xheader=dict(v=1))
    event = rpccall.recv()
    print(event)
    assert event.name == "ERR"
    (msg,) = event.args
    print "msg only", msg
    assert msg == "NameError('donotexist',)"

    client_events.close()
    srv.close()
Example #20
    def _borg_localroles(self):
        """Returns a tuple with roles obtained via borg.localrole adapters."""
        # Get all local roles (including those provided
        # by borg_localroles) and editable local roles
        # (only those stored in the object):
        pas = getToolByName(self.context, "acl_users")
        editable_local_roles = dict(self.context.get_local_roles())

        # Calculate borg_local_roles by subtracting editable local
        # roles from all available local roles (including the
        # borg.localrole provided roles):
        borg_local_roles = pas._getAllLocalRoles(self.context)
        for principal, roles in editable_local_roles.items():
            borg_local_roles[principal] = [r for r in borg_local_roles.get(principal, ()) if r not in roles]
            if not borg_local_roles[principal]:
                del borg_local_roles[principal]

        # Adapted from: PluggableAuthService._getLocalRolesForDisplay
        result = []
        for principal, roles in borg_local_roles.items():
            username = principal
            userType = "user"
            if pas.getGroup(principal):
                userType = "group"
            else:
                user = pas.getUserById(principal)
                if user:
                    username = user.getUserName()
                    principal = user.getId()
            result.append((username, tuple(roles), userType, principal))

        return tuple(result)
Example #21
    def __init__(self, parent, left_idem, strands):
        """Specifies the parent algebra (which contains the local PMC), left
        idempotent, and strands.

        Input parameters:
        - parent: must be an object of LocalStrandAlgebra.
        - left_idem: tuple containing IDs of occupied pairs.
        - strands: tuple of pairs specifying strands.

        """
        Generator.__init__(self, parent)
        self.local_pmc = parent.local_pmc
        self.left_idem = left_idem
        if not isinstance(self.left_idem, LocalIdempotent):
            self.left_idem = LocalIdempotent(self.local_pmc, self.left_idem)
        if not isinstance(strands, LocalStrands):
            strands = LocalStrands(self.local_pmc, strands)
        self.strands = strands

        # Get right_idem and multiplicity from strands
        self.right_idem = self.strands.propagateRight(self.left_idem)
        self.multiplicity = self.strands.multiplicity

        # Enumerate single and double horizontals
        self.all_hor = list(self.left_idem)
        for st in self.strands:
            start_idem = self.local_pmc.pairid[st[0]]
            if start_idem != -1:
                if start_idem not in self.all_hor:
                    print(left_idem, strands)
                self.all_hor.remove(start_idem)
        self.all_hor = tuple(self.all_hor)
        self.single_hor = tuple([i for i in self.all_hor if len(self.local_pmc.pairs[i]) == 1])
        self.double_hor = tuple([i for i in self.all_hor if len(self.local_pmc.pairs[i]) == 2])
Example #22
def parse_range(number_string):
    """Parse a range and return a tuple with a low and high number as floats.

    We will also parse regular numbers, in which case the tuple will
    only have one item.
    """
    if type(number_string) in [int, float]:
        return (float(number_string), None)
    nums = convert.RANGE_MATCHER.split(number_string.strip())
    if len(nums) > 2:
        debug("WARNING: String %s does not appear to be a normal range." % number_string, 0)
        retval = [convert.frac_to_float(n) for n in nums]
        # filter any non-numbers before we take the first and last
        retval = [x for x in retval if x]
        if len(retval) > 2:
            debug("Parsing range as %s-%s" % (retval[0], retval[-1]), 0)
            retval = retval[0], retval[-1]
    else:
        retval = [convert.frac_to_float(n) for n in nums]
    if len(retval) == 2:
        return tuple(retval)
    elif len(retval) == 1:
        return tuple(retval + [None])
    else:
        return (None, None)
Example #23
def declare_chain(
    name="",
    action="",
    ext_in="",
    ext_out="",
    reentrant=True,
    color="BLUE",
    install=0,
    before=None,
    after=None,
    decider=None,
    rule=None,
    scan=None,
):
    """
	see Tools/flex.py for an example
	while i do not like such wrappers, some people really do
	"""
    if before is None:
        before = []
    if after is None:
        after = []

    action = action or rule
    if isinstance(action, str):
        act = Task.simple_task_type(name, action, color=color)
    else:
        act = Task.task_type_from_func(name, action, color=color)
    act.ext_in = tuple(Utils.to_list(ext_in))
    act.ext_out = tuple(Utils.to_list(ext_out))
    act.before = Utils.to_list(before)
    act.after = Utils.to_list(after)
    act.scan = scan

    def x_file(self, node):
        if decider:
            ext = decider(self, node)
        else:
            ext = ext_out

        if isinstance(ext, str):
            out_source = node.change_ext(ext)
            if reentrant:
                self.allnodes.append(out_source)
        elif isinstance(ext, list):
            out_source = [node.change_ext(x) for x in ext]
            if reentrant:
                for i in range((reentrant is True) and len(out_source) or reentrant):
                    self.allnodes.append(out_source[i])
        else:
            # XXX: useless: it will fail on Utils.to_list above...
            raise Utils.WafError("do not know how to process {0!s}".format(str(ext)))

        tsk = self.create_task(name, node, out_source)

        if node.__class__.bld.is_install:
            tsk.install = install

    declare_extension(act.ext_in, x_file)
    return x_file
Example #24
def str_aligned(results, header=None):
    """
    Given a tuple, generate a nicely aligned string form.
    >>> results = [["a","b","cz"],["d","ez","f"],[1,2,3]]
    >>> print(str_aligned(results))
    a    b   cz
    d   ez    f
    1    2    3

    Args:
        results: 2d sequence of arbitrary types.
        header: optional header

    Returns:
        Aligned string output in a table-like format.
    """
    k = list(zip(*results))
    stringlengths = list()
    count = 0
    for i in k:
        col_max_len = max([len(str(m)) for m in i])
        if header is not None:
            col_max_len = max([len(str(header[count])), col_max_len])
        stringlengths.append(col_max_len)
        count += 1
    format_string = "   ".join(["%" + str(d) + "s" for d in stringlengths])
    returnstr = ""
    if header is not None:
        header_str = format_string % tuple(header)
        returnstr += header_str + "\n"
        returnstr += "-" * len(header_str) + "\n"
    return returnstr + "\n".join([format_string % tuple(result) for result in results])
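
When a header is passed, the column widths also account for the header strings and a dashed rule is added under the header line; for instance (widths derived from the code above):

results = [["a", "b", "cz"], ["d", "ez", "f"], [1, 2, 3]]
print(str_aligned(results, header=["x", "y", "zz"]))
# x    y   zz
# -----------
# a    b   cz
# d   ez    f
# 1    2    3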
Example #25
def get_emboss_version():
    """Returns a tuple of three ints, e.g. (6,1,0)"""
    # Windows and Unix versions of EMBOSS seem to differ in
    # which lines go to stdout and stderr - so merge them.
    child = subprocess.Popen(
        exes["embossversion"],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        shell=(sys.platform != "win32"),
    )
    stdout, stderr = child.communicate()
    child.stdout.close()  # This is both stdout and stderr
    del child
    assert stderr is None  # Send to stdout instead
    for line in stdout.split("\n"):
        if line.strip() == "Reports the current EMBOSS version number":
            pass
        elif line.startswith("Writes the current EMBOSS version number"):
            pass
        elif line.count(".") == 2:
            return tuple(int(v) for v in line.strip().split("."))
        elif line.count(".") == 3:
            # e.g. I installed mEMBOSS-6.2.0.1-setup.exe
            # which reports 6.2.0.1 - for this return (6,2,0)
            return tuple(int(v) for v in line.strip().split("."))[:3]
        else:
            # Either we can't understand the output, or this is really
            # an error message not caught earlier (e.g. not in English)
            raise MissingExternalDependencyError("Install EMBOSS if you want to use Bio.Emboss (%s)." % line)
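
Stripped of the subprocess handling, the useful idiom here is turning a dotted version string into a tuple of ints, which then compares element-wise. A quick sketch with a made-up version string:

line = "6.2.0.1"  # hypothetical embossversion output
version = tuple(int(v) for v in line.strip().split("."))[:3]
print(version)               # -> (6, 2, 0)
print(version >= (6, 1, 0))  # tuple comparison is element-wise, so True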
Example #26
File: solvers.py Project: jinz/YACS
 def __hash__(self):
     if self._hash is None:
         if self._old_hash:
             self._hash = hash(tuple(sorted(self.new.items()))) ^ self._old_hash
         else:
             self._hash = hash(tuple(sorted(self.items())))
     return self._hash
Example #27
 def addPlane(self, pt, norm, coeff):
     pidx = self.n_planes
     self.n_planes += 1
     self.plane_pts[pidx] = tuple(pt) + (0,)
     self.plane_norms[pidx] = tuple(norm) + (0,)
     self.plane_coeffs[pidx] = coeff
     self.set_planes()
Example #28
    def __str__(self):
        # base = self.name+' '+self.typ
        base = self.typ
        if self.typ == "Text":
            if self.actor:
                return base + " {}: ".format(self.actor) + self.text[:50]
            else:
                return base + ": " + self.text[:50]

        elif self.typ in ("Cast", "Scene"):
            return base + ": {} {} {}".format(self.evt, self.target, self.info)
        elif self.typ == "Game":
            if self.evt == "jump":
                return base + ": {} {}".format(self.evt, self.target)
            else:
                return base + ": {} {} {}".format(self.evt, self.target, self.info)
        elif self.typ == "Move":
            return "{} T={}-{}".format(self.evt, self.start, self.info)
        elif self.typ == "Fade":
            return base + "{}->{} T={}-{}".format(tuple(self.source), tuple(self.target), self.start, self.info)
        elif self.typ == "Pan":
            return base + "{} {}-T={}".format(tuple(self.target), self.start, self.info)
        elif self.typ == "Zoom":
            return base + "{} {}-T={}".format(self.target, self.start, self.info)
        elif self.typ == "Sound":
            return base + ": {} {}".format(self.text, self.info)
        elif self.typ == "Python":
            return base + ": {} {}".format(self.info, self.text[:30])
        return base
Example #29
def tokens_ngram_processor(tokens, ngram_len):
    """
    Given a `tokens` sequence or iterable of Tokens, return an iterator of
    tuples of Tokens where each tuple has length `ngram_len`. Buffers at most
    `ngram_len` items from the iterable. The returned tuples contain fewer
    than `ngram_len` items when the number of available tokens is smaller
    than `ngram_len`:

    - between the beginning of the stream and the first gap
    - between the last gap and the end of the stream
    - between two gaps

    In these cases, shorter ngrams can be returned.
    """
    ngram = deque()
    for token in tokens:
        if len(ngram) == ngram_len:
            yield tuple(ngram)
            ngram.popleft()
        if token.gap:
            ngram.append(token)
            yield tuple(ngram)
            # reset
            ngram.clear()
        else:
            ngram.append(token)
    if ngram:
        # yield last ngram
        yield tuple(ngram)
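
A small way to watch the gap handling, assuming a minimal stand-in Token that only carries a value and a gap flag (the real Token type in the source project has more fields):

from collections import deque, namedtuple  # deque is what tokens_ngram_processor itself uses

Token = namedtuple("Token", "value gap")
tokens = [Token("a", False), Token("b", False), Token("c", True), Token("d", False), Token("e", False)]
for ngram in tokens_ngram_processor(tokens, ngram_len=2):
    print(tuple(t.value for t in ngram))
# -> ('a', 'b'), then ('b', 'c') flushed at the gap, then ('d', 'e')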
Example #30
    def to_list(self, csv_file):
        """ 串接每日資料 舊→新

            :param csv csv_file: csv files
            :rtype: list
        """
        tolist = []
        for i in csv_file:
            i = [value.strip().replace(",", "") for value in i]
            try:
                for value in (1, 2, 3, 4, 5, 6, 8):
                    i[value] = float(i[value])
            except (IndexError, ValueError):
                pass
            tolist.append(i)
        if self._twse:
            if tolist:
                self.__info = (tolist[0][0].split(" ")[1], tolist[0][0].split(" ")[2].decode("cp950"))
                self.__raw_rows_name = tolist[1]
                return tuple(tolist[2:])
            return tuple([])
        else:
            if len(tolist) > 6:
                self.__raw_rows_name = tolist[4]
                self.__info = (self.__get_no, OTCNo().all_stock[self.__get_no])
                if len(tolist[5:]) > 1:
                    return tuple(tolist[5:-1])
            return tuple([])