Example #1
    def getLaplacian(mat):
        # Group rows belonging to the same person by integer-dividing
        # the id by cav (translated from the original Japanese comment).
        # X, wholerowIndex, wholecolumnIndex and years are free names
        # from the enclosing scope.
        (r, c, y) = X.shape
        cav = 100000
        categoryold = -1
        blocks = []
        begin = -1
        for r, row in enumerate(wholerowIndex):
            category = row // cav
            if category != categoryold:
                if begin >= 0:
                    blocks.append(createCompleteLaplacian(r - begin))
                begin = r
                categoryold = category
        blocks.append(createCompleteLaplacian(len(wholerowIndex) - begin))
        L1 = scipy.linalg.block_diag(*blocks)

        blocks = []
        begin = -1
        categoryold = -1
        for c, col in enumerate(wholecolumnIndex):
            category = col // cav
            if category != categoryold:
                if begin >= 0:
                    blocks.append(createCompleteLaplacian(c - begin))
                begin = c
                categoryold = category
        blocks.append(createCompleteLaplacian(len(wholecolumnIndex) - begin))
        L2 = scipy.linalg.block_diag(*blocks)

        T = createChainLaplacian(len(years))
        return [L1, L2, T]
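Example #1 leans on two helpers that are not shown. A minimal sketch of what createCompleteLaplacian and createChainLaplacian plausibly compute, judging only by their names (the graph Laplacians of a complete graph and of a chain/path graph with n >= 2 nodes), and of how scipy.linalg.block_diag stitches the per-group blocks together:

    import numpy as np
    import scipy.linalg

    def createCompleteLaplacian(n):
        # Laplacian of the complete graph K_n: degree n - 1 on the
        # diagonal, -1 everywhere else.
        return n * np.eye(n) - np.ones((n, n))

    def createChainLaplacian(n):
        # Laplacian of a path graph: degree 2 inside, 1 at the ends.
        L = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
        L[0, 0] = L[-1, -1] = 1.0
        return L

    # Two groups of sizes 3 and 2 become one block-diagonal matrix.
    L = scipy.linalg.block_diag(createCompleteLaplacian(3), createChainLaplacian(2))
    print(L.shape)  # (5, 5)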
Example #2
    def _generate_clustered_design(self, infolist):
        """Generates condition information for sparse-clustered
        designs.

        """
        infoout = deepcopy(infolist)
        for i, info in enumerate(infolist):
            infoout[i].conditions = None
            infoout[i].onsets = None
            infoout[i].durations = None
            if info.conditions:
                img = load(self.inputs.functional_runs[i])
                nscans = img.get_shape()[3]
                reg, regnames = self._cond_to_regress(info, nscans)
                if hasattr(infoout[i], "regressors") and infoout[i].regressors:
                    if not infoout[i].regressor_names:
                        infoout[i].regressor_names = ["R%d" % j for j in range(len(infoout[i].regressors))]
                else:
                    infoout[i].regressors = []
                    infoout[i].regressor_names = []
                for j, r in enumerate(reg):
                    regidx = len(infoout[i].regressors)
                    infoout[i].regressor_names.insert(regidx, regnames[j])
                    infoout[i].regressors.insert(regidx, r)
        return infoout
Example #3
    def __eq__(self, other):
        if (
            not issubclass(type(other), Signature)
            or self.return_annotation != other.return_annotation
            or len(self.parameters) != len(other.parameters)
        ):
            return False

        other_positions = dict((param, idx) for idx, param in enumerate(other.parameters.keys()))

        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if idx != other_idx or param != other.parameters[param_name]:
                        return False

        return True
Example #4
def ornt_transform(start_ornt, end_ornt):
    """Return the orientation that transforms from `start_ornt` to `end_ornt`.
    
    Parameters
    ----------
    start_ornt : (n,2) orientation array
        Initial orientation.
        
    end_ornt : (n,2) orientation array
        Final orientation.
       
    Returns
    -------
    orientations : (n, 2) ndarray
       The orientation that will transform the `start_ornt` to the `end_ornt`.
    """
    start_ornt = np.asarray(start_ornt)
    end_ornt = np.asarray(end_ornt)
    if start_ornt.shape != end_ornt.shape:
        raise ValueError("The orientations must have the same shape")
    if start_ornt.shape[1] != 2:
        raise ValueError("Invalid shape for an orientation: %s" % start_ornt.shape)
    result = np.empty_like(start_ornt)
    for end_in_idx, (end_out_idx, end_flip) in enumerate(end_ornt):
        for start_in_idx, (start_out_idx, start_flip) in enumerate(start_ornt):
            if end_out_idx == start_out_idx:
                if start_flip == end_flip:
                    flip = 1
                else:
                    flip = -1
                result[end_in_idx, :] = [start_in_idx, flip]
                break
        else:
            raise ValueError("Unable to find out axis %d in start_ornt" % end_out_idx)
    return result
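A quick check of Example #4, assuming nibabel-style orientation arrays where each row is (output axis index, flip):

    import numpy as np

    start = np.array([[0, 1], [1, 1], [2, 1]])  # identity orientation
    end = np.array([[0, -1], [1, -1], [2, 1]])  # first two axes flipped
    print(ornt_transform(start, end))
    # [[ 0 -1]
    #  [ 1 -1]
    #  [ 2  1]]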
Example #5
    def parallel(self, *tasklist):
        """Run tasks in parallel"""

        pids = []
        old_log_filename = self.log_filename
        for i, task in enumerate(tasklist):
            assert isinstance(task, (tuple, list))
            self.log_filename = old_log_filename + (".%d" % i)
            task_func = lambda: task[0](*task[1:])
            pids.append(parallel.fork_start(self.resultdir, task_func))

        old_log_path = os.path.join(self.resultdir, old_log_filename)
        old_log = open(old_log_path, "a")
        exceptions = []
        for i, pid in enumerate(pids):
            # wait for the task to finish
            try:
                parallel.fork_waitfor(self.resultdir, pid)
            except Exception as e:
                exceptions.append(e)
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                old_log.write(new_log.read())
                new_log.close()
                old_log.flush()
                os.remove(new_log_path)
        old_log.close()
        if exceptions:
            # surface the first subtask failure once all logs are merged
            raise exceptions[0]
Example #6
def _exportExcel(iteration):
    """ Exports the stories in an iteration as an excel sheet. """
    response = HttpResponse(mimetype="application/vnd.ms-excel")
    response["Content-Disposition"] = "attachment; filename=iteration.xls"
    stories = iteration.stories.all().order_by("rank")
    w = xlwt.Workbook()
    ws = w.add_sheet("Iteration Export")
    headers = _getHeaders(iteration.project)
    heading_xf = ezxf("font: bold on; align: wrap on, vert centre, horiz center")

    # Write out a header row.
    for idx, header in enumerate(headers):
        logger.debug(header[1])
        ws.write(0, idx, header[1], heading_xf)
        ws.col(idx).width = 37 * header[0]

    # Write out all the data.
    for idx, story in enumerate(stories):

        for hidx, header in enumerate(headers):
            f = header[2]
            ws.write(1 + idx, hidx, f(story), header[3])

    w.save(response)
    return response
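The headers consumed in Example #6 are not defined here; from the way they are indexed, each entry looks like a (column width, label, value-extracting callable, cell style) tuple. A hypothetical _getHeaders-style list under that assumption (ezxf is xlwt.easyxf):

    from xlwt import easyxf as ezxf

    plain_xf = ezxf("align: wrap on, vert top")
    # Hypothetical header spec: (width, label, value function, style).
    headers = [
        (10, "Rank", lambda story: story.rank, plain_xf),
        (40, "Summary", lambda story: story.summary, plain_xf),
    ]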
Example #7
    def save_to_packet(self):
        """
        Generate a chunk packet.
        """

        mask = 0
        packed = []

        ls = segment_array(self.blocklight)

        for i, section in enumerate(self.sections):
            if any(section.blocks):
                mask |= 1 << i
                packed.append(section.blocks.tostring())

        for i, section in enumerate(self.sections):
            if mask & 1 << i:
                packed.append(pack_nibbles(section.metadata))

        for i, l in enumerate(ls):
            if mask & 1 << i:
                packed.append(pack_nibbles(l))

        for i, section in enumerate(self.sections):
            if mask & 1 << i:
                packed.append(pack_nibbles(section.skylight))

        # Fake the biome data.
        packed.append("\x00" * 256)

        packet = make_packet("chunk", x=self.x, z=self.z, continuous=True, primary=mask, add=0x0, data="".join(packed))
        return packet
Example #8
    def display_json(self, table):

        records = {}
        keys = table[0]
        table.pop(0)

        for row in table:
            data = {}

            for idx, val in enumerate(row):
                val = self.num(val)
                data[keys[idx]] = val

            if "device" in data:
                deviceKey = data["device"]

                for key in data:
                    if key != "device":
                        records[deviceKey + " " + key] = data[key]

        if self.smarterror:
            print "ERROR: smart not installed or not working!"
            return None

        return records
Example #9
def path_tree_overlaps(nodes, weights, length):
    """
    Find the path tree overlap for every pair of nodes for paths of a specific length.

    :param nodes: which nodes to look at
    :param weights: full weight matrix (rows are targs, cols are srcs)
    :param length: number of edges in paths
    :return: overlap matrix, list of path trees for specified nodes
    """

    path_trees = [
        list(chain(*[paths_of_length(weights, path_len, start=node) for path_len in range(1, length + 1)]))
        for node in nodes
    ]

    # path_trees = [paths_of_length(weights, length, start=node) for node in nodes]

    overlap = np.zeros((len(nodes), len(nodes)), dtype=int)

    # compute each node's path-tree element set once, then intersect
    tree_elements = [set(chain(*tree)) for tree in path_trees]

    for node_0_ctr in range(len(nodes)):
        for node_1_ctr in range(len(nodes)):
            overlap[node_0_ctr, node_1_ctr] = len(tree_elements[node_0_ctr] & tree_elements[node_1_ctr])

    return overlap, path_trees
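paths_of_length is not shown in Example #9. A minimal stand-in consistent with the docstring's convention (rows of weights are targets, columns are sources; no cycle checking), followed by a small run:

    import numpy as np
    from itertools import chain

    def paths_of_length(weights, length, start):
        # enumerate directed paths with `length` edges starting at `start`;
        # weights[targ, src] != 0 encodes an edge src -> targ
        paths = [(start,)]
        for _ in range(length):
            paths = [p + (t,) for p in paths for t in np.nonzero(weights[:, p[-1]])[0]]
        return paths

    # 3-node chain 0 -> 1 -> 2
    w = np.zeros((3, 3))
    w[1, 0] = w[2, 1] = 1.0
    overlap, trees = path_tree_overlaps([0, 1], w, 2)
    print(overlap)
    # [[3 2]
    #  [2 2]]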
Example #10
    def cstr(cls, *args, **kwargs):
        if len(args) > len(req_args) + len(other_args):
            raise TypeError(
                "Too many arguments (expected at least %d and no more than "
                "%d)" % (len(req_args), len(req_args) + len(other_args))
            )

        args1, args2 = args[: len(req_args)], args[len(req_args) :]
        req = dict((i, v) for i, v in enumerate(args1))
        other = dict(zip(other_args, args2))

        for k, v in kwargs.items():
            if k in req_args_pos:
                pos = req_args_pos[k]
                if pos in req:
                    raise TypeError("Multiple values for field %s" % k)
                req[pos] = v
            elif k in other_args_pos:
                if k in other:
                    raise TypeError("Multiple values for field %s" % k)
                other[k] = v
            else:
                raise TypeError("Unknown field name %s" % k)

        args = []
        for i, k in enumerate(req_args):
            if i not in req:
                raise TypeError("Missing value for field %s" % k)
            args.append(req[i])

        inst = tuple.__new__(cls, args)
        inst.__dict__.update(other)
        return inst
Example #11
    def castling(self):
        import copy

        pairs = self.extract_pairs()
        if not pairs:
            return False

        points = []
        for i, pair in enumerate(pairs[::-1]):
            point = copy.deepcopy(pair["l"])
            point["name"] = "%sl" % pairs[i]["name"]
            point["number"] = pairs[i]["l"]["number"]
            point["preset"]["pointname"] = point["name"]
            if not i:
                point["preset"]["startp"] = True
                point["preset"]["tripledash"] = None
            elif point["preset"].get("startp"):
                del point["preset"]["startp"]
                point["preset"]["tripledash"] = 1
            points.append(point)

        for i, pair in enumerate(pairs):
            point = copy.deepcopy(pair["r"])
            point["name"] = "%sr" % pairs[len(pairs) - i - 1]["name"]
            point["number"] = pairs[len(pairs) - i - 1]["r"]["number"]
            point["preset"]["pointname"] = point["name"]
            points.append(point)

        self.points = points
Example #12
def optional_return_type(req_args, other_args):
    """Sort of namedtuple but with name-only fields.

    When deconstructing a namedtuple, you have to get all the fields:

    >>> o = namedtuple('T', ['a', 'b', 'c'])(1, 2, 3)
    >>> a, b = o
    ValueError: too many values to unpack

    You thus cannot easily add new return values. This class allows it:

    >>> o2 = optional_return_type(['a', 'b'], ['c'])(1, 2, 3)
    >>> a, b = o2
    >>> c = o2.c
    """
    if len(set(req_args) | set(other_args)) != len(req_args) + len(other_args):
        raise ValueError("duplicate field names across req_args and other_args")

    # Maps argument name to position in each list
    req_args_pos = dict((n, i) for i, n in enumerate(req_args))
    other_args_pos = dict((n, i) for i, n in enumerate(other_args))

    def cstr(cls, *args, **kwargs):
        if len(args) > len(req_args) + len(other_args):
            raise TypeError(
                "Too many arguments (expected at least %d and no more than "
                "%d)" % (len(req_args), len(req_args) + len(other_args))
            )

        args1, args2 = args[: len(req_args)], args[len(req_args) :]
        req = dict((i, v) for i, v in enumerate(args1))
        other = dict(zip(other_args, args2))

        for k, v in kwargs.items():
            if k in req_args_pos:
                pos = req_args_pos[k]
                if pos in req:
                    raise TypeError("Multiple values for field %s" % k)
                req[pos] = v
            elif k in other_args_pos:
                if k in other:
                    raise TypeError("Multiple values for field %s" % k)
                other[k] = v
            else:
                raise TypeError("Unknown field name %s" % k)

        args = []
        for i, k in enumerate(req_args):
            if i not in req:
                raise TypeError("Missing value for field %s" % k)
            args.append(req[i])

        inst = tuple.__new__(cls, args)
        inst.__dict__.update(other)
        return inst

    dct = {"__new__": cstr}
    for i, n in enumerate(req_args):
        dct[n] = property(operator.itemgetter(i))
    return type(str("OptionalReturnType"), (tuple,), dct)
Example #13
def time_statistics(*time):
    """Calculates time statistics. Time is a list of variable length of unix timestamps.
  This function calculates the total duration (first value is assumed beginning, last value is assumed end).
  And from there it calculates the time spent in each phase.

  :param *time: unit time stamps
  :type *time: int
  :return: pre-formatted line
  """
    diff = []
    percentages = []
    total_time = time[-1] - time[0]
    for i, item in enumerate(time):
        if i > 0:
            diff_time = item - time[i - 1]
            diff.append(diff_time)

            percent = (diff_time / total_time) * 100
            percentages.append(percent)

    total = "Total run time: {0} seconds.".format(total_time)
    percentage = ""
    for i, item in enumerate(percentages):
        if percentage == "":
            percentage = "{0:.3f}% of the time was in phase {{{1}}}".format(item, i)

        else:
            percentage = "{0}, {1:.3f}% of the time was in {{{2}}} phase".format(percentage, item, i)

    line = "\n".join([total, percentage])
    return line
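For instance, under Python 3 (where / is true division), four timestamps spanning 120 seconds give:

    print(time_statistics(0, 30, 90, 120))
    # Total run time: 120 seconds.
    # 25.000% of the time was in phase {0}, 50.000% of the time was in {1} phase, 25.000% of the time was in {2} phase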
Example #14
    def _preprocess(pat, variable_regex):
        first_appearances = []

        seen = set()
        for i, c in enumerate(pat):
            # lowercase alphanumeric characters are the pattern
            # variables; record only the first appearance of each
            if c.isalnum() and c.islower() and c not in seen:
                seen.add(c)
                first_appearances.append((i, c))

        back_refs = {}
        len_var_regex = len(variable_regex)
        for num, (i, c) in enumerate(first_appearances):
            # if 'x' of 'yxyQxT-x-' gets replaced:
            # y    x    y    Q    x    T    -    x    -
            # 0    1    2    3    4    5    6    7    8
            # y    (-+) y    Q    \1   T    -    \1   -
            # 0    1234 5    6    78   9    A    BC   D
            back_ref_regex = r"\%d" % (num + 1)
            back_refs[num] = c
            split_point = i + len_var_regex
            pat = pat.replace(c, variable_regex, 1)
            pat = "".join((pat[:split_point], pat[split_point:].replace(c, back_ref_regex)))

        return "^%s$" % pat, back_refs
Example #15
    def _print_iterations_data(result):
        raw_data = result["data"]["raw"]
        headers = ["iteration", "full duration"]
        float_cols = ["full duration"]
        atomic_actions = []
        for row in raw_data:
            # find first non-error result to get atomic actions names
            if not row["error"] and "atomic_actions" in row:
                atomic_actions = row["atomic_actions"].keys()
                break
        for row in raw_data:
            if row["atomic_actions"]:
                for (c, a) in enumerate(atomic_actions, 1):
                    action = "%(no)i. %(action)s" % {"no": c, "action": a}
                    headers.append(action)
                    float_cols.append(action)
                break
        table_rows = []
        formatters = dict(zip(float_cols, [cliutils.pretty_float_formatter(col, 3) for col in float_cols]))
        for (c, r) in enumerate(raw_data, 1):
            dlist = [c]
            dlist.append(r["duration"])
            if r["atomic_actions"]:
                for action in atomic_actions:
                    dlist.append(r["atomic_actions"].get(action) or 0)
            table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
        cliutils.print_list(table_rows, fields=headers, formatters=formatters)
        print()
Example #16
    def FindCoreMos():
        if 0:
            return range(nCore)
        # ^- just take first nCore orbitals as core

        # project Hartree-Fock MOs onto (selected) core AOs in order to
        # figure out what MOs to delete. Typically those should be the
        # first nCore MOs (i.e., the nCore MOs with the lowest
        # energies), but when trying some unusual cores or certain
        # ionic combinations, this might not always be the case.
        CoreP_AO = 1.0 * zeros((nOrb, nOrb))
        for i in CoreOrbitals:
            CoreP_AO[i, i] = 1.0
        CoreNess = diag(mdot(Orbs.T, CoreP_AO, Orbs))
        CoreNess = [(o, io) for (io, o) in enumerate(CoreNess)]
        CoreNess.sort(reverse=True)
        CoreMos = [io for (o, io) in CoreNess[:nCore]]
        CoreMos.sort()
        maxvp, imaxvp = CoreNess[nCore]  # maximum valence projection
        mincp, imincp = CoreNess[nCore - 1]  # minimum core projection
        if mincp / maxvp < 10.0:
            print(
                "\nWARNING: Selected core AOs are not well separated at HF level."
                + "\n         Minimum projection of core MOs onto core AOs:    %8.4f  (Ew = %12.6f)" % (mincp, Hfr.Ews[imincp])
                + "\n         Maximum projection of valence MOs onto core AOs: %8.4f  (Ew = %12.6f)" % (maxvp, Hfr.Ews[imaxvp])
                + "\n"
            )
        if CoreMos != list(range(nCore)):
            print(
                "\nWARNING: Core orbitals are not the first %i canonical HF orbitals!" % nCore
                + "\n         Deleting MOs: %s" % "  ".join(["%6i" % io for (o, io) in CoreNess[:nCore]])
                + "\n         Core-ity:     %s" % "  ".join(["%6.3f" % o for (o, io) in CoreNess[:nCore]])
                + "\n"
            )
        return array(CoreMos)
Example #17
def rnn_decoder(decoder_inputs, initial_state, cell, scope=None):
    """RNN Decoder that creates training and sampling sub-graphs.

    Args:
        decoder_inputs: Inputs for decoder, list of tensors.
                        This is used only in training sub-graph.
        initial_state: Initial state for the decoder.
        cell: RNN cell to use for decoder.
        scope: Scope to use, if None new will be produced.

    Returns:
        List of tensors for outputs and states for training and sampling sub-graphs.
    """
    with vs.variable_scope(scope or "dnn_decoder"):
        states, sampling_states = [initial_state], [initial_state]
        outputs, sampling_outputs = [], []
        with ops.op_scope([decoder_inputs, initial_state], "training"):
            for i, inp in enumerate(decoder_inputs):
                if i > 0:
                    vs.get_variable_scope().reuse_variables()
                output, new_state = cell(inp, states[-1])
                outputs.append(output)
                states.append(new_state)
        with ops.op_scope([initial_state], "sampling"):
            for i, _ in enumerate(decoder_inputs):
                if i == 0:
                    sampling_outputs.append(outputs[i])
                    sampling_states.append(states[i])
                else:
                    sampling_output, sampling_state = cell(sampling_outputs[-1], sampling_states[-1])
                    sampling_outputs.append(sampling_output)
                    sampling_states.append(sampling_state)
    return outputs, states, sampling_outputs, sampling_states
Example #18
def _prettyplot(df, prep, prepi, out_file):
    """Plot using prettyplot wrapper around matplotlib.
    """
    cats = ["concordant", "discordant-missing-total", "discordant-extra-total", "discordant-shared-total"]
    vtypes = df["variant.type"].unique()
    fig, axs = ppl.subplots(len(vtypes), len(cats))
    callers = sorted(df["caller"].unique())
    width = 0.8
    for i, vtype in enumerate(vtypes):
        for j, cat in enumerate(cats):
            ax = axs[i][j]
            if i == 0:
                ax.set_title(cat_labels[cat], size=14)
            ax.get_yaxis().set_ticks([])
            if j == 0:
                ax.set_ylabel(vtype_labels[vtype], size=14)
            vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
            ppl.bar(ax, np.arange(len(callers)), vals, color=ppl.colors.set2[prepi], width=width)
            ax.set_ylim(0, maxval)
            if i == len(vtypes) - 1:
                ax.set_xticks(np.arange(len(callers)) + width / 2.0)
                ax.set_xticklabels([caller_labels.get(x, x) for x in callers], size=8, rotation=45)
            else:
                ax.get_xaxis().set_ticks([])
            _annotate(ax, labels, vals, np.arange(len(callers)), width)
    fig.text(0.5, 0.95, prep_labels[prep], horizontalalignment="center", size=16)
    fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15, wspace=0.1, hspace=0.1)
    # fig.tight_layout()
    fig.set_size_inches(10, 5)
    fig.savefig(out_file)
Example #19
    def output_table(self):
        aligns = self.token["align"]
        aligns_length = len(aligns)
        cell = self.renderer.placeholder()

        # header part
        header = self.renderer.placeholder()
        for i, value in enumerate(self.token["header"]):
            align = aligns[i] if i < aligns_length else None
            flags = {"header": True, "align": align}
            cell += self.renderer.table_cell(self.inline(value), **flags)

        header += self.renderer.table_row(cell)

        # body part
        body = self.renderer.placeholder()
        for i, row in enumerate(self.token["cells"]):
            cell = self.renderer.placeholder()
            for j, value in enumerate(row):
                align = aligns[j] if j < aligns_length else None
                flags = {"header": False, "align": align}
                cell += self.renderer.table_cell(self.inline(value), **flags)
            body += self.renderer.table_row(cell)

        return self.renderer.table(header, body)
Example #20
def weight_bonds(words, idfs, mol):
    # Compute the word matches for bonds in the molecule
    bond_words = defaultdict(set)
    # Keep track of counts for use in TF-IDF later
    doc_word_counts = defaultdict(float)
    for i, w in enumerate(words):
        mol_matches = mol.GetSubstructMatches(w, uniquify=False)
        if mol_matches:
            doc_word_counts[i] += len(mol_matches)
            for m in mol_matches:
                cmap = dict(enumerate(m))
                for b in w.GetBonds():
                    start = b.GetBeginAtomIdx()
                    end = b.GetEndAtomIdx()
                    bond_words[frozenset([cmap[start], cmap[end]])].add(i)

    # Compute the maximal words
    # words_to_use = set(maximal_words(words,doc_word_counts.keys()))
    words_to_use = doc_word_counts.keys()

    # Compute the TF-IDF scores for each word
    maxtf = float(max(doc_word_counts[t] for t in words_to_use))
    score = defaultdict(float, ((t, doc_word_counts[t] / maxtf * idfs[t]) for t in words_to_use))
    # Alternate, modified TF-IDF score
    # score = defaultdict(float,((t,log(1+doc_word_counts[t]/maxtf)*idfs[t])
    #                          for t in maxwords))

    # Get the combined TF-IDF scores for each bond
    bond_weights = dict((k, sum(score[t] for t in v)) for k, v in bond_words.items())
    # Return the bond values
    return bond_weights
Example #21
    def flux_matrix(self, geo):
        """Returns a sparse matrix which can be used to multiply a vector of connection table values for underground
        blocks, to give approximate average fluxes of those values at the block centres."""
        natm = geo.num_atmosphere_blocks
        nele = geo.num_underground_blocks
        conindex = dict([((c.block[0].name, c.block[1].name), i) for i, c in enumerate(self.connectionlist)])
        from scipy import sparse

        A = sparse.lil_matrix((3 * nele, self.num_connections))
        if not self.block_centres_defined:
            self.calculate_block_centres(geo)
        for iblk, blk in enumerate(self.blocklist[natm:]):
            ncons = blk.num_connections
            for conname in blk.connection_name:
                otherindex, sgn = [(0, -1), (1, 1)][conname[0] == blk.name]
                blk2name = conname[otherindex]
                icon = conindex[conname]
                centre2 = self.block[blk2name].centre
                if centre2 <> None:
                    n = centre2 - blk.centre
                    n /= np.linalg.norm(n)
                else:
                    n = np.array([0, 0, 1])  # assumed connection to atmosphere
                for i, ni in enumerate(n):
                    A[3 * iblk + i, icon] = -sgn * ni / (ncons * self.connection[conname].area)
        return A
Example #22
    def _write_voc_results_file(self, all_boxes):
        use_salt = self.config["use_salt"]
        comp_id = "comp4"
        if use_salt:
            comp_id += "-{}".format(os.getpid())

        # VOCdevkit/results/VOC2007/Main/comp4-44503_det_test_aeroplane.txt
        path = os.path.join(self._devkit_path, "results", "VOC" + self._year, "Main", comp_id + "_")
        for cls_ind, cls in enumerate(self.classes):
            if cls == "__background__":
                continue
            print "Writing {} VOC results file".format(cls)
            filename = path + "det_" + self._image_set + "_" + cls + ".txt"
            with open(filename, "wt") as f:
                for im_ind, index in enumerate(self.image_index):
                    dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in range(dets.shape[0]):
                        f.write(
                            "{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n".format(
                                index, dets[k, -1], dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1
                            )
                        )
        return comp_id
Example #23
    def _create_chunks(context, goals):
        def discriminator(target):
            for i, goal in enumerate(goals):
                if goal.group.predicate(target):
                    return i
            return "other"

        # TODO(John Sirois): coalescing should be made available in another spot; InternalTarget is jvm
        # specific, and all we care about is that the Targets have dependencies defined
        coalesced = InternalTarget.coalesce_targets(context.targets(is_internal), discriminator)
        coalesced = list(reversed(coalesced))

        def not_internal(target):
            return not is_internal(target)

        rest = OrderedSet(context.targets(not_internal))

        chunks = [rest] if rest else []
        flavor = None
        chunk_start = 0
        for i, target in enumerate(coalesced):
            target_flavor = discriminator(target)
            if target_flavor != flavor and i > chunk_start:
                chunks.append(OrderedSet(coalesced[chunk_start:i]))
                chunk_start = i
            flavor = target_flavor
        if chunk_start < len(coalesced):
            chunks.append(OrderedSet(coalesced[chunk_start:]))

        context.log.debug("::: created chunks(%d)" % len(chunks))
        for i, chunk in enumerate(chunks):
            context.log.debug("  chunk(%d):\n\t%s" % (i, "\n\t".join(sorted(map(str, chunk)))))

        return chunks
Example #24
    def test_lookup_symbol_resolve_multiple(self):

        # Incrementing by two so that start and end dates for each
        # generated Asset don't overlap (each Asset's end_date is the
        # day after its start date.)
        dates = pd.date_range("2013-01-01", freq="2D", periods=5, tz="UTC")
        df = pd.DataFrame.from_records(
            [
                {
                    "sid": i,
                    "file_name": "existing",
                    "company_name": "existing",
                    "start_date_nano": date.value,
                    "end_date_nano": (date + timedelta(days=1)).value,
                    "exchange": "NYSE",
                }
                for i, date in enumerate(dates)
            ]
        )

        finder = AssetFinder(df)
        for _ in range(2):  # Run checks twice to test for caching bugs.
            with self.assertRaises(SymbolNotFound):
                finder.lookup_symbol_resolve_multiple("non_existing", dates[0])

            with self.assertRaises(MultipleSymbolsFound):
                finder.lookup_symbol_resolve_multiple("existing", None)

            for i, date in enumerate(dates):
                # Verify that we correctly resolve multiple symbols using
                # the supplied date
                result = finder.lookup_symbol_resolve_multiple("existing", date)
                self.assertEqual(result.symbol, "existing")
                self.assertEqual(result.sid, i)
Example #25
def dofilehorz(filename):
    print(filename)
    out = open(filename.replace(".csv", ".trunc.csv"), "w")
    outcsv = UnicodeWriter(out)
    # do preparse
    with open(filename, "r") as f:
        for i, row in enumerate(UnicodeReader(f)):
            if i == 0:
                header = row
                headers = Counter(row)
                continue
            for c, cell in enumerate(row):
                size = (len(cell) // MAX_SIZE) + 1  # integer division is horrid.
                headers[header[c]] = max(headers[header[c]], size)
    # pass 2
    with open(filename, "r") as f:
        for i, row in enumerate(UnicodeReader(f)):
            if i == 0:
                newrow = []
                for c, cell in enumerate(header):
                    newrow.extend(["%s_%d" % (cell, r) for r in range(headers[cell])])
                outcsv.writerow(newrow)
                continue
            # populate dictionary
            d = OrderedDict()
            for c, cell in enumerate(row):
                for r in range(headers[header[c]]):
                    d["%s_%d" % (header[c], r)] = cell[MAX_SIZE * r : MAX_SIZE * (r + 1)]
            outcsv.writerow(d.values())
    out.close()
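The column-widening trick in Example #25 rests on one slice: a cell is cut into len(cell) // MAX_SIZE + 1 chunks of at most MAX_SIZE characters. In isolation:

    MAX_SIZE = 4
    cell = "abcdefghij"
    chunks = [cell[MAX_SIZE * r : MAX_SIZE * (r + 1)] for r in range(len(cell) // MAX_SIZE + 1)]
    print(chunks)  # ['abcd', 'efgh', 'ij']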
Example #26
    def test_purchase_invoice_with_subcontracted_item(self):
        wrapper = frappe.copy_doc(test_records[0])
        wrapper.get("entries")[0].item_code = "_Test FG Item"
        wrapper.insert()
        wrapper.load_from_db()

        expected_values = [["_Test FG Item", 90, 59], ["_Test Item Home Desktop 200", 135, 177]]
        for i, item in enumerate(wrapper.get("entries")):
            self.assertEqual(item.item_code, expected_values[i][0])
            self.assertEqual(item.item_tax_amount, expected_values[i][1])
            self.assertEqual(item.valuation_rate, expected_values[i][2])

        self.assertEqual(wrapper.net_total, 1250)

        # tax amounts
        expected_values = [
            ["_Test Account Shipping Charges - _TC", 100, 1350],
            ["_Test Account Customs Duty - _TC", 125, 1350],
            ["_Test Account Excise Duty - _TC", 140, 1490],
            ["_Test Account Education Cess - _TC", 2.8, 1492.8],
            ["_Test Account S&H Education Cess - _TC", 1.4, 1494.2],
            ["_Test Account CST - _TC", 29.88, 1524.08],
            ["_Test Account VAT - _TC", 156.25, 1680.33],
            ["_Test Account Discount - _TC", 168.03, 1512.30],
        ]

        for i, tax in enumerate(wrapper.get("other_charges")):
            self.assertEqual(tax.account_head, expected_values[i][0])
            self.assertEqual(tax.tax_amount, expected_values[i][1])
            self.assertEqual(tax.total, expected_values[i][2])
Example #27
    def __build_search_index(self):
        print_enter("worknoteBookServer.__build_search_index")
        from os.path import join
        from worknoteBookHelpers import gen_index
        from whoosh.index import create_in

        self.search_index = create_in(join(self.storagedir, ".search_index"), self.search_index.schema)
        writer = self.search_index.writer()
        print "Processing default storage directory..."
        for index, wn in enumerate(self.worknote_list):
            wn_workdir, title, date = wn
            link = u"./storage/{:s}".format(wn_workdir)
            writer.add_document(
                index=gen_index(index + 1),
                title=title,
                link=link,
                date=date,
                content=self.worknotes[wn_workdir].get_text("Markdown"),
            )
        print "Processing chapters..."
        for chapter_index, chapter in enumerate(self.chapter_list):
            for wn_index, wn in enumerate(self.chapters[chapter]["worknote_list"]):
                wn_workdir, title, date = wn
                link = u"./{:s}/{:s}".format(chapter, wn_workdir)
                writer.add_document(
                    index=gen_index([chapter_index + len(self.worknote_list) + 1, wn_index + 1]),
                    title=title,
                    link=link,
                    date=date,
                    content=self.chapters[chapter]["worknotes"][wn_workdir].get_text("Markdown"),
                )
        writer.commit()
Example #28
def find_char(board, char):
    # FUNCTION: Finds a string in a list of lists (matrix)
    # INPUT: list, string
    # RETURN: list
    # initial variables
    location = []
    # create two rows, first row holds character row coordinates
    # second row holds character column coordinates
    for i in range(2):
        location.append([])
    # moves through each row and column looking for a character
    for r, columns in enumerate(board):
        for c, square in enumerate(columns):
            # if it finds the character, it adds it to the location list
            if square == char:
                debugPrint("find_char() function:")
                debugPrint("Finding %r" % char)
                debugPrint("Location (r/c): (%r/%r)" % (r, c))
                # first row for row coordinates
                location[0].append(r)
                # second row for column coordinates
                location[1].append(c)
                debugPrint("%r" % location)
    return location
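debugPrint is an unshown helper; stubbing it out, a quick run of Example #28:

    debugPrint = lambda *args: None  # stub for the unshown debug helper

    board = [["_", "X", "_"],
             ["_", "_", "X"]]
    print(find_char(board, "X"))  # [[0, 1], [1, 2]]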
Example #29
    def initializeGUI(self):
        self.d = {"Switches": {}, "Triggers": {}}
        # set layout
        layout = QtGui.QGridLayout()
        self.setLayout(layout)
        # get switch names and add them to the layout, and connect their function
        layout.addWidget(QtGui.QLabel("Switches"), 0, 0)
        switchNames = yield self.server.get_switching_channels()
        for order, name in enumerate(switchNames):
            button = QtGui.QPushButton(name)
            self.d["Switches"][name] = button
            button.setCheckable(True)
            initstate = yield self.server.get_state(name)
            button.setChecked(initstate)
            self.setButtonText(button, name)
            button.clicked.connect(self.buttonConnection(name, button))
            layout.addWidget(button, 0, 1 + order)
        # do the same for trigger channels
        layout.addWidget(QtGui.QLabel("Triggers"), 1, 0)
        triggerNames = yield self.server.get_trigger_channels()
        for order, name in enumerate(triggerNames):
            button = QtGui.QPushButton(name)
            button.clicked.connect(self.triggerConnection(name))
            self.d["Triggers"][name] = button
            layout.addWidget(button, 1, 1 + order)
Example #30
def partb(inp):
    lights = [[0 for x in range(1000)] for y in range(1000)]

    for instruction in inp.split("\n"):
        command = None

        if instruction.startswith("turn on "):
            command = 0
            instruction = instruction[len("turn on ") :]

        elif instruction.startswith("toggle "):
            command = 1
            instruction = instruction[len("toggle ") :]

        elif instruction.startswith("turn off "):
            command = 2
            instruction = instruction[len("turn off ") :]

        start, _, end = instruction.split()
        start = [int(i) for i in start.split(",")]
        end = [int(i) for i in end.split(",")]

        for x in range(start[0], end[0] + 1):
            for y in range(start[1], end[1] + 1):
                lights[y][x] += (1, 2, -1)[command]
                lights[y][x] = max(0, lights[y][x])

    im = Image.new("RGB", (1000, 1000), "white")
    total = 0
    for y, row in enumerate(lights):
        for x, light in enumerate(row):
            total += light
            im.putpixel((x, y), light)
    im.save("out6", "png")
    return total
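Assuming Image comes from Pillow (from PIL import Image), a two-instruction sanity check of Example #30 (note it also writes the "out6" image as a side effect):

    print(partb("turn on 0,0 through 1,1\ntoggle 0,0 through 0,0"))  # 6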