Example #1
1
    def getParentTransforms(self, variableName, topLayerName=None):
        """Trace the composed transforms feeding *variableName*, walking the
        network layers in reverse order.

        :param variableName: name of the variable to trace back.
        :param topLayerName: layer to stop at; when omitted all layers are walked.
        :return: OrderedDict mapping variable name -> CaffeTransform.
        """
        layerNames = self.layers.keys()
        if topLayerName:
            layerIndex = layerNames.index(topLayerName)
        else:
            # past-the-end index so the slice below covers every layer
            layerIndex = len(self.layers) + 1
        transforms = OrderedDict()
        # seed with the identity transform for the traced variable itself
        transforms[variableName] = CaffeTransform([1.0, 1.0], [1.0, 1.0], [1.0, 1.0])
        for layerName in reversed(layerNames[0:layerIndex]):
            layer = self.layers[layerName]
            layerTfs = layer.getTransforms(self)
            for i, inputName in enumerate(layer.inputs):
                tfs = []
                if inputName in transforms:
                    tfs.append(transforms[inputName])
                for j, outputName in enumerate(layer.outputs):
                    if layerTfs[i][j] is None:
                        continue
                    if outputName in transforms:
                        composed = composeTransforms(layerTfs[i][j], transforms[outputName])
                        tfs.append(composed)

                if len(tfs) > 0:
                    # should resolve conflicts, not simply pick the first tf
                    transforms[inputName] = tfs[0]
        return transforms
class LRUCache(object):
    """Least-recently-used cache backed by an OrderedDict.

    The most recently used entries sit at the tail of the dict; the head
    is evicted when capacity is exceeded.
    """

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.cache = OrderedDict()
        self.capacity = capacity

    def get(self, key):
        """Return the cached value (marking it most recently used), or -1.

        :rtype: int
        """
        if key in self.cache:
            # delete + re-insert moves the entry to the most-recent position
            val = self.cache[key]
            del self.cache[key]
            self.cache[key] = val
            return val
        return -1

    def set(self, key, value):
        """Insert or refresh *key*, evicting the LRU entry when full.

        :type key: int
        :type value: int
        :rtype: nothing
        """
        if key in self.cache:
            del self.cache[key]
        elif len(self.cache) >= self.capacity:
            # evict the least recently used entry (head of the dict)
            self.cache.popitem(last=False)
        self.cache[key] = value
Example #3
1
class LRUCache(OrderedDict):
    """LRU cache. Not safe for concurrent access to set()."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        """Return the value for *key* (refreshing its recency), or None."""
        if key in self.cache:
            value = self.cache.pop(key)
            self.cache[key] = value
        else:
            value = None

        return value

    def set(self, key, value):
        """Store *key* -> *value*, evicting the LRU entry when full."""
        if key in self.cache:
            # BUG fix: the original re-inserted the *old* value and
            # silently discarded the new one; store the new value.
            self.cache.pop(key)
            self.cache[key] = value
        else:
            if len(self.cache) == self.capacity:
                self.cache.popitem(last=False)  # evict the oldest entry
            self.cache[key] = value
Example #4
1
class ATimeCache(object):
    """Cache class (dictionary) with a limited size, where only the
    'max_entries' most recently added or accessed entries are stored."""

    def __init__(self, max_entries):
        self._cache = OrderedDict()
        self._max_entries = max_entries

    def _shrink(self):
        # Drop oldest entries until the size limit is respected.
        while len(self._cache) > self._max_entries:
            self._cache.popitem(last=False)

    def get_max_entries(self):
        return self._max_entries

    def set_max_entries(self, value):
        self._max_entries = value
        self._shrink()

    max_entries = property(get_max_entries, set_max_entries, None, "Set or get the cache size")

    def has_key(self, key):
        # kept for interface compatibility; prefer ``key in cache``
        return key in self._cache

    def __eq__(self, other):
        try:
            return self._cache.__eq__(other._cache)
        except AttributeError:
            # *other* has no _cache attribute, so it cannot be equal
            return False

    def __len__(self):
        return self._cache.__len__()

    def __getitem__(self, key):
        # Pop + re-insert marks the entry as most recently used.
        value = self._cache.pop(key)
        self._cache[key] = value
        return value

    def __setitem__(self, key, value):
        if key in self._cache:
            self._cache.pop(key)
        self._cache.__setitem__(key, value)
        self._shrink()

    def __contains__(self, key):
        return self.has_key(key)

    def __str__(self):
        # BUG fix: previously referenced the nonexistent attribute
        # ``self.cache`` and raised AttributeError.
        return self._cache.__str__()

    def __iter__(self):
        # Iterate directly on the underlying dict, rather than on this
        # class, so as NOT to change the order of cached items (as
        # opposed to []/__getitem__, which will reinsert an item on top
        # of the stack whenever it is looked up).
        return iter(self._cache)
def parseFile(filename, patIdx, medIdx, diagIdx, labelIdx, delim="|"):
    """ 
    Parse a csv file using the delimiter and the appropriate columns of interest.
    The resultant sparse tensor has patient on the 0th mode, diagnosis on the 1st mode,
    and medications on the 2nd mode.
    
    Tensor info contains the axis information for each mode.
    """
    print "Creating the tensor for " + filename

    patList = OrderedDict(sorted({}.items(), key=lambda t:t[1]))
    medList = OrderedDict(sorted({}.items(), key=lambda t:t[1]))
    diagList = OrderedDict(sorted({}.items(), key=lambda t:t[1]))
    patClass = OrderedDict(sorted({}.items(), key=lambda t:t[1]))

    ## storing tensor class as empty array
    tensorIdx = np.array([[0, 0, 0]])
    datfile = open(filename)

    for i, line in enumerate(datfile):
        line = line.rstrip('\r\n')
        parse = line.split(delim)
        
        # insert them into the list if necessary
        if not patList.has_key(parse[patIdx]):
            patList[parse[patIdx]] = len(patList)
            patClass[parse[patIdx]] = parse[labelIdx]
        if not diagList.has_key(parse[diagIdx]):
            diagList[parse[diagIdx]] = len(diagList)
        if not medList.has_key(parse[medIdx]):
            medList[parse[medIdx]] = len(medList)
        
        patId = patList.get(parse[patIdx])
        diagId = diagList.get(parse[diagIdx])
        medId = medList.get(parse[medIdx])
    
        # we know the first one is already mapped
        if i > 1:
            tensorIdx = np.append(tensorIdx, [[patId, diagId, medId]], axis=0)

    tensorVal = np.ones((tensorIdx.shape[0], 1))
    # initialize size
    siz = np.array([len(patList), len(diagList), len(medList)])
    X = sptensor.sptensor(tensorIdx, tensorVal, siz)
    
    tensorInfo = {}
    tensorInfo['axis'] = [patList.keys(), diagList.keys(), medList.keys()]
    tensorInfo['pat'] = patList.keys()
    tensorInfo['med'] = medList.keys()
    tensorInfo['diag'] = diagList.keys()
    tensorInfo['class'] = patClass.values()
      
    return X, tensorInfo
Example #6
1
    def get_net_param_info(self):
        """Collect the unique learnable parameter blobs of the network.

        The OrderedDict is used as an insertion-ordered set so parameters
        shared between layers are counted only once.

        :return: tuple (param_set, total_count) where *param_set* maps each
            unique parameter object to True and *total_count* is the total
            number of scalar parameters.
        """
        param_set = OrderedDict()
        total_count = 0
        for layer in self.layers:
            # input layers carry no learnable parameters
            if not isinstance(layer, la.InputLayer):
                if layer.params.weights not in param_set:
                    param_set[layer.params.weights] = True
                    total_count += layer.params.weights.count
                if layer.params.biases not in param_set:
                    param_set[layer.params.biases] = True
                    total_count += layer.params.biases.count

        return param_set, total_count
    def loadSettings(self):
        """Parse the INI-style file at ``self.currentFile``.

        :return: OrderedDict mapping group name -> OrderedDict of
            lower-cased option name -> value string.
        :raises Exception: when the file cannot be opened.
        """
        returnData = OD()
        cur_dict = None

        try:
            inifile = open(self.currentFile, "r")
        except IOError:
            raise Exception("Error opening file at %s" % self.currentFile)

        try:
            # Here's the business end of the function
            for line in inifile:
                # Ignore any commented out lines
                if line[0] == "#":
                    continue

                # New group header, e.g. "[general]"
                if line[0] == "[":
                    grpname = re.match("\[(.*?)\]", line).group(1)
                    cur_dict = grpname
                    # BUG fix: the original tested the literal string
                    # "grpname" instead of the variable, so the group dict
                    # was re-created (dropping options) on every header.
                    if grpname not in returnData:
                        returnData[grpname] = OD()

                # Options ("key = value" lines)
                elif re.match("(.*?)(?:\s+)?=(?:\s+)?(.*)", line) is not None:
                    if cur_dict is not None:
                        option_line = re.match("(.*?)(?:\s+)?=(?:\s+)?(.*)", line)
                        returnData[cur_dict][str(option_line.group(1)).lower()] = str(option_line.group(2))
        finally:
            # Gotta close it before we quit -- even when parsing raises
            inifile.close()

        return returnData
Example #8
0
def longest_amicable_chain_min_member(n):  # ), queue_size=600):
    """Return the smallest member of the longest amicable chain with no element exceeding n-1. Assumed
    to be unique, with chain length <= queue_size."""
    d, x, c = DCalculator(n).divisor_sum(), 1, np.zeros((n + 1,), dtype=np.int)
    c.fill(-1)  # Initially, set all nodes to unvisited. 0=visited. >0: cycle length at smallest element of the cycle
    while x < n:  # x points to next unvisited element
        # print 'x', x
        y, p = x, 0
        q = OrderedDict([(y, p)])
        # print '\t', y, p
        while y < n:
            # Advance along cycle
            y, p = d[y], p + 1
            appeared_before = q.has_key(y)
            # print '\t', y, p
            if y > n or c[y] > 0 or appeared_before:
                break
            q[y] = p
        # print '\tlast', y, p, q
        r = q.keys()
        c[r] = 0
        if appeared_before:
            i = q[y]
            z = r[i + np.argmin(r[i:])]
            c[z] = p - i
            print x, "\t", "c[%d] = %d, i=%d" % (z, c[z], i)
        while c[x] >= 0 and x < n:
            x += 1  # Advance to next unvisited element
    #     np.set_printoptions(threshold=np.nan)
    #     print c
    return np.argmax(c)
Example #9
0
def surrogate_parents(g, node, max_depth, min_depth=1, successors=True):
    """Return a dictionary of surrogate-parent-to-depth of the node 'node' in the DAG g. Parents with
    min_depth <= depth <= max_depth are returned. If successors=False, successors of 'node' are omitted. """
    # Build predecessor list. No need for a set here, since g is a DAG and predecessors of depth l
    # cannot intersect predecessors of depth < l
    result = OrderedDict()
    ancestors = [node]
    result[node] = 0
    for l in xrange(1, max_depth + 1):
        ancestors = reduce(list.__add__, (g.predecessors(x) for x in ancestors))
        result.update((x, l) for x in ancestors)
        if not ancestors:
            break

    # For each predecessor of depth l, find all successors of depth <= d-l. These can intersect.
    # m is the shortest path length between node and y, so do not update y's entry if it's already
    # in result with a lower m-value.
    for x, l in result.items():
        result.update(
            (y, m)
            for (y, m) in (
                (y, l + len(path) - 1)
                for y, path in nx.single_source_shortest_path(g, x, cutoff=max_depth - l).iteritems()
                if (True if successors else (node not in path))
            )
            if y not in result or result[y] > m
        )
    # Remove entries of nodes with depth < min_depth.
    # BUG fix: materialise the keys first -- deleting entries while lazily
    # iterating the live dict raises RuntimeError.
    for y in [y for y, m in result.iteritems() if m < min_depth]:
        del result[y]
    return result
Example #10
0
def treasures():
    """Load the treasure table and compute each group's total drop weight.

    :return: OrderedDict mapping group_id -> {item_id: {"frequency": f}, ...,
        "total": sum of the group's frequencies}.
    """
    results = sqlrelay.execute_results(
        """
SELECT
   GROUP_ID
 , ITEM_ID
 , FREQUENCY
FROM
   ARPG_BT_TREASURE
"""
    )

    treasure_list = OrderedDict()
    for r in results:
        group_id = int(r[0])
        item_id = int(r[1])
        frequency = int(r[2])

        if group_id in treasure_list:
            treasure = treasure_list[group_id]
        else:
            treasure = OrderedDict()
            treasure_list[group_id] = treasure

        treasure[item_id] = {"frequency": frequency}

    # compute the total probability weight per group
    for treasure in treasure_list.values():
        total = 0
        for item in treasure.values():
            total += item["frequency"]
        treasure["total"] = total

    return treasure_list
Example #11
0
class Cost:
    """Wraps a Theano cost expression together with the parameters (and
    constants) it is differentiated against."""

    def __init__(self, cost, params, constants=None):
        self.cost = cost
        self.grads = OrderedDict()  # param -> gradient expression
        self.computed_cost = False

        # OrderedDicts double as insertion-ordered sets here
        self.params = OrderedDict()
        for p in params:
            self.params[p] = True

        self.constants = OrderedDict()
        constants = [] if constants is None else constants
        for c in constants:
            self.constants[c] = True

    def compute_gradients(self, lr, multipliers=None):
        """Populate ``self.grads`` with lr-scaled gradients of the cost.

        :param lr: base learning rate.
        :param multipliers: optional mapping of parameter *name* -> lr multiplier.
        """
        multipliers = OrderedDict() if multipliers is None else multipliers
        # list(...) so the keys are a concrete sequence, not a lazy view
        grads = T.grad(
            self.cost, list(self.params.keys()), consider_constant=list(self.constants.keys()),
            disconnected_inputs="ignore"
        )
        for param, gparam in zip(self.params.keys(), grads):
            param_lr = multipliers.get(param.name, 1.0) * lr
            self.grads[param] = param_lr * gparam
        self.computed_cost = True

    def update_gradient(self, param, new_grad):
        """Replace the stored gradient for *param* (gradients must already
        have been computed)."""
        assert self.computed_cost
        assert param in self.grads
        self.grads[param] = new_grad
Example #12
0
def clients():
    """Flask view: list the known clients, grouped by host then client type."""
    cli = MQSyncReq(app.zmq_context)
    msg = MQMessage()
    msg.set_action("client.list.get")
    res = cli.request("manager", msg.get(), timeout=10)
    if res is not None:
        client_list = res.get_data()
    else:
        # manager did not answer: render an empty list
        client_list = {}

    # host -> type -> client id -> client record
    client_list_per_host_per_type = OrderedDict()
    for client in client_list:
        cli_type = client_list[client]["type"]
        cli_host = client_list[client]["host"]

        if cli_host not in client_list_per_host_per_type:
            client_list_per_host_per_type[cli_host] = {}

        if cli_type not in client_list_per_host_per_type[cli_host]:
            client_list_per_host_per_type[cli_host][cli_type] = {}

        client_list_per_host_per_type[cli_host][cli_type][client] = client_list[client]

    return render_template(
        "clients.html",
        mactive="clients",
        overview_state="collapse",
        clients=client_list,
        client_list_per_host_per_type=client_list_per_host_per_type,
    )
Example #13
0
def client_devices_new(client_id):
    """Flask view: data for the new-device form of client *client_id*."""
    detail = get_client_detail(client_id)
    data = detail["data"]

    # device types sorted by key for a stable display order
    device_types_keys = sorted(data["device_types"])
    device_types_list = OrderedDict()
    for key in device_types_keys:
        device_types_list[key] = data["device_types"][key]
    products = {}
    # product-type label -> product name -> device type
    products_per_type = OrderedDict()
    if "products" in data:
        products_list = data["products"]
        products_list = sorted(products_list, key=itemgetter("name"))
        for prod in products_list:
            # group by the human-readable device-type name, not the type id
            product_label = data["device_types"][prod["type"]]["name"]
            products[prod["name"]] = prod["type"]
            if product_label not in products_per_type:
                products_per_type[product_label] = OrderedDict()
            products_per_type[product_label][prod["name"]] = prod["type"]
    # TODO : include products icons

    return render_template(
        "client_device_new.html",
        device_types=device_types_list,
        products=products,
        products_per_type=products_per_type,
        clientid=client_id,
        mactive="clients",
        active="devices",
        client_detail=detail,
    )
Example #14
0
def get_notes(sno):
    """Fetch and group the note lines stored for serial number *sno*.

    :return: OrderedDict mapping (date, operator) -> list of
        (note_type, note) tuples, in line order.
    """
    db = connect.connect()
    cursor = db.cursor()
    cursor.execute(
        """SELECT line from notes where serialno = %s
    order by lineno""",
        sno,
    )
    results = cursor.fetchall()
    cursor.close()

    notes_dict = OrderedDict()
    # date/operator carry over from the last line that specified them
    ndate, op = "", ""

    # a line is like ('\x01REC\x0c\x08m\x0c\x08m\n\x08',)
    for (line,) in results:
        ntype, note, operator, date2 = notes.decipher_noteline(line)
        if date2 != "":
            ndate = date2
        if operator != "":
            op = operator

        key = (ndate, op)
        if key in notes_dict:
            notes_dict[key].append((ntype, note))
        else:
            notes_dict[key] = [(ntype, note)]

    return notes_dict
Example #15
0
    def get_parts_by_supplier(self, currency):
        """Return list of Parts by supplier and currency, ordered by name.

        Returns: OrderedDict, suppliers are keys, values are part lists.
        FIXME: The Part query is a bit inefficient when the parts have
        many alternative suppliers. Too many queries, too many filtered out.
        """
        by_supplier = OrderedDict()
        parts_collected = set()  # probably redundant, not necessary
        qry = Suppliers.query(ancestor=self.key).order(Suppliers.name)
        for supplier in qry.iter():
            q = Part.query(
                Part.supplier_names == supplier.name, Part.supplier_currencies == currency, ancestor=self.key
            ).order(Part.name)
            for part in q.iter():
                # make sure each part is added only once
                if part.uuid in parts_collected:
                    continue
                # check if supplier_name matches currency:
                # looking for one pair where supplier and currency matches
                got_match = False
                for i in range(len(part.supplier_names)):
                    if part.supplier_names[i] == supplier.name and part.supplier_currencies[i] == currency:
                        got_match = True
                        break
                if got_match:
                    # supplier actually matches currency
                    parts_collected.add(part.uuid)
                    if supplier.name not in by_supplier:
                        by_supplier[supplier.name] = []
                    by_supplier[supplier.name].append(part)
        return by_supplier
Example #16
0
def delete_duplicates(input_file, my_file):
    """Collapse duplicate "name:SSN" lines from *input_file* into *my_file*.

    Lines sharing an SSN are merged; the longest name variant wins (ties
    broken lexicographically) and "Last,First[ Middle]" forms are rewritten
    as "First [Middle ]Last". Output preserves first-seen SSN order.
    """
    content = ""
    last_first = re.compile("\w+,\w+")
    last_first_middle = re.compile("\w+,\w+ \w+")
    by_ssn = OrderedDict()  # SSN -> list of name variants (file order)

    with open(input_file) as fid:
        for line in fid.readlines():
            # skip lines without a ':' separator
            if len(re.split(":", line)) != 1:
                name, SSN = re.split(":", line)
                SSN = SSN.strip("\n")
                if SSN not in by_ssn:
                    by_ssn[SSN] = []
                by_ssn[SSN].append(name)

    for SSN in by_ssn.keys():
        name_array = by_ssn[SSN]
        # prefer the longest variant; break ties lexicographically
        name_array.sort(key=lambda item: (-len(item), item))
        best = name_array[0]
        if re.match(last_first_middle, best) or re.match(last_first, best):
            split_name = re.findall(r"[\w']+", best)
            if re.match(last_first_middle, best):
                # "Last,First Middle" -> "First Middle Last"
                normal_form = split_name[1] + " " + split_name[2] + " " + split_name[0]
            else:
                # "Last,First" -> "First Last"
                normal_form = split_name[1] + " " + split_name[0]
        else:
            normal_form = best
        content += normal_form + ":" + SSN + "\n"

    with open(my_file, "w+") as fid_wrt:
        fid_wrt.write(content.rstrip("\n"))
Example #17
0
    def format(self, data, keys=None, group_by=None, domain=None):
        """Yield report rows ``[name, total, value...]`` grouped by the
        first formatted column and sorted by (chain, name).

        :param data: mapping of key tuple -> raw row.
        """
        rows_dict = OrderedDict()
        tmp_data = OrderedDict()
        sorted_data = []
        value_chains = get_domain_configuration(domain).by_type_hierarchy
        for key, row in data.items():
            to_list = list(key)

            # Resolve the display name for the key by descending the
            # value-chain hierarchy.
            def find_name(elements, deep):
                for element in elements:
                    if deep == len(key) - 3 and key[deep + 1] == element.val:
                        return element.text
                    elif key[deep + 1] == element.val:
                        return find_name(element.next, deep + 1)

            name = find_name(value_chains, 0)
            to_list[2] = name
            tmp_data.update({tuple(to_list): row})
        if tmp_data:
            sorted_data = sorted(tmp_data.items(), key=lambda x: (x[0][0], x[0][2]))

        for row in sorted_data:
            formatted_row = self._format.format_row(row[1])
            if formatted_row[0] not in rows_dict:
                rows_dict[formatted_row[0]] = []
            rows_dict[formatted_row[0]].append(formatted_row[1])

        for key, row in rows_dict.items():
            total_column = self.calculate_total_column(row)
            res = [key, total_column]
            res.extend(row)
            yield res
Example #18
0
def from_files(filenames):
    """Return an iterator that provides a sequence of Histograms for
the histograms defined in filenames.
    """
    all_histograms = OrderedDict()
    for filename in filenames:
        parser = FILENAME_PARSERS[os.path.basename(filename)]
        histograms = parser(filename)

        # OrderedDicts are important, because then the iteration order over
        # the parsed histograms is stable, which makes the insertion into
        # all_histograms stable, which makes ordering in generated files
        # stable, which makes builds more deterministic.
        if not isinstance(histograms, OrderedDict):
            # NOTE: raise-as-call form replaces the Python-2-only
            # ``raise X, msg`` statement syntax.
            raise BaseException("histogram parser didn't provide an OrderedDict")

        for (name, definition) in histograms.iteritems():
            if name in all_histograms:
                raise DefinitionException("duplicate histogram name %s" % name)
            all_histograms[name] = definition

    # We require that all USE_COUNTER2_* histograms be defined in a contiguous
    # block.
    use_counter_indices = filter(lambda x: x[1].startswith("USE_COUNTER2_"), enumerate(all_histograms.iterkeys()))
    if use_counter_indices:
        lower_bound = use_counter_indices[0][0]
        upper_bound = use_counter_indices[-1][0]
        n_counters = upper_bound - lower_bound + 1
        if n_counters != len(use_counter_indices):
            raise DefinitionException("use counter histograms must be defined in a contiguous block")

    for (name, definition) in all_histograms.iteritems():
        yield Histogram(name, definition)
Example #19
0
    def tags_sorted(self):
        """Group ``self.tags`` ("group:tag" -> count) by group.

        :return: list of (group, [[tag, percent, count], ...]) pairs with
            tags in descending count order; *percent* stays None for the
            "Ungrouped" pseudo-group.
        """
        tags = reversed(sorted(self.tags.iteritems(), key=operator.itemgetter(1)))
        grouped = OrderedDict()
        totals = {}

        for tag in tags:
            temp = tag[0].split(":")
            if len(temp) > 1:
                g = temp[0]
                t = temp[1]

            else:
                # no "group:" prefix on the tag name
                g = "Ungrouped"
                t = temp[0]

            if g in grouped:
                grouped[g].append([t, None, tag[1]])
                totals[g] += tag[1]

            else:
                grouped[g] = [[t, None, tag[1]]]
                totals[g] = tag[1]

        for key in grouped.keys():
            if key != "Ungrouped":
                total = 0
                for t in grouped[key]:
                    total += t[2]

                # fill in each tag's percentage share of the group total
                for t in grouped[key]:
                    t[1] = round((float(t[2]) / total) * 100, 1)

        return grouped.items()
Example #20
0
def get_readgroup_and_seq_dict_from_bam(bam_files, allow_collision=False):
    print "Gather seq dict and read groups from %s bam files" % len(bam_files)
    all_read_groups = {}
    all_seq_dict = OrderedDict()
    for bam_file in bam_files:
        command = "samtools view -H %s | egrep '@RG|@SQ' " % bam_file
        stdout, process = utils_commands.get_output_stream_from_command(command)
        for line in stdout:
            if line.startswith("@RG"):
                read_group_dict = {}
                for element in line.strip().split("\t"):
                    if element != "@RG":
                        key, value = element.split(":")
                        read_group_dict[key] = value
                if read_group_dict.has_key("ID") and read_group_dict.get("ID") not in all_read_groups:
                    all_read_groups[read_group_dict.get("ID")] = read_group_dict
            if line.startswith("@SQ"):
                seq_dict = {}
                for element in line.strip().split("\t"):
                    if element != "@SQ":
                        key, value = element.split(":")
                        if key == "LN":
                            value = int(value)
                        seq_dict[key] = value
                if seq_dict.has_key("SN"):
                    name = seq_dict.get("SN")
                    if all_seq_dict.has_key(name) and not allow_collision:
                        raise StandardError(
                            "Identical sequence dictionary name %s in %s and previous bam entry and collision not allowed"
                            % (name, bam_file)
                        )
                    all_seq_dict[name] = seq_dict

    return all_read_groups.values(), all_seq_dict.values()
Example #21
0
def parse(dat):
    f = open(dat, "r")
    first = True
    labels = []
    lines = f.readlines()
    data = OrderedDict()
    min = OrderedDict()
    max = OrderedDict()
    scale_by = None
    cur_name = None
    it = iter(xrange(len(lines)))
    for i in it:
        if first:
            cur_name = lines[i]
            labels.append(lines[i])
            first = False
        elif lines[i] == "\n":
            for d in data.values():
                if not d.has_key(cur_name):
                    d[cur_name] = 0
            for d in min.values():
                if not d.has_key(cur_name):
                    d[cur_name] = 0
            for d in max.values():
                if not d.has_key(cur_name):
                    d[cur_name] = 0

            first = True
            scale_by = None
        else:
            print lines[i], lines[i + 1]
            if not scale_by:
                scale_by = 1
            # float(lines[i+1])
            if not data.has_key(lines[i]):
                data[lines[i]] = OrderedDict()
                min[lines[i]] = OrderedDict()
                max[lines[i]] = OrderedDict()
                # initialize previously missed values to 0
                for l in labels:
                    data[lines[i]][l] = 0
                    min[lines[i]][l] = 0
                    max[lines[i]][l] = 0
            raw_dat = eval(lines[i + 1])  # very safe lol
            throughputs = raw_dat
            print "XXXXX"
            print throughputs
            print median(throughputs)
            print numpy.amin(throughputs)
            print numpy.amax(throughputs)
            data[lines[i]][cur_name] = median(throughputs) / scale_by
            min[lines[i]][cur_name] = numpy.amin(throughputs) / scale_by
            max[lines[i]][cur_name] = numpy.amax(throughputs) / scale_by
            next(it)

    print data

    return data, min, max, labels
Example #22
0
def find_group(sheet, startrowidx, colidx):
    """Group the row indices of *sheet* by the value found in column *colidx*.

    Blank cells continue the group of the most recent non-empty value.

    :return: OrderedDict mapping cell value (as string) -> list of row indices.
    """
    row, col = find_boundary(sheet)
    ret = OrderedDict()
    lastv = None
    for i in range(startrowidx, row + 1):
        v = sheet.cell_value(i, colidx)
        if isinstance(v, (float, int)):
            # normalise numeric cells to their string form
            v = str(v)
        if len(v) > 0:
            lastv = v
            if v not in ret:
                ret[v] = []
            ret[v].append(i)
        else:
            # blank cell: extend the previous group, if any
            if lastv and lastv in ret:
                ret[lastv].append(i)
    return ret
Example #23
0
 def run(self):
     """Execute the parsed SQL AST over a copy of the data and print the
     result as a table (column name -> column values)."""
     start_time = time.time()
     op_data = sqlfuncs.do(self.ast, self.data[:])  # COPY!!!
     response = OrderedDict()
     # pivot the row dicts into per-column value lists
     for row in op_data:
         for key in row.keys():
             if key not in response:
                 response[key] = []
             response[key].append(row[key])
     Table(response, start_time).prnt()
Example #24
0
class StatusConsole:
    """ A static console for displaying parameters that change over time. """

    def __init__(self, x, y, width):
        self.parameters = OrderedDict()  # parameter name -> latest value
        self.paramCount = 0
        self.x = x
        self.y = y
        self.title = ""
        self.width = width
        # BUG fix: ``modified`` was never initialised, so draw() raised
        # AttributeError when called before any add/set.
        self.modified = True
        self.label = pyglet.text.Label(
            self.title,
            font_name="Consolas",
            font_size=10,
            color=(255, 255, 255, 255),
            x=self.x,
            y=self.y,
            width=self.width,
            multiline=True,
        )

    def addParameter(self, parameter):
        """Register *parameter* for display with an initial value of None."""
        if parameter not in self.parameters:
            self.parameters[parameter] = None
            self.modified = True

    def setParameter(self, parameter, value):
        """Update the value of a previously added parameter."""
        if parameter in self.parameters:
            self.parameters[parameter] = value
            self.modified = True

    def setTitle(self, string=""):
        self.title = string
        # BUG fix: mark dirty so the new title is rendered on the next draw
        self.modified = True

    def updateLabel(self):
        # render one "name : value" line per parameter, under the title
        s = "".join([str(a) + " : " + str(b) + "\n" for a, b in self.parameters.items()])
        s = self.title + "\n" + s
        self.label.text = s

    def draw(self):
        if self.modified:
            self.updateLabel()
            # only rebuild the label text when something changed
            self.modified = False
        self.label.draw()
def Optimal(pages, N):
    """Simulate Belady's optimal (farthest-future-use) page replacement.

    :param pages: sequence of page references.
    :param N: cache capacity (number of entries).
    :return: the number of page faults.
    """
    tamCache = N
    cache = OrderedDict()  # page -> slot index
    p_f = 0  # page-fault counter
    k = 0  # next free slot index
    # BUG fix: the original message referenced the undefined names
    # ``algoritmo`` and ``tam_cache`` and raised NameError.
    print("Evaluating an Optimal cache with " + str(tamCache) + " entries.\n")
    for i in range(len(pages)):
        page = pages[i]
        if len(cache) < tamCache:
            # cache not yet full: a miss just occupies a new slot
            if page not in cache:
                cache[page] = k
                p_f += 1
                k += 1
        else:
            if page not in cache:
                # choose the cached page whose next use is farthest away
                indice = 0
                ind = 0
                pal = ""
                for p, j in cache.items():
                    try:
                        mayor = pages[i:].index(p)
                    except ValueError:
                        # never referenced again: evict it immediately
                        mayor = len(pages[i:])
                        ind = j
                        pal = p
                        break
                    if mayor > indice:
                        indice = mayor
                        ind = j
                        pal = p
                # BUG fix: the original deleted ``cache[p]`` (the last page
                # iterated) instead of the chosen victim ``pal``.
                if pal in cache:
                    del cache[pal]
                    cache[page] = ind
                    p_f += 1
    return p_f
def nonRepeated(s):
    """Return the character of *s* with the lowest occurrence count.

    Counts are accumulated in insertion order and ``sorted`` is stable, so
    among equally rare characters the earliest one in *s* wins -- i.e. the
    first non-repeated character when one exists.
    """
    result = OrderedDict()

    for ch in s:
        if ch in result:
            result[ch] += 1
        else:
            result[ch] = 1

    return sorted(result.items(), key=lambda t: t[1])[0][0]
Example #27
0
class Group:
    """An alerting group: which alert types it sends and its escalation levels."""

    def __init__(self, groupId, alert, escalate, ack, suppress, clear):
        self.__groupId = groupId
        self.__shouldAlert = alert
        self.__shouldEscalate = escalate
        self.__shouldAck = ack
        self.__shouldSuppress = suppress
        self.__shouldClear = clear
        self.__timeProfiles = []
        self.__holidayProfiles = []
        self.__levels = OrderedDict()  # index -> Level, in insertion order

    def Id(self):
        return self.__groupId

    def AddLevel(self, index, users, escalateTime):
        """Add users at escalation level *index*, creating it if needed."""
        if index in self.__levels:
            # choosing to ignore escalateTime here since it was already set
            self.__levels[index].AddUsers(users)
        else:
            level = Level(escalateTime)
            level.AddUsers(users)
            self.__levels[index] = level

    def GetRecipients(self, level):
        """Return the users of every level up to and including *level*."""
        recipients = []
        print('Getting recipients for level: ' + str(level))
        for i in range(level + 1):
            key = self.__GetKey(i)
            recipients.extend(self.__levels[key].GetUsers())
        return recipients

    def GetSize(self):
        return len(self.__levels)

    def GetEscalateTime(self, level):
        key = self.__GetKey(level)
        return self.__levels[key].GetEscalateTime()

    def __GetKey(self, level):
        # positional lookup; list() keeps this working on Python 3 views too
        return list(self.__levels.keys())[level]

    def ShouldSend(self, alertType):
        """Whether this group sends alerts of the given type."""
        if alertType == 'alert':
            return self.__shouldAlert
        if alertType == 'escalate':
            return self.__shouldEscalate
        if alertType == 'ack':
            return self.__shouldAck
        if alertType == 'suppress':
            # BUG fix: the original returned the *escalate* flag here
            return self.__shouldSuppress
        if alertType == 'clear':
            return self.__shouldClear
        return False
Example #28
0
File: qdb.py Project: jankim/qb
    def questions_with_pages(self):
        """Group every question that has a page set by its page.

        :return: OrderedDict mapping page -> list of questions; questions
            are visited in ascending answer order.
        """
        page_map = OrderedDict()

        questions = self.query('from questions where page != ""', ())
        questions = questions.values()

        # stable sort by answer keeps the grouping deterministic
        for ii in sorted(questions, key=lambda x: x.answer):
            if ii.page not in page_map:
                page_map[ii.page] = []
            page_map[ii.page].append(ii)
        return page_map
Example #29
0
    def node(self):
        """Serialise this object (and its children, recursively) into an
        ordered dict; the "children" key only exists when children exist."""
        result = OrderedDict()
        result[u"id"] = self.j_id
        result[u"text"] = self.text
        result[u"icon"] = self.icon

        for child in self.__class__.objects.filter(parent=self):
            if u"children" not in result:
                result[u"children"] = []
            result[u"children"].append(child.node())
        return result
Example #30
0
def merge_gradients(*gradient_list):
    """Merge multiple (ordered) gradient dicts, summing values that share
    a key.

    :param gradient_list: dicts mapping parameter -> gradient.
    :return: OrderedDict with keys in first-seen order.
    """
    merged = OrderedDict()
    for gradients in gradient_list:
        assert isinstance(gradients, (dict, OrderedDict))
        for key, val in gradients.items():
            if key in merged:
                merged[key] = merged[key] + val
            else:
                merged[key] = val
    return merged