def analyse_amperf(jsn, busy_thresh, output_filename):
    # Each of these maps a tolerance to a count
    bad_pexecs = OrderedDict()
    bad_iters = OrderedDict()
    for tol in TOLERANCES:
    # The totals are the same on each iteration, so it's OK to use the
        # value from the final iteration
        total_pexecs, total_iters, bad_pexecs[tol], bad_iters[tol] = \
            _analyse_amperf(jsn, busy_thresh, tol)

    bad_pexecs_xs, bad_pexecs_ys = zip(*bad_pexecs.iteritems())
    bad_iters_xs, bad_iters_ys = zip(*bad_iters.iteritems())

    # JSON can't deal with tuples
    bad_pexecs_xs, bad_pexecs_ys = list(bad_pexecs_xs), list(bad_pexecs_ys)
    bad_iters_xs, bad_iters_ys = list(bad_iters_xs), list(bad_iters_ys)

    ratios = _collect_busy_ratios(jsn, busy_thresh)

    dct = {
        "bad_pexecs": [bad_pexecs_xs, bad_pexecs_ys],
        "bad_iters": [bad_iters_xs, bad_iters_ys],
        "total_pexecs": total_pexecs,
        "total_iters": total_iters,
        "ratios": ratios,
    }
    print("\ndumping to %s" % output_filename)
    with open(output_filename, "w") as fh:
        json.dump(dct, fh, indent=2)
Example #2
def output_benchmarks_exp3():
    tp_file = open("exp3_throughput.dat", "w")
    lat_file = open("exp3_delay.dat", "w")
    tp_columns = OrderedDict((second, []) for second in range(21))  # build from pairs so the 0..20 order is kept
    lat_columns = OrderedDict((second, []) for second in range(21))

    for tcp in TCP_VARIANTS:
        for queue in QUEUING_VARIANTS:
            fname = "exp3_trace/" + tcp + "_" + queue + ".tr"
            for time, tp, lat in calculate_benchmarks_exp3(fname):
                tp_columns[time].append(tp)
                lat_columns[time].append(lat)

    for time, tps in tp_columns.iteritems():
        if tps:
            tp_file.write("%d\t%.3f\t%.3f\t%.3f\t%.3f\n" % (time, tps[0], tps[1], tps[2], tps[3]))
        else:
            tp_file.write("0\t0.000\t0.000\t0.000\t0.000\n")
    for time, lats in lat_columns.iteritems():
        if lats:
            lat_file.write("%d\t%.3f\t%.3f\t%.3f\t%.3f\n" % (time, lats[0], lats[1], lats[2], lats[3]))
        else:
            lat_file.write("0\t0.000\t0.000\t0.000\t0.000\n")
    
    tp_file.close()
    lat_file.close()
Example #3
class FreqDict(object):
    def __init__(self, terms, reverse=False):
        assert type(terms) in [list, set, np.ndarray], 'input must be list, set or ndarray.'
        fd = defaultdict(int)
        for term in terms:
            fd[term] += 1
        self.__fd = OrderedDict()
        for k, v in sorted(fd.iteritems(), key=lambda d: d[1], reverse=reverse):
            self.__fd[k] = v

    def __getitem__(self, item):
        return self.__fd[item]

    def __iter__(self):
        return iter(self.__fd.iteritems())

    def keys(self):
        return self.__fd.keys()

    def values(self):
        return self.__fd.values()

    def print_freq_dict(self):
        for k, v in self.__fd.iteritems():
            print k, v

    def plot_pie(self):
        plt.pie(x=self.__fd.values(), labels=self.__fd.keys(), autopct='%1.1f%%', shadow=True)
        plt.show()
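
# A brief usage sketch for the FreqDict class above (Python 2); the terms are made up,
# and plot_pie is skipped because it needs a matplotlib display.
terms = ['spam', 'eggs', 'spam', 'ham', 'spam']
fd = FreqDict(terms, reverse=True)   # reverse=True puts the most frequent terms first
print fd['spam']                     # 3
for term, count in fd:               # __iter__ yields (term, count) pairs
    print term, count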
Example #4
def processQuery(query):
    qtf = defaultdict(float)
    qtfidf = defaultdict(float)
    qwords = re.findall(r'\w+', query.lower(), re.UNICODE)
    norm_sq = 0
    for word in qwords:
        qtf[word] = qtf[word] + 1
    for k, v in qtf.iteritems():
        qtf[k] = 1 + math.log(qtf[k], 2)
        qtfidf[k] = qtf[k] * idf[k]
        norm_sq += qtfidf[k] * qtfidf[k]
        if qtfidf[k] == 0:
            print "no match"
            return
    norm = math.sqrt(norm_sq)
    for i, j in qtfidf.iteritems():
        qtfidf[i] = qtfidf[i] / norm
    pgrank=buildRank(qtfidf)                    
    newdict=OrderedDict(sorted(pgrank.iteritems(), key=lambda t: t[1],reverse=True)[:50])    
    topdict=OrderedDict()
    for key,val in newdict.iteritems():
        topdict[key]=val                                                 
    combdict=defaultdict(list)        
    for key,val in topdict.iteritems():
        if(topdict[key]!=0):
            score=tweetuserscore[tweetid[key]]
            combdict[key].append(val)
            combdict[key].append(score)
    finaldict=OrderedDict(sorted(combdict.iteritems(), key=lambda t: (t[1][0],t[1][1]),reverse=True)[:50])
    rank=1    
    for key,val in finaldict.iteritems():
        print "rank: ",rank," tweetid: ",tweetid[key]," tweet: ",tweets[key].encode('utf-8')
        rank+=1         
    return
Example #5
def createStructured(element):
    valuemap = OrderedDict()
    name = element.get("Name")
    print "-- " + name

    lengthfields = set()
    for child in element:
        if child.get("LengthField"):
            lengthfields.add(child.get("LengthField"))
    
    for child in element:
        if child.tag == "{http://opcfoundation.org/BinarySchema/}Documentation":
            print "-- " + child.text
        elif child.tag == "{http://opcfoundation.org/BinarySchema/}Field":
            if child.get("Name") in lengthfields:
                continue
            childname = camlCase2AdaCase(child.get("Name"))
            if childname in printed_types:
                childname = childname + "_Value" # attributes may not have the name of a type
            typename = stripTypename(child.get("TypeName"))
            if childname == "Response_Header" or childname == "Request_Header":
                continue
            if typename in indefinite_types:
                valuemap[childname] = typename + "s.Pointer"
            elif child.get("LengthField"):
                valuemap[childname] = "ListOf" + typename + ".Pointer"
            else:
                valuemap[childname] = typename

    if "Response" in name[len(name)-9:]:
        print("type " + name + " is new Response_Base with "),
    elif "Request" in name[len(name)-9:]:
        print ("type " + name + " is new Request_Base with "),
    else:
        print ("type " + name + " is new UA_Builtin with "),
    if len(valuemap) > 0:
        print "record"
        for n,t in valuemap.iteritems():
            print n + " : " + t + ";"
        print "end record;"
    else:
        print "null record;"
    print "function NodeId_Nr(Item : in " + name + ") return UInt16 is (SID." + name + "_Id);" # increase id by 2 to get the binary_encoding id
    if "Response" in name[len(name)-9:]:
        print("function Binary_Size(Item : " + name + ") return Int32 is ( Binary_Size(Item.Response_Header)"), 
    elif "Request" in name[len(name)-9:]:
        print("function Binary_Size(Item : " + name + ") return Int32 is ( Binary_Size(Item.Request_Header)"), 
    else:
        print("function Binary_Size(Item : " + name + ") return Int32 is ( 0"), # 0 for the null records
    for n,t in valuemap.iteritems():
        if t in elementary_size:
            print('+ ' + str(elementary_size[t])),
        else:
            if t in enum_types:
                print('+ 8') # enums are all 32 bit
            elif t.find(".Pointer") != -1 or t.find("ListOf") != -1:
                print('+ ' + t[0:t.find(".")+1] + 'Binary_Size(Item.' + n + ')'),
            else:
                print('+ Binary_Size(Item.' + n + ')'),
    print ");\n"
Example #6
def from_xpaths(container, xpaths):
    '''
    Generate a Table of Contents from a list of XPath expressions. Each
    expression in the list corresponds to a level of the generated ToC. For
    example: :code:`['//h:h1', '//h:h2', '//h:h3']` will generate a three level
    table of contents from the ``<h1>``, ``<h2>`` and ``<h3>`` tags.
    '''
    tocroot = TOC()
    xpaths = [XPath(xp) for xp in xpaths]
    level_prev = {i+1:None for i in xrange(len(xpaths))}
    level_prev[0] = tocroot

    # Find those levels that have no elements in all spine items
    maps = OrderedDict()
    empty_levels = {i+1 for i, xp in enumerate(xpaths)}
    for spinepath in container.spine_items:
        name = container.abspath_to_name(spinepath)
        root = container.parsed(name)
        level_item_map = maps[name] = {i+1:frozenset(xp(root)) for i, xp in enumerate(xpaths)}
        for lvl, elems in level_item_map.iteritems():
            if elems:
                empty_levels.discard(lvl)
    # Remove empty levels from all level_maps
    if empty_levels:
        for name, lmap in tuple(maps.iteritems()):
            lmap = {lvl:items for lvl, items in lmap.iteritems() if lvl not in empty_levels}
            lmap = sorted(lmap.iteritems(), key=itemgetter(0))
            lmap = {i+1:items for i, (l, items) in enumerate(lmap)}
            maps[name] = lmap

    for name, level_item_map in maps.iteritems():
        root = container.parsed(name)
        item_level_map = {e:i for i, elems in level_item_map.iteritems() for e in elems}
        item_dirtied = False

        for item in root.iterdescendants(etree.Element):
            lvl = plvl = item_level_map.get(item, None)
            if lvl is None:
                continue
            parent = None
            while parent is None:
                plvl -= 1
                parent = level_prev[plvl]
            lvl = plvl + 1
            if item_at_top(item):
                dirtied, elem_id = False, None
            else:
                dirtied, elem_id = ensure_id(item)
            text = elem_to_toc_text(item)
            item_dirtied = dirtied or item_dirtied
            toc = parent.add(text, name, elem_id)
            toc.dest_exists = True
            level_prev[lvl] = toc
            for i in xrange(lvl+1, len(xpaths)+1):
                level_prev[i] = None

        if item_dirtied:
            container.commit_item(name, keep_parsed=True)

    return tocroot
Example #7
class DiGraph(object):

    def __init__(self, size):
        self.size = size
        self.g = OrderedDict()
        for i in range(size):
            self.g[i] = []

    def add_edge(self, s, d):
        if self.connected(s, d):
            return
        self.g[s].append(d)

    def connected(self, s, d):
        for v in self.vertices(s):
            if d == v:
                return True
        return False

    def vertices(self, v):
        return self.g[v]

    def __str__(self):
        out = []
        for k, children in self.g.iteritems():
            out.append("{} -> {}".format(k, children))
        return "\n".join(out)

    def __iter__(self):
        for k, v in self.g.iteritems():
            yield k, v
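
# A small usage sketch for the DiGraph class above (Python 2); the edges are arbitrary.
g = DiGraph(4)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(2, 3)
print g                      # 0 -> [1, 2] ... one line per vertex
for node, children in g:     # __iter__ yields (vertex, adjacency list) via iteritems()
    print node, children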
Example #8
def permutation(expr):
    result = []
    weight_table = OrderedDict()

    for item in expr:
        weight = get_weight(item)
        if weight in weight_table:
            weight_table[weight].append(item)
        else:
            weight_table[weight] = [item]

    for key, value in weight_table.iteritems():
        if key > 0 and len(value) > 1:
            weight_table[key] = map(list, permutations(value))

    if 0 in weight_table:
        result.append(weight_table[0])
        del weight_table[0]
            
    for key, value in weight_table.iteritems():
        if len(value) < 2:
            result = map(lambda x: x+value, result)
        else:
            result = map(list,product(result, value))
            result = map(lambda x: x[0]+x[1], result)
            
    return result
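
# A hedged sketch for permutation() above (Python 2). get_weight here is a stand-in for
# the real weighting function defined elsewhere; permutations/product come from itertools
# as the code assumes.
from itertools import permutations, product

def get_weight(item):
    return 0 if item.startswith('-') else 1   # hypothetical: options weigh 0, operands 1

print permutation(['-v', 'a', 'b'])   # [['-v', 'a', 'b'], ['-v', 'b', 'a']]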
Example #9
class FusedDocSentence:
    """
        store multiple DocSentence
        feature_files in tuple (feature_name, file_path)
    """
    def __init__(self, feature_files, **kwargs):
        loglevel = logging.ERROR if 'loglevel' not in kwargs else kwargs['loglevel']
        logging.basicConfig(format='[%(levelname)s][%(name)s] %(message)s')
        self.logger = logging.getLogger(__name__+'.'+self.__class__.__name__)
        self.logger.setLevel(loglevel)

        self.features = OrderedDict()
        for feature_name, feature_file in feature_files:
            self.features[feature_name] = DocSentence(feature_file)

    def get_fused_feature_vector_by_idx(self, doc_idx):

        fused_feature_vector = None
        for k, v in self.features.iteritems():
            if fused_feature_vector is None:
                fused_feature_vector = v.get_feature_vector_by_idx(doc_idx)
            else:
                fused_feature_vector = np.concatenate((fused_feature_vector, v.get_feature_vector_by_idx(doc_idx)), axis=1)

        return fused_feature_vector

    def get_num_doc(self):
        for k, v in self.features.iteritems():
            return v.get_num_doc()
Example #10
def getStatistics(keyfile, resfile, n=10):
    confusion = getConfusion(keyfile, resfile)
    correct = defaultdict(float)
    error  = defaultdict(float)
    correctDist = defaultdict(float)
    errorDist = defaultdict(float)
    # for tag in alltags:
    for (true, pred), value in confusion.iteritems():
        if pred == true:
            correct[true] += value
        else:
            error[true] += value

    for tag, value in correct.iteritems():
        correctDist[tag] = correct[tag]/(correct[tag]+error[tag])

    for tag, value in error.iteritems():
        errorDist[tag] = error[tag]/(correct[tag]+error[tag])

    best = OrderedDict(sorted(correctDist.items(), key=lambda t: t[1], reverse=True)[:n])
    worst = OrderedDict(sorted(errorDist.items(), key=lambda t: t[1], reverse=True)[:n])

    print '----- Best prediction -----'
    for key, value in best.iteritems():
        print key, value, correct[key]

    print
    print '---- Worst Prediction ----'
    for key, value in worst.iteritems():
        print key, value, error[key]
    return best, worst
Example #11
class SettingsManager(object):
    def __init__(self, name="settings"):
        self._name = name
        self._sections = OrderedDict()

    def add_section(self, name, doc=""):
        self._sections[name] = Section(name, doc)
        setattr(self, name, self._sections[name])
        return self._sections[name]

    def __repr__(self):
        txt = ""
        for sec_name, section in self._sections.iteritems():
            if isinstance(section, Section):
                for key, value in section.iteritems():
                    txt += "{}.{}.{} = {}\n".format(self._name, sec_name, key, repr(value))
        return txt

    def _repr_html_(self):
        from radiopadre import render
        data = []
        styles = {}
        for sec_name, section in self._sections.iteritems():
            if isinstance(section, Section):
                section._repr_table(data, styles, self._name+".")
        return render.render_table(data, ("name", "value", "description"), html=set(["name","description"]),
                                   styles=styles, header=False, numbering=False)

    def show(self):
        from IPython.display import display,HTML
        return display(HTML(self._repr_html_()))
Example #12
def stream_tree_top(streams):
    '''
    ' This function should create the top level tree structure for the datastreams, and return it as a dictionary.
    ' Only the keys should really be necessary, though.
    '
    ' Keyword Args:
    '    streams - An iterable of datastreams.
    '
    ' Returns:
    '    A dictionary containing the top level of the tree as strings.  The values are True for nodes, and
    '    False for leaves.
    '''
    nodes = {}
    leaves = {}
    for s in streams:
        # Assume for now that names are unique.
        # TODO: Validate (in save/models, etc.) that no 2 objects can have names such that name and name::other
        #  exist.
        spart = s.name.partition('|')
        if spart[2] != '':
            if spart[0] not in nodes:
                nodes[spart[0]] = None
        else:
            leaves[spart[0]] = s.id

    nodes = OrderedDict(sorted(nodes.iteritems(), key = lambda t: t[0]))
    leaves = OrderedDict(sorted(leaves.iteritems(), key = lambda t: t[0]))
    return {'nodes': nodes, 'leaves': leaves}
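
# A minimal sketch for stream_tree_top() above (Python 2), using a stand-in Stream type;
# the real datastream model (with .name and .id) is assumed to live elsewhere.
from collections import namedtuple

Stream = namedtuple('Stream', ['name', 'id'])
streams = [Stream('temp|indoor', 1), Stream('temp|outdoor', 2), Stream('humidity', 3)]
print stream_tree_top(streams)
# {'nodes': OrderedDict([('temp', None)]), 'leaves': OrderedDict([('humidity', 3)])}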
Example #13
 def run_cloners(cls, old_event, new_event):
     selected = {x[7:] for x in request.values.getlist('cloners') if x.startswith('cloner_')}
     all_cloners = OrderedDict((name, cloner_cls(old_event))
                               for name, cloner_cls in get_event_cloners().iteritems())
     if any(cloner.is_internal for name, cloner in all_cloners.iteritems() if name in selected):
         raise Exception('An internal cloner was selected')
     # enable internal cloners that are enabled by default or required by another cloner
     selected |= {c.name
                  for c in all_cloners.itervalues()
                  if c.is_internal and (c.is_default or c.required_by_deep & selected)}
     # enable unavailable cloners that may be pulled in as a dependency nonetheless
     extra = {c.name
              for c in all_cloners.itervalues()
              if not c.is_available and c.always_available_dep and c.required_by_deep & selected}
     selected |= extra
     active_cloners = OrderedDict((name, cloner) for name, cloner in all_cloners.iteritems() if name in selected)
     if not all((c.is_internal or c.is_visible) and c.is_available
                for c in active_cloners.itervalues()
                if c.name not in extra):
         raise Exception('An invisible/unavailable cloner was selected')
     for name, cloner in active_cloners.iteritems():
         if not (selected >= cloner.requires_deep):
             raise Exception('Cloner {} requires {}'.format(name, ', '.join(cloner.requires_deep - selected)))
     shared_data = {}
     cloner_names = set(active_cloners)
     for name, cloner in active_cloners.iteritems():
         shared_data[name] = cloner.run(new_event, cloner_names, cloner._prepare_shared_data(shared_data))
Example #14
def test_brnn(
    model_path='yuv_scala_4_frm10.npy',  # The model path
):

    options = locals().copy()


    print('... Loading data')
    test_path = OrderedDict()

    test_path['Dirty_Dancing'] = '../data/test/58_Dirty_Dancing_scale_x4.mat'
    test_path['Turbine'] = '../data/test/350_Turbine_scale_x4.mat'
    test_path['Star_Fan'] = '../data/test/300_Star_Fan_scale_x4.mat'
    test_path['Flag'] = '../data/test/290_Flag_scale_x4.mat'
    test_path['Treadmill'] = '../data/test/300_Treadmill_scale_x4.mat'


    test_set = OrderedDict()
    for k, v in test_path.iteritems():
        test_set_x, test_set_y = load_data(v)
        test_set[k] = [test_set_x, test_set_y]

    # for k, v in test_set.iteritems():
    #     name = k
    #     x = np.round(v[0] * 255);
    #     y = np.round(v[1] * 255);
    #     ps = pnsr(x, y)
    #     print(name, ps / (x.shape[0] * x.shape[1]))

#     options['size_spa'] = 32
#     options['stride_spa'] = 14
#     options['stride_tem'] = 8
#     options['size_tem'] = 10
#     options['scale'] = 4
#     options['n_timestep'] = 10
    options['filter_shape'] = [
        [64, 1, 9, 9],
        [32, 64, 1, 1],
        [1, 32, 5, 5]
    ]
    options['rec_filter_size'] = [
        [64, 64, 1, 1],
        [32, 32, 1, 1]
    ]

    options['padding'] = np.sum([(i[-1] - 1) / 2 for i in options['filter_shape']])
    print("model options", options)
    print('... Building model')

    net = BiRecConvNet(options)

    model = load_model('../model/' + model_path)
    (x, y, f_x, cost, params) = net.build_net(model)
    print('... Test data')
    test_psnr = OrderedDict()

    for k, v in test_set.iteritems():
        test_psnr[k] = pred_error(f_x, v[0], v[1], options)

    print(' ...Test_PSNR: ', test_psnr)
Example #15
    def _blockData(self, data_d):

        blocks = OrderedDict({})
        coverage = {}

        for field in self.blocker.tfidf_fields :
            self.blocker.tfIdfBlock(((record_id, record[field])
                                     for record_id, record 
                                     in data_d.iteritems()),
                                    field)

        for block_key, record_id in self.blocker(data_d.iteritems()) :
            blocks.setdefault(block_key, []).append((record_id, 
                                                     data_d[record_id]))

        # Redundancy-free comparisons from Kolb et al., "Dedoop:
        # Efficient Deduplication with Hadoop"
        # http://dbs.uni-leipzig.de/file/Dedoop.pdf
        for block_id, (block, records) in enumerate(blocks.iteritems()) :
            for record_id, record in records :
                coverage.setdefault(record_id, []).append(block_id)

        for block_id, (block_key, records) in enumerate(blocks.iteritems()) :
            tuple_records = []
            for record_id, record in records :
                smaller_ids = set([covered_id for covered_id 
                                   in coverage[record_id] 
                                   if covered_id < block_id])
                tuple_records.append((record_id, record, smaller_ids))

            yield tuple_records
Example #16
def process_uris(args):
    urisFile = args.list
    checkout_dir = args.path
    vcsData = args.map
    vcsData = OrderedDict(sorted(vcsData.iteritems(), key=lambda x: -len(x[0])))

    uri_cleaner = re.compile(r"[^a-z0-9]")
    for l in urisFile:
        found = False
        uri = l.strip()
        name = uri_cleaner.sub("", "".join(uri.split(":")[1:]).split("#")[0])
        res = open("data/%s.xml" % name,"w")
        for uriprefix,rule in vcsData.iteritems():
            if uri.startswith(uriprefix):
                found = True
                r = dict.copy(rule)
                if "path" not in r:
                    r["path"] = uri.replace(uriprefix,"").split("#")[0]

                if rule["vcs"]=="cvs":
                    print_cvs_log(uri, r, res, checkout_dir)
                elif rule["vcs"]=="hg":
                    print_hg_log(uri, r, res, checkout_dir)
                elif rule["vcs"]=="git":
                    print_git_log(uri, r, res, checkout_dir, args.ghissues)
            if found:
                break
        if not found:
            print "No match for %s" % uri
Example #17
    def _print_summary(self):
        print 'Time elapsed: ' + str(self._end_time - self._start_time)

        sorted_openings = OrderedDict(sorted(self._evaluated_scores.items(), key=lambda x: x[1], reverse=True))

        print 'Best openings: '

        i = 0

        for (word, (points, starting_point)) in sorted_openings.iteritems():
            if i == 10:
                break

            print str(i + 1) + '. ' + word + ' = ' + str(points) + ' starting at: ' + str(starting_point)
            i += 1

        sorted_best_characters = OrderedDict(
            filter(lambda x: len(x[0]) == 7,
                   sorted(self.best_letters_evaluator.get_evaluation_results().items(), key=lambda x: x[1],
                          reverse=True)))

        print 'Best letters: '

        i = 0

        for letters, score in sorted_best_characters.iteritems():
            if i == 10:
                break

            print str(i + 1) + '. ' + letters + ' = ' + str(score)
            i += 1
Example #18
def createEnumerated(element):
    valuemap = OrderedDict()
    name = "UA_" + element.get("Name")
    fixed_size.add(name)
    printh("") # newline
    for child in element:
        if child.tag == "{http://opcfoundation.org/BinarySchema/}Documentation":
            printh("/** @brief " + child.text + " */")
        if child.tag == "{http://opcfoundation.org/BinarySchema/}EnumeratedValue":
            valuemap[name + "_" + child.get("Name")] = child.get("Value")
    valuemap = OrderedDict(sorted(valuemap.iteritems(), key=lambda (k,v): int(v)))
    # printh("typedef UA_Int32 " + name + ";")
    printh("typedef enum " + name + " { \n\t" +
           ",\n\t".join(map(lambda (key, value) : key.upper() + " = " + value, valuemap.iteritems())) +
           "\n} " + name + ";")
    if args.export_prototypes:
        printh("UA_TYPE_PROTOTYPES(" + name + ")")
    else:
        printh("UA_TYPE_PROTOTYPES_NOEXPORT(" + name + ")")
    printh("UA_TYPE_BINARY_ENCODING(" + name + ")")
    printc("UA_TYPE_AS(" + name + ", UA_Int32)")
    printc("UA_TYPE_BINARY_ENCODING_AS(" + name + ", UA_Int32)")
    if args.with_xml:
        printh("UA_TYPE_XML_ENCODING(" + name + ")\n")
        printc('''UA_TYPE_METHOD_CALCSIZEXML_NOTIMPL(%(name)s)
UA_TYPE_METHOD_ENCODEXML_NOTIMPL(%(name)s)
UA_TYPE_METHOD_DECODEXML_NOTIMPL(%(name)s\n)''' % {'name': name})
Example #19
    def result(self):
        standings = OrderedDict()
        for event in self.events.all():
            result = event.result
            for contestant in result:
                player = contestant.player
                if not player in standings:
                    standings[player] = defaultdict(int)
                standings[player]['initial_handicap'] = contestant.initial_handicap
                standings[player]['points'] += result[contestant]['points_earned'] or 0
                standings[player]['handicap'] = result[contestant]['handicap']
                standings[player]['events_attended'] += 1 
                standings[player]['rounds_played'] += sum([c.scores.filter(contestant=contestant).count() for c in event.cards.all()])
        # flag players who have not completed enough rounds to be ranked
        for player in standings:
            if standings[player]['initial_handicap'] is None and standings[player]['rounds_played'] < self.handicap_min_rounds:
                standings[player]['valid_hc'] = False
            else:
                standings[player]['valid_hc'] = True

        # add rank data
        standings = OrderedDict(sorted(standings.iteritems(), key=lambda x: x[1]['points'], reverse=True))
        rank = 0
        for player in standings:
            index = standings.keys().index(player)
            if index > 0 and (standings[standings.keys()[index - 1]]['points'] != standings[player]['points']):
                rank = index
            standings[player]['rank'] = rank + 1
        # sort by rank
        standings = OrderedDict(sorted(standings.iteritems(), key=lambda x: x[1]['rank']))
        return standings
Example #20
def log_stats(logs):
    response_codes = []
    data_size = 0
    host_regex = re.compile("https?://([^/]+)/.*$")
    all_hosts_data = {}
    for log in logs:
        with open(log, "rb") as l:
            for line in l:
                # Annotations can contain whitespace so limit the split.
                fields = line.split(None, 15)
                match = host_regex.match(fields[3])
                if match is not None:
                    host = match.group(1)
                    try:
                        host_data = all_hosts_data[host]
                    except KeyError:
                        all_hosts_data[host] = { "data_size": 0, "response_codes": [] }
                        host_data = all_hosts_data[host]
                    host_data["response_codes"].append(fields[1])
                    if fields[2] != "-":
                        host_data["data_size"] += int(fields[2])
                    if "serverMaxSuccessKb" in fields[-1]:
                        host_data["reached_cap"] = True
    all_hosts_data = OrderedDict(sorted(all_hosts_data.iteritems(), key=operator.itemgetter(1), reverse=True))
    for host, data in all_hosts_data.iteritems():
        data["response_codes"] = Counter(data["response_codes"])
        data["data_size"] = humanReadable(data["data_size"])
    return all_hosts_data
Example #21
    def _blockData(self, data_1, data_2):

        blocks = OrderedDict({})
        coverage = {}

        for field in self.blocker.tfidf_fields:
            fields_1 = ((record_id, record[field]) for record_id, record in data_1.iteritems())
            fields_2 = ((record_id, record[field]) for record_id, record in data_2.iteritems())

            self.blocker.tfIdfBlock(fields_1, fields_2, field)

        for block_key, record_id in self.blocker(data_1.iteritems()):
            blocks.setdefault(block_key, ([], []))[0].append((record_id, data_1[record_id]))

        for block_key, record_id in self.blocker(data_2.iteritems()):
            if block_key in blocks:
                blocks[block_key][1].append((record_id, data_2[record_id]))

        for block_id, (block, sources) in enumerate(blocks.iteritems()):
            for source in sources:
                for record_id, record in source:
                    coverage.setdefault(record_id, []).append(block_id)

        for block_id, (block_key, sources) in enumerate(blocks.iteritems()):
            tuple_block = []
            for source in sources:
                tuple_source = []
                for record_id, record in source:
                    smaller_ids = set([covered_id for covered_id in coverage[record_id] if covered_id < block_id])
                    tuple_source.append((record_id, record, smaller_ids))
                tuple_block.append(tuple_source)

            yield tuple_block
Example #22
class Tipalti:
    def __init__(self, payer_name, payer_id, private_key, debug=False):
        self.base_url = "https://ui.sandbox.tipalti.com" if debug else "https://ui.tipalti.com"

        self.params = OrderedDict()
        self.params["payer"] = payer_name
        self.params["idap"] = payer_id
        self.private_key = private_key

        self.user_params = OrderedDict()

    def add_param(self, param, value):
        self.user_params[param] = value

    def get_url(self, page_to_open):
        self.params["ts"] = int(time())
        uri = "{0}%s?{1}" % page_to_open
        res = uri.format(self.base_url, self._query_string())
        self.user_params = OrderedDict()
        return res

    def _query_string(self):
        params = OrderedDict()
        chn = chain(self.params.iteritems(), self.user_params.iteritems())
        for param, value in chn:
            params[param] = value
        hash_key = self._hash(urlencode(params), self.private_key)
        return urlencode(OrderedDict(params, hashkey=hash_key))

    @staticmethod
    def _hash(value, secret):
        return hmac.new(secret, value, digestmod=hashlib.sha256).hexdigest()
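
# A hedged usage sketch for the Tipalti class above (Python 2); the payer name, payee id,
# key and page path are placeholders, not real credentials.
t = Tipalti("ExamplePayer", "payee-123", "not-a-real-key", debug=True)
t.add_param("currency", "USD")
print t.get_url("/payee/home")   # sandbox iFrame URL carrying payer, idap, ts and hashkey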
Example #23
def output_new_dictionary(new_regions, original_regions):

    for file in new_regions:
        original_regions.update(file)

    ordered_map = OrderedDict(sorted(original_regions.iteritems()))

    with open(output_file, "wb") as output:
        writer = csv.writer(output)
        writer.writerow(["file", "reg_num", "total_reg_num",
                         "onset", "offset", "rank", "subr_version"])

        for filename, value in ordered_map.iteritems():
            ordered_region = OrderedDict(sorted(value.iteritems()))
            for reg_num, region in ordered_region.iteritems():
                if "onset" in region:
                    onset = region["onset"]
                else:
                    onset = None
                if "offset" in region:
                    offset = region["offset"]
                else:
                    offset = None
                if "rank" in region:
                    rank = region["rank"]
                else:
                    rank = None
                writer.writerow([filename, reg_num, region["total_reg_num"],
                                 onset, offset, rank, region["subr_version"]])
Example #24
class AttributeList(object):
    def __init__(self):
        self._od = OrderedDict()
    
    def append(self, x):
        assert len(x) == 2
        tag = x[0]
        value = x[1]
        assert isinstance(tag, unicode)
        if tag in self._od:
            raise ValueError(b"Attributes must have unique tags")
        if not isinstance(value, Presentable):
            raise ValueError(b"Unrecognized attribute value type")
        self._od[tag] = value
    
    def as_table(self):
        val = "<table>"
        for k, v in self._od.iteritems():
            cell1 = html.escape(ugettext(k))
            cell2 = present(v)
            val += "<tr><th>{0}</th><td>{1}</td></tr>".format(cell1, cell2)
        val += "</table>"
        return val
    
    def find(self, tag, default=None):
        return self._od.get(tag, default)
    
    def __iter__(self):
        return self._od.iteritems()
    
    def __len__(self):
        return len(self._od)
Example #25
    def rank(self, import_export_data):
        """
        rank countries based on trade balance
        rank products based on import values
        """


        country_profit = defaultdict(float)
        product_im_val = defaultdict(float)

        for country, records in import_export_data.iteritems():
            if not country in self.country_list:
                continue

            for each_record in records:
                if not each_record['id'] in self.product_list:
                    continue


                country_profit[country] += each_record['export'] - each_record['import']

                product_im_val[each_record['id']] += each_record['import']


        # sort
        country_profit = OrderedDict(sorted(country_profit.iteritems(), key=lambda d:d[1], reverse=True))
        product_im_val = OrderedDict(sorted(product_im_val.iteritems(), key=lambda d:d[1], reverse=True))

        return country_profit, product_im_val
Example #26
class TagCalculator(object):

    def __init__(self, iskValueByTypeID):
        self.iskValueByTypeID = OrderedDict(sorted(((typeID, value) for typeID, value in iskValueByTypeID.iteritems()), key=itemgetter(1), reverse=True))

    def GetIskAsTags(self, bountyAmount):
        return {tagTypeID:tagCount for tagTypeID, tagCount, _ in self.GetTagInfo(bountyAmount)}

    def GetAllTagsAndAmount(self, bountyAmount):
        tagInfoByTypeID = {typeID:(tagCount, iskAmount) for typeID, tagCount, iskAmount in self.GetTagInfo(bountyAmount)}
        ret = []
        for typeID, iskValue in self.iskValueByTypeID.iteritems():
            if typeID in tagInfoByTypeID:
                tagCount, iskAmount = tagInfoByTypeID[typeID]
                ret.append((typeID, tagCount, iskValue))
            else:
                ret.append((typeID, 0, iskValue))

        return ret

    def GetTagInfo(self, bountyAmount):
        bountyLeft = bountyAmount
        tags = []
        for typeID, value in self.iskValueByTypeID.iteritems():
            if value <= bountyLeft:
                tagCount = int(bountyLeft) / int(value)
                iskAmount = tagCount * value
                tags.append((typeID, tagCount, iskAmount))
                bountyLeft -= iskAmount

        return tags
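
# A small hedged sketch for TagCalculator above (Python 2); the tag type IDs and ISK
# values are invented.
calc = TagCalculator({101: 1000.0, 102: 100.0, 103: 10.0})
print calc.GetTagInfo(1234)            # [(101, 1, 1000.0), (102, 2, 200.0), (103, 3, 30.0)]
print calc.GetIskAsTags(1234)          # 1x 101, 2x 102, 3x 103
print calc.GetAllTagsAndAmount(5)      # [(101, 0, 1000.0), (102, 0, 100.0), (103, 0, 10.0)]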
Example #27
def _simpleconfig_normalize(me, parent_section):
    # must not attempt to modify anything from parent_section in-place
    parent_tag, parent_options, parent_sections = parent_section
    parent_sections_new = []
    for s in parent_sections:
        tag, options, sections = s
        opts = OrderedDict()
        for n, v in options:
            vals = opts.setdefault(n, [])
            if v in vals:
                log.warning("omitting duplicated `({0}, {1}}' option from"
                            " normalization".format(n, v))
            else:
                vals.append(v)
        options = []
        for n, vals in opts.iteritems():
            options.append((n, vals[0]))
            vals[:] = vals[1:]
        parent_sections_new.append((tag, options, [me(s) for s in sections]))
        for i, (n, vals) in enumerate((n, vals) for n, vals in opts.iteritems()
                                    if vals):
            if not i and sections:
                log.warning("current section `{0}' needs normalization but"
                            " contains subsections (not expected)".format(tag))
            for v in vals:
                parent_sections_new.append((tag, ((n, v), ), ()))
    return (parent_tag, parent_options, parent_sections_new)
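
# A hedged sketch of the (tag, options, subsections) triples this expects; `me` is the
# function itself so nested sections recurse, and the sample data is made up (Python 2).
section = ('network', [], [
    ('iface', [('addr', '10.0.0.1'), ('addr', '10.0.0.2'), ('mtu', '1500')], []),
])
normalize = lambda s: _simpleconfig_normalize(normalize, s)
print normalize(section)
# ('network', [], [('iface', [('addr', '10.0.0.1'), ('mtu', '1500')], []),
#                  ('iface', (('addr', '10.0.0.2'),), ())])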
Example #28
def EntropyGain(att, D):
    dict_count = {}
    dict_succ = {}
    count = 0
    I = 0

    for i in range(0, len(D)):
        if D[i][att] != '?':
            count = count + 1
            if D[i][att] in dict_count:
                dict_count[D[i][att]] = dict_count[D[i][att]] + 1
                if D[i][15] == '+':
                    dict_succ[D[i][att]] = dict_succ[D[i][att]] + 1

            else:
                dict_count[D[i][att]] = 1
                if D[i][15] == '+':
                    dict_succ[D[i][att]] = 1
                else:
                    dict_succ[D[i][att]] = 0

    dict_count = OrderedDict(sorted(dict_count.iteritems()))
    dict_succ = OrderedDict(sorted(dict_succ.iteritems()))

    for i in dict_succ:
        if dict_succ[i] != 0 and dict_succ[i] != dict_count[i]:
            I = I + (dict_count[i]/float(count)) * \
            (-1 * (dict_succ[i]/float(dict_count[i])) * math.log((dict_succ[i]/float(dict_count[i])),2)) + \
            (dict_count[i]/float(count)) * (-1 * ((dict_count[i] - dict_succ[i])/float(dict_count[i])) * \
                math.log(((dict_count[i] - dict_succ[i])/float(dict_count[i])),2))

    return I, dict_count
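
# A tiny hedged sketch for EntropyGain() above (Python 2); rows have 16 fields with the
# '+'/'-' class label in field 15, matching the indices the function uses.
make_row = lambda value, label: [value] + ['?'] * 14 + [label]
D = [make_row('x', '+'), make_row('x', '-'), make_row('y', '+'), make_row('y', '+')]
print EntropyGain(0, D)   # (0.5, OrderedDict([('x', 2), ('y', 2)]))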
Example #29
def format_request_part(key, value):
    'Format request part recursively'
    # If value is a dictionary or tuple
    if isinstance(value, tuple):
        value = OrderedDict(value)
    if hasattr(value, 'iteritems'):
        part = ET.Element(key)
        for k, v in value.iteritems():
            part.extend(format_request_part(k, v))
        return [part]
    # If value is a list
    elif isinstance(value, list):
        parts = list()
        for entry in value:
            # List of tuples is actually preferred to maintain order of elements
            # also seems to be an issue passing OrderedDict on queue last I checked
            if isinstance(entry, tuple):
                entry = OrderedDict(entry)
            # If value is a list of dictionaries,
            if hasattr(entry, 'iteritems'):
                part = ET.Element(key)
                for k, v in entry.iteritems():
                    part.extend(format_request_part(k, v))
                parts.append(part)
            # If value is a list of repeating elements
            else:
                parts += format_request_part(key, entry)
        return parts
    # If value is neither a dictionary nor a list,
    else:
        part = ET.Element(key)
        part.text = unicode(value)
        return [part]
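
# A minimal hedged sketch for format_request_part() above (Python 2); the element names
# are made up, and ET is assumed to be xml.etree.ElementTree as in the function.
import xml.etree.ElementTree as ET

order = (('Id', 1), ('Sku', ['A1', 'B2']))   # a tuple of pairs keeps element order
part = format_request_part('Order', order)[0]
print ET.tostring(part)   # <Order><Id>1</Id><Sku>A1</Sku><Sku>B2</Sku></Order>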
Example #30
    def cum_rank(self, country_profit_list, product_im_val_list):
        # compute avg scores
        cum_country_profit = defaultdict(float)
        cum_product_im_val = defaultdict(float)

        # country
        for country_profit in country_profit_list:
            for country, profit in country_profit.iteritems():
                cum_country_profit[country] += profit

        # for product
        for product_im_val in product_im_val_list:
            for product, im_val in product_im_val.iteritems():
                cum_product_im_val[product] += im_val

        # avg
        for country, profits in cum_country_profit.iteritems():
            cum_country_profit[country] = profits / float(len(country_profit_list))

        for product, im_val in cum_product_im_val.iteritems():
            cum_product_im_val[product] = im_val / float(len(product_im_val_list))


         # sort
        cum_country_profit = OrderedDict(sorted(cum_country_profit.iteritems(), key=lambda d:d[1], reverse=True))
        cum_product_im_val = OrderedDict(sorted(cum_product_im_val.iteritems(), key=lambda d:d[1], reverse=True))


        return cum_country_profit, cum_product_im_val
Example #31
def volumemanager(request):

    if request.method == "POST":
        form = forms.VolumeManagerForm(request.POST)
        if form.is_valid() and form.save():
            events = []
            form.done(request, events)
            return JsonResp(
                request,
                message=_("Volume successfully added."),
                events=events,
            )
        else:
            return JsonResp(request, form=form, formsets={'layout': form._formset})
    disks = []

    # Grab disk list
    # Root device already ruled out
    for disk, info in notifier().get_disks().items():
        disks.append(forms.Disk(
            info['devname'],
            info['capacity'],
            serial=info.get('ident')
        ))
    disks = sorted(disks, key=lambda x: (x.size, x.dev), cmp=_diskcmp)

    # Exclude what's already added
    used_disks = []
    for v in models.Volume.objects.all():
        used_disks.extend(v.get_disks())

    qs = iSCSITargetExtent.objects.filter(iscsi_target_extent_type='Disk')
    used_disks.extend([i.get_device()[5:] for i in qs])

    bysize = dict()
    for d in list(disks):
        if d.dev in used_disks:
            continue
        hsize = forms.humanize_number_si(d.size)
        if hsize not in bysize:
            bysize[hsize] = []
        display_name = d.dev
        if '/' in display_name:
            display_name = display_name.split('/')[-1]
        bysize[hsize].append({
            'dev': d.dev,
            'name': str(d),
            'displayName': display_name,
            'size': d.size,
            'serial': d.serial,
        })

    bysize = OrderedDict(sorted(bysize.iteritems(), reverse=True))

    qs = models.Volume.objects.filter(vol_fstype='ZFS')
    swap = Advanced.objects.latest('id').adv_swapondrive

    return render(request, "storage/volumemanager.html", {
        'disks': json.dumps(bysize),
        'dedup_warning': forms.DEDUP_WARNING,
        'swap_size': swap * 1024 * 1024 * 1024,
        'manual_url': reverse('storage_volumemanager_zfs'),
        'extend': json.dumps(
            [{'value': '', 'label': '-----'}] +
            [{
                'label': x.vol_name,
                'value': x.vol_name,
                'enc': x.vol_encrypt > 0
            } for x in qs if x.is_decrypted()]
        ),
    })
Example #32
from collections import OrderedDict
from bson.objectid import ObjectId

seasons = {
    1: ObjectId("50c6197ea2fc8e1110000001"),
    2: ObjectId("50c61e51a2fc8e1110000002"),
    3: ObjectId("50c62517a2fc8e1110000003"),
    4: ObjectId("50e477293ae740a45f000001"),
    5: ObjectId("51ad041f3ae7401ecc000001"),
    6: ObjectId("51f158983ae74082bb000001"),
    7: ObjectId("5331cce91bccd304b6000001"),
    8: ObjectId("54cfc76387ee0404d5000001"),
    9: ObjectId("55a3d6cf3ae74036bc000001"),  # actually Lost Season
    10: ObjectId("56a63b3b41479b0042000001")  # actually Season 9
}

seasons = OrderedDict(sorted(seasons.iteritems()))  # keep the seasons in numeric order


def get_season_str(oid):
    season = -1
    for i_season, i_oid in seasons.iteritems():
        if oid == i_oid:
            season = i_season
    if season == 10:
        season = 9
    elif season == 9:
        season = "Lost Season"
    return season
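
# A small usage sketch (Python 2) for get_season_str() above.
print get_season_str(seasons[3])   # 3
print get_season_str(seasons[9])   # Lost Season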


# load pickle file
Example #33
def get_trimmed_coverage_from_output(log, sample, assembly_pth, coverage,
                                     assembler):
    log.info("Screening and filtering contigs for coverage (3x ends, 5x avg.)")
    if assembler == "trinity" or assembler == "trinity2":
        regex = re.compile("({}).*:(\d+)".format(
            get_user_param("headers", "trinity")))
    elif assembler == "velvet":
        regex = re.compile("({}.*):(\d+)".format(
            get_user_param("headers", "velvet")))
    elif assembler == "abyss":
        regex = re.compile("({}.*):(\d+)".format(
            get_user_param("headers", "abyss")))
    elif assembler == "idba":
        regex = re.compile("({}.*):(\d+)".format(
            get_user_param("headers", "idba")))
    elif assembler == "spades":
        regex = re.compile("({}.*):(\d+)".format(
            get_user_param("headers", "spades")))
    # setup starting values
    previous_match = None
    contig_depth = []
    contig_data = OrderedDict()
    overall_coverage = []
    overall_length = []
    overall_count = 1
    overall_contigs = {}
    pbc = os.path.join(assembly_pth,
                       '{}-TRIMMED-per-base-coverage.txt.gz'.format(sample))
    pcc = os.path.join(assembly_pth,
                       '{}-TRIMMED-per-contig-coverage.txt'.format(sample))
    upcc = os.path.join(assembly_pth,
                        '{}-UNTRIMMED-per-contig-coverage.txt'.format(sample))
    with open(coverage, 'rU') as infile:
        with gzip.open(pbc, 'w') as per_base_cov:
            with open(pcc, 'w') as per_contig_cov:
                with open(upcc, 'w') as unt_per_contig_cov:
                    # read header line
                    gatk_header = infile.readline()
                    # write headers to outfiles
                    per_contig_cov.write(
                        "name\tbeginning-length\tbeginning-mean-cov\ttrim-start\ttrim-end\tend-length\tend-mean-cov\n"
                    )
                    unt_per_contig_cov.write(
                        "name\tbeginning-length\tbeginning-mean-cov\n")
                    per_base_cov.write(gatk_header)
                    for line in infile:
                        ls = line.split()
                        search = regex.search(ls[0])
                        match_name, pos = search.groups()
                        if previous_match is None or match_name == previous_match:
                            # hold onto current match_name
                            previous_match = match_name
                            # compute metrics on current position
                            contig_data[int(pos)] = line
                            contig_depth.append(int(ls[1]))
                        elif match_name != previous_match:
                            metadata = compute_coverage_metrics(contig_depth,
                                                                trim=True)
                            unt_per_contig_cov.write("{}\t{}\t{}\n".format(
                                previous_match, metadata["beginning-length"],
                                metadata["beginning-mean-cov"]))
                            if metadata["ending-mean-cov"] >= 5.0:
                                per_contig_cov.write(
                                    "{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
                                        previous_match,
                                        metadata["beginning-length"],
                                        metadata["beginning-mean-cov"],
                                        metadata["trim-start"],
                                        metadata["trim-end"],
                                        metadata["ending-length"],
                                        metadata["ending-mean-cov"]))
                                for pos, line in contig_data.iteritems():
                                    if pos - 1 >= metadata[
                                            "trim-start"] and pos - 1 < metadata[
                                                "trim-end"]:
                                        per_base_cov.write(line)
                                overall_contigs[previous_match] = metadata
                                overall_count += 1
                                overall_coverage.append(
                                    metadata["ending-mean-cov"])
                                overall_length.append(
                                    metadata["ending-length"])
                            # reset previous match to current
                            previous_match = match_name
                            # reset containers
                            contig_depth = []
                            contig_data = OrderedDict()
                            # compute metrics on current first position
                            contig_data[int(pos)] = line
                            contig_depth.append(int(ls[1]))
    log.info(
        "\t{} contigs, mean coverage = {:.1f}, mean length = {:.1f}".format(
            overall_count, numpy.mean(overall_coverage),
            numpy.mean(overall_length)))
    return overall_contigs
Example #34
class CSVDataFrameBuilder(AbstractDataFrameBuilder):

    def __init__(self, username, id_string, filter_query=None,
                 group_delimiter=DEFAULT_GROUP_DELIMITER,
                 split_select_multiples=True, binary_select_multiples=False):
        super(CSVDataFrameBuilder, self).__init__(
            username, id_string, filter_query, group_delimiter,
            split_select_multiples, binary_select_multiples)
        self.ordered_columns = OrderedDict()

    def _setup(self):
        super(CSVDataFrameBuilder, self)._setup()

    @classmethod
    def _reindex(cls, key, value, ordered_columns, parent_prefix=None):
        """
        Flatten list columns by appending an index, otherwise return as is
        """
        d = {}

        # check for lists
        if type(value) is list and len(value) > 0 \
                and key != NOTES and key != ATTACHMENTS:
            for index, item in enumerate(value):
                # start at 1
                index += 1
                # for each list check for dict, we want to transform the key of
                # this dict
                if type(item) is dict:
                    for nested_key, nested_val in item.iteritems():
                        # given the key "children/details" and nested_key/
                        # abbreviated xpath
                        # "children/details/immunization/polio_1",
                        # generate ["children", index, "immunization/polio_1"]
                        xpaths = [
                            "%s[%s]" % (
                                nested_key[:nested_key.index(key) + len(key)],
                                index),
                            nested_key[nested_key.index(key) + len(key) + 1:]]
                        # re-create the xpath, then split on /
                        xpaths = "/".join(xpaths).split("/")
                        new_prefix = xpaths[:-1]
                        if type(nested_val) is list:
                            # if nested_value is a list, rinse and repeat
                            d.update(cls._reindex(
                                nested_key, nested_val,
                                ordered_columns, new_prefix))
                        else:
                            # it can only be a scalar
                            # collapse xpath
                            if parent_prefix:
                                xpaths[0:len(parent_prefix)] = parent_prefix
                            new_xpath = u"/".join(xpaths)
                            # check if this key exists in our ordered columns
                            if key in ordered_columns.keys():
                                if new_xpath not in ordered_columns[key]:
                                    ordered_columns[key].append(new_xpath)
                            d[new_xpath] = nested_val
                else:
                    d[key] = value
        else:
            # anything that's not a list will be in the top-level dict, so it's
            # safe to simply assign
            if key == NOTES:
                d[key] = u"\r\n".join(value)
            elif key == ATTACHMENTS:
                d[key] = []
            else:
                d[key] = value
        return d

    @classmethod
    def _build_ordered_columns(cls, survey_element, ordered_columns,
                               is_repeating_section=False):
        """
        Build a flat ordered dict of column groups

        is_repeating_section ensures that child questions of repeating sections
        are not considered columns
        """
        for child in survey_element.children:
            # child_xpath = child.get_abbreviated_xpath()
            if isinstance(child, Section):
                child_is_repeating = False
                if isinstance(child, RepeatingSection):
                    ordered_columns[child.get_abbreviated_xpath()] = []
                    child_is_repeating = True
                cls._build_ordered_columns(child, ordered_columns,
                                           child_is_repeating)
            elif isinstance(child, Question) and not \
                question_types_to_exclude(child.type) and not\
                    is_repeating_section:  # if is_repeating_section,
                    # its parent already initialised an empty list
                    # so we don't add it to our list of columns,
                    # the repeating columns list will be
                    # generated when we reindex
                ordered_columns[child.get_abbreviated_xpath()] = None

    def _format_for_dataframe(self, cursor):
        # TODO: check for and handle empty results
        # add ordered columns for select multiples
        if self.split_select_multiples:
            for key, choices in self.select_multiples.items():
                # HACK to ensure choices are NOT duplicated
                self.ordered_columns[key] = \
                    remove_dups_from_list_maintain_order(choices)
        # add ordered columns for gps fields
        for key in self.gps_fields:
            gps_xpaths = self.dd.get_additional_geopoint_xpaths(key)
            self.ordered_columns[key] = [key] + gps_xpaths
        data = []
        for record in cursor:
            # split select multiples
            if self.split_select_multiples:
                record = self._split_select_multiples(
                    record, self.select_multiples,
                    self.BINARY_SELECT_MULTIPLES)
            # check for gps and split into components i.e. latitude, longitude,
            # altitude, precision
            self._split_gps_fields(record, self.gps_fields)
            self._tag_edit_string(record)
            flat_dict = {}
            # re index repeats
            for key, value in record.iteritems():
                reindexed = self._reindex(key, value, self.ordered_columns)
                flat_dict.update(reindexed)

            # if the delimiter is different, replace within the record as well
            if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
                flat_dict = dict((self.group_delimiter.join(k.split('/')), v)
                                 for k, v in flat_dict.iteritems())
            data.append(flat_dict)
        return data

    def export_to(self, file_or_path, data_frame_max_size=30000):
        from math import ceil
        # get record count
        record_count = self._query_mongo(query=self.filter_query, count=True)

        self.ordered_columns = OrderedDict()
        self._build_ordered_columns(self.dd.survey, self.ordered_columns)

        # pandas will only export 30k records in a dataframe to a csv
        # - we need to create multiple 30k dataframes if required,
        # we need to go through all the records though so that
        # we can figure out the columns we need for repeats
        datas = []
        num_data_frames = \
            int(ceil(float(record_count) / float(data_frame_max_size)))
        for i in range(num_data_frames):
            cursor = self._query_mongo(
                self.filter_query, start=(i * data_frame_max_size),
                limit=data_frame_max_size)
            data = self._format_for_dataframe(cursor)
            datas.append(data)

        columns = list(chain.from_iterable(
            [[xpath] if cols is None else cols
             for xpath, cols in self.ordered_columns.iteritems()]))

        # use a different group delimiter if needed
        if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
            columns = [self.group_delimiter.join(col.split("/"))
                       for col in columns]

        # add extra columns
        columns += [col for col in self.ADDITIONAL_COLUMNS]

        header = True
        if hasattr(file_or_path, 'read'):
            csv_file = file_or_path
            close = False
        else:
            csv_file = open(file_or_path, "wb")
            close = True

        for data in datas:
            writer = CSVDataFrameWriter(data, columns)
            writer.write_to_csv(csv_file, header=header)
            header = False
        if close:
            csv_file.close()
Example #35
class LiveVis(object):
    '''Runs the demo'''

    def __init__(self, settings):
        self.settings = settings
        self.bindings = bindings

        self.app_classes = OrderedDict()
        self.apps = OrderedDict()

        for module_path, app_name in settings.installed_apps:
            module = importlib.import_module(module_path)
            print 'got module', module
            app_class  = getattr(module, app_name)
            print 'got app', app_class
            self.app_classes[app_name] = app_class

        for app_name, app_class in self.app_classes.iteritems():
            app = app_class(settings, self.bindings)
            self.apps[app_name] = app
        self.help_mode = False
        self.window_name = 'Deep Visualization Toolbox    |    Model: %s' % (settings.model_to_load)
        self.quit = False
        self.debug_level = 0

        self.debug_pane_defaults = {
            'face': getattr(cv2, self.settings.help_face),
            'fsize': self.settings.help_fsize,
            'clr': pane_debug_clr,
            'thick': self.settings.help_thick
        }
        self.help_pane_defaults = {
            'face': getattr(cv2, self.settings.help_face),
            'fsize': self.settings.help_fsize,
            'clr': to_255(self.settings.help_clr),
            'thick': self.settings.help_thick
        }


    def init_window(self):
        cv2.namedWindow(self.window_name)
        max_i, max_j = 0, 0
        if len(self.settings.window_panes) == 0:
            raise ImproperlyConfigured('settings.window_panes is empty.')
        self.panes = OrderedDict()
        for pane_name, pane_dimensions in self.settings.window_panes:
            if len(pane_dimensions) != 4:
                raise ImproperlyConfigured('pane dimensions should be a tuple of length 4, but it is "%s"' % repr(pane_dimensions))
            i_begin, j_begin, i_size, j_size = pane_dimensions
            max_i = max(max_i, i_begin + i_size)
            max_j = max(max_j, j_begin + j_size)
            if pane_name in self.panes:
                raise Exception('Duplicate pane name in settings: %s' % pane_name)
            self.panes[pane_name] = Pane(i_begin, j_begin, i_size, j_size)
        self.buffer_height = max_i
        self.buffer_width = max_j

        self.window_buffer = np.tile(np.array(np.array(self.settings.window_background) * 255, 'uint8'),
                                     (max_i,max_j,1))
        #print 'BUFFER IS:', self.window_buffer.shape, self.window_buffer.min(), self.window_buffer.max()

        for _,pane in self.panes.iteritems():
            pane.data = self.window_buffer[pane.i_begin:pane.i_end, pane.j_begin:pane.j_end]

        # Allocate help pane
        for ll in self.settings.help_pane_loc:
            assert ll >= 0 and ll <= 1, 'help_pane_loc values should be in [0,1]'
        self.help_pane = Pane(int(self.settings.help_pane_loc[0]*max_i),
                              int(self.settings.help_pane_loc[1]*max_j),
                              int(self.settings.help_pane_loc[2]*max_i),
                              int(self.settings.help_pane_loc[3]*max_j))
        self.help_buffer = self.window_buffer.copy() # For rendering help mode
        self.help_pane.data = self.help_buffer[self.help_pane.i_begin:self.help_pane.i_end, self.help_pane.j_begin:self.help_pane.j_end]

        # add listener for mouse clicks
        cv2.setMouseCallback(self.window_name, self.on_mouse_click)

    def on_mouse_click(self, event, x, y, flags, param):
        '''
        Handle mouse click events.
        '''

        if event == cv2.EVENT_LBUTTONUP:
            for app_name, app in self.apps.iteritems():
                with WithTimer('%s:on_mouse_click' % app_name, quiet=self.debug_level < 1):
                    key = app.handle_mouse_left_click(x, y, flags, param, self.panes)

    def check_for_control_height_update(self):

        if hasattr(self.settings, '_calculated_control_pane_height') and \
           self.settings._calculated_control_pane_height != self.panes['caffevis_control'].i_size:

            self.panes['caffevis_control'].reset(
                self.settings.window_panes[4][1][0],
                self.settings.window_panes[4][1][1],
                self.settings._calculated_control_pane_height,
                self.settings.window_panes[4][1][3])

            self.panes['caffevis_layers'].reset(
                self.settings._calculated_control_pane_height,
                self.settings.window_panes[5][1][1],
                self.settings.window_panes[5][1][2] + 3*20 - self.settings._calculated_control_pane_height,
                self.settings.window_panes[5][1][3])

            for _, pane in self.panes.iteritems():
                pane.data = self.window_buffer[pane.i_begin:pane.i_end, pane.j_begin:pane.j_end]

            return True

        else:
            return False

    def run_loop(self):
        self.quit = False
        # Setup
        self.init_window()
        #cap = cv2.VideoCapture(self.settings.capture_device)
        from input_fetcher import InputImageFetcher

        self.input_updater = InputImageFetcher(self.settings)
        self.input_updater.bind_camera()
        self.input_updater.start()

        heartbeat_functions = [self.input_updater.heartbeat]
        for app_name, app in self.apps.iteritems():
            print 'Starting app:', app_name
            app.start(self)
            heartbeat_functions.extend(app.get_heartbeats())

        ii = 0
        since_keypress = 999
        since_redraw = 999
        since_imshow = 0
        last_render = time.time() - 999
        latest_frame_idx = None
        latest_frame_data = None
        frame_for_apps = None
        redraw_needed = True    # Force redraw the first time
        imshow_needed = True
        while not self.quit:
            # Call any heartbeats
            for heartbeat in heartbeat_functions:
                #print 'Heartbeat: calling', heartbeat
                heartbeat()

            # Handle key presses
            keys = []
            # Collect key presses (multiple if len(range)>1)
            for cc in range(1):
                with WithTimer('LiveVis:waitKey', quiet = self.debug_level < 2):
                    key = cv2.waitKey(self.settings.main_loop_sleep_ms)
                if key == -1:
                    break
                else:
                    if (key != 255):
                        keys.append(key)
                    #print 'Got key:', key
            now = time.time()
            #print 'Since last:', now - last_render

            skip_imshow = False
            #if now - last_render > .05 and since_imshow < 1:
            #    skip_imshow = True

            if skip_imshow:
                since_imshow += 1
            else:
                since_imshow = 0
                last_render = now

            #print '                                                         Number of keys:', len(keys)
            for key in keys:
                since_keypress = 0
                #print 'Got Key:', key
                key,do_redraw = self.handle_key_pre_apps(key)
                redraw_needed |= do_redraw
                imshow_needed |= do_redraw
                for app_name, app in self.apps.iteritems():
                    with WithTimer('%s:handle_key' % app_name, quiet = self.debug_level < 1):
                        key = app.handle_key(key, self.panes)
                key = self.handle_key_post_apps(key)
                if self.quit:
                    break
            for app_name, app in self.apps.iteritems():
                redraw_needed |= app.redraw_needed()

            redraw_needed |= self.check_for_control_height_update()

            # Grab latest frame from input_updater thread
            fr_idx,fr_data,fr_label,fr_filename = self.input_updater.get_frame()
            is_new_frame = (fr_idx != latest_frame_idx and fr_data is not None)
            if is_new_frame:
                latest_frame_idx = fr_idx
                latest_frame_data = fr_data
                latest_label = fr_label
                latest_filename = fr_filename
                frame_for_apps = fr_data

            if is_new_frame:
                with WithTimer('LiveVis.display_frame', quiet = self.debug_level < 1):
                    self.display_frame(latest_frame_data)
                imshow_needed = True

            do_handle_input = (ii == 0 or
                               since_keypress >= self.settings.keypress_pause_handle_iterations)
            if frame_for_apps is not None and do_handle_input:
                # Pass frame to apps for processing
                for app_name, app in self.apps.iteritems():
                    with WithTimer('%s:handle_input' % app_name, quiet = self.debug_level < 1):
                        app.handle_input(latest_frame_data, latest_label, latest_filename, self.panes)
                frame_for_apps = None

            # Tell each app to draw
            do_redraw = (redraw_needed and
                         (since_keypress >= self.settings.keypress_pause_redraw_iterations or
                          since_redraw >= self.settings.redraw_at_least_every))
            if redraw_needed and do_redraw:
                for app_name, app in self.apps.iteritems():
                    with WithTimer('%s:draw' % app_name, quiet = self.debug_level < 1):
                        imshow_needed |= app.draw(self.panes)
                redraw_needed = False
                since_redraw = 0

            # Render buffer
            if imshow_needed:
                # Only redraw pane debug if display will be updated
                if hasattr(self.settings, 'debug_window_panes') and self.settings.debug_window_panes:
                    for pane_name,pane in self.panes.iteritems():
                        print pane_name, pane
                        pane.data[:] = pane.data * .5
                        line = [FormattedString('%s |' % pane_name, self.debug_pane_defaults),
                                FormattedString('pos: %d,%d |' % (pane.i_begin, pane.j_begin), self.debug_pane_defaults),
                                FormattedString('shape: %d,%d' % (pane.i_size, pane.j_size), self.debug_pane_defaults)]
                        cv2_typeset_text(pane.data, line, (5,20), line_spacing = 5, wrap = True)
                        pane.data[:1,:] = pane_debug_clr
                        pane.data[-1:,:] = pane_debug_clr
                        pane.data[:,:1] = pane_debug_clr
                        pane.data[:,-1:] = pane_debug_clr

                with WithTimer('LiveVis:imshow', quiet = self.debug_level < 1):
                    if self.help_mode:
                        # Copy main buffer to help buffer
                        self.help_buffer[:] = self.window_buffer[:]
                        self.draw_help()
                        cv2_imshow_rgb(self.window_name, self.help_buffer)
                    else:
                        cv2_imshow_rgb(self.window_name, self.window_buffer)
                    imshow_needed = False

            ii += 1
            since_keypress += 1
            since_redraw += 1
            if ii % 2 == 0 and self.settings.print_dots:
                sys.stdout.write('.')
            sys.stdout.flush()
            # Extra sleep just for debugging. In production all main loop sleep should be in cv2.waitKey.
            #time.sleep(2)

        print '\n\nTrying to exit run_loop...'
        self.input_updater.quit = True
        self.input_updater.join(.01 + float(self.settings.input_updater_sleep_after_read_frame) * 5)
        if self.input_updater.is_alive():
            raise Exception('Could not join self.input_updater thread')
        else:
            self.input_updater.free_camera()

        for app_name, app in self.apps.iteritems():
            print 'Quitting app:', app_name
            app.quit()

        print 'Input thread joined and apps quit; exiting run_loop.'

    def handle_key_pre_apps(self, key):
        tag = self.bindings.get_tag(key)
        if tag == 'freeze_cam':
            self.input_updater.freeze_cam = not self.input_updater.freeze_cam
        elif tag == 'toggle_input_mode':
            self.input_updater.toggle_input_mode()
        elif tag == 'static_file_increment':
            self.input_updater.next_image()
        elif tag == 'static_file_decrement':
            self.input_updater.prev_image()
        elif tag == 'help_mode':
            self.toggle_help_mode()
        elif tag == 'stretch_mode':
            self.input_updater.toggle_stretch_mode()
            print 'Stretch mode is now', self.input_updater.static_file_stretch_mode
        elif tag == 'debug_level':
            self.debug_level = (self.debug_level + 1) % 3
            for app_name, app in self.apps.iteritems():
                app.set_debug(self.debug_level)
        else:
            return key, False
        return None, True

    def handle_key_post_apps(self, key):
        tag = self.bindings.get_tag(key)
        if tag == 'quit':
            self.set_quit_flag()
        elif key is None:
            pass
        else:
            key_label, masked_vals = self.bindings.get_key_label_from_keycode(key, extra_info = True)
            masked_vals_pp = ', '.join(['%d (%s)' % (mv, hex(mv)) for mv in masked_vals])
            if key_label is None:
                print 'Got key code %d (%s), did not match any known key (masked vals tried: %s)' % (key, hex(key), masked_vals_pp)
            elif tag is None:
                print 'Got key code %d (%s), matched key "%s", but key is not bound to any function' % (key, hex(key), key_label)
            else:
                print 'Got key code %d (%s), matched key "%s", bound to "%s", but nobody handled "%s"' % (
                    key, hex(key), key_label, tag, tag)

    def display_frame(self, frame):

        full_pane_shape = self.panes['input'].data.shape[:2][::-1]
        if self.settings.is_siamese and ((type(frame),len(frame)) == (tuple,2)):
            frame1 = frame[0]
            frame2 = frame[1]

            half_pane_shape = (full_pane_shape[0], full_pane_shape[1]/2)
            frame_disp1 = ensure_uint255_and_resize_without_fit(frame1[:], half_pane_shape)
            frame_disp2 = ensure_uint255_and_resize_without_fit(frame2[:], half_pane_shape)
            frame_disp = np.concatenate((frame_disp1, frame_disp2), axis=1)
        else:
            frame_disp = ensure_uint255_and_resize_without_fit(frame[:], full_pane_shape)

        if self.settings._calculated_is_gray_model:
            frame_disp = gray_to_color(frame_disp)

        self.panes['input'].data[:] = frame_disp

    def draw_help(self):
        self.help_buffer[:] = self.help_buffer[:] * .7
        self.help_pane.data[:] = self.help_pane.data[:] * .7

        loc = self.settings.help_loc[::-1]   # Reverse to OpenCV c,r order
        defaults = self.help_pane_defaults
        lines = []
        lines.append([FormattedString('~ ~ ~ Deep Visualization Toolbox ~ ~ ~', defaults, align='center', width=self.help_pane.j_size)])

        locy, boxes = cv2_typeset_text(self.help_pane.data, lines, loc,
                                       line_spacing = self.settings.help_line_spacing)

        for app_name, app in self.apps.iteritems():
            locy = app.draw_help(self.help_pane, locy)

    def toggle_help_mode(self):
        self.help_mode = not self.help_mode

    def set_quit_flag(self):
        self.quit = True
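
# A minimal, self-contained sketch of the OrderedDict app registry that
# LiveVis.__init__ builds above. The (module, class) pairs are stand-ins from
# the standard library rather than real toolbox apps.
from collections import OrderedDict
import importlib

installed_apps = (('collections', 'Counter'), ('collections', 'OrderedDict'))
app_classes = OrderedDict()
for module_path, app_name in installed_apps:
    module = importlib.import_module(module_path)
    app_classes[app_name] = getattr(module, app_name)

# Instantiation later happens in the same insertion order.
for app_name, app_class in app_classes.iteritems():
    print 'instantiating', app_name, app_class
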
Example #36
0
def add_pipeline_options(parser, plumber):
    groups = OrderedDict((
        ('', ('', [
            'input_profile',
            'output_profile',
        ])),
        (_('LOOK AND FEEL'),
         (_('Options to control the look and feel of the output'), [
             'base_font_size',
             'disable_font_rescaling',
             'font_size_mapping',
             'embed_font_family',
             'subset_embedded_fonts',
             'embed_all_fonts',
             'line_height',
             'minimum_line_height',
             'linearize_tables',
             'extra_css',
             'filter_css',
             'transform_css_rules',
             'expand_css',
             'smarten_punctuation',
             'unsmarten_punctuation',
             'margin_top',
             'margin_left',
             'margin_right',
             'margin_bottom',
             'change_justification',
             'insert_blank_line',
             'insert_blank_line_size',
             'remove_paragraph_spacing',
             'remove_paragraph_spacing_indent_size',
             'asciiize',
             'keep_ligatures',
         ])),
        (_('HEURISTIC PROCESSING'),
         (_('Modify the document text and structure using common'
            ' patterns. Disabled by default. Use %(en)s to enable. '
            ' Individual actions can be disabled with the %(dis)s options.') %
          dict(en='--enable-heuristics', dis='--disable-*'),
          ['enable_heuristics'] + HEURISTIC_OPTIONS)),
        (_('SEARCH AND REPLACE'),
         (_('Modify the document text and structure using user defined patterns.'
            ), [
                'sr1_search',
                'sr1_replace',
                'sr2_search',
                'sr2_replace',
                'sr3_search',
                'sr3_replace',
                'search_replace',
            ])),
        (_('STRUCTURE DETECTION'),
         (_('Control auto-detection of document structure.'), [
             'chapter',
             'chapter_mark',
             'prefer_metadata_cover',
             'remove_first_image',
             'insert_metadata',
             'page_breaks_before',
             'remove_fake_margins',
             'start_reading_at',
         ])),
        (_('TABLE OF CONTENTS'),
         (_('Control the automatic generation of a Table of Contents. By '
            'default, if the source file has a Table of Contents, it will '
            'be used in preference to the automatically generated one.'), [
                'level1_toc',
                'level2_toc',
                'level3_toc',
                'toc_threshold',
                'max_toc_links',
                'no_chapters_in_toc',
                'use_auto_toc',
                'toc_filter',
                'duplicate_links_in_toc',
            ])),
        (_('METADATA'), (
            _('Options to set metadata in the output'),
            plumber.metadata_option_names + ['read_metadata_from_opf'],
        )),
        (_('DEBUG'), (_('Options to help with debugging the conversion'), [
            'verbose',
            'debug_pipeline',
        ])),
    ))

    for group, (desc, options) in groups.iteritems():
        if group:
            group = OptionGroup(parser, group, desc)
            parser.add_option_group(group)
        add_option = group.add_option if group != '' else parser.add_option

        for name in options:
            rec = plumber.get_option_by_name(name)
            if rec.level < rec.HIGH:
                option_recommendation_to_cli_option(add_option, rec)
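
# A minimal, self-contained sketch of the grouping pattern used by
# add_pipeline_options above: an OrderedDict maps a group title to a
# (description, option names) pair, and options are attached either to an
# OptionGroup or directly to the parser. The option names here are invented.
from collections import OrderedDict
from optparse import OptionParser, OptionGroup

parser = OptionParser()
groups = OrderedDict((
    ('', ('', ['verbose'])),
    ('LOOK AND FEEL', ('Options to control the look and feel of the output',
                       ['base-font-size', 'line-height'])),
))
for group, (desc, options) in groups.iteritems():
    if group:
        group = OptionGroup(parser, group, desc)
        parser.add_option_group(group)
    add_option = group.add_option if group != '' else parser.add_option
    for name in options:
        add_option('--' + name)
print parser.format_help()
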
Example #37
0
class AnalysisGrid(object):
    """A grid of analysis points.

    Attributes:
        analysis_points: A collection of analysis points.
    """

    __slots__ = ('_analysis_points', '_name', '_sources', '_wgroups', '_directFiles',
                 '_totalFiles')

    def __init__(self, analysis_points, name=None, window_groups=None):
        """Initialize a AnalysisPointGroup.

        analysis_points: A collection of AnalysisPoints.
        name: A unique name for this AnalysisGrid.
        window_groups: A collection of window_groups which contribute to this grid.
            This input is only meaningful in studies such as daylight coefficient
            and multi-phase studies in which the contribution of each source is
            calculated separately (default: None).
        """
        self.name = name
        # names of sources and their states. It's only meaningful in multi-phase
        # daylight analysis. In an analysis for a single time it will be {None: [None]}
        self._sources = OrderedDict()

        if window_groups:
            self._wgroups = tuple(wg.name for wg in window_groups)
        else:
            self._wgroups = ()

        for ap in analysis_points:
            assert hasattr(ap, '_dir'), \
                '{} is not an AnalysisPoint.'.format(ap)

        self._analysis_points = analysis_points
        self._directFiles = []  # list of results files
        self._totalFiles = []  # list of results files

    @classmethod
    def from_json(cls, ag_json):
        """Create an analysis grid from json objects."""
        analysis_points = tuple(AnalysisPoint.from_json(pt)
                                for pt in ag_json["analysis_points"])
        return cls(analysis_points=analysis_points, name=ag_json["name"],
                   window_groups=None)

    @classmethod
    def from_points_and_vectors(cls, points, vectors=None,
                                name=None, window_groups=None):
        """Create an analysis grid from points and vectors.

        Args:
            points: A flattened list of (x, y, z) points.
            vectors: An optional list of (x, y, z) for direction of test points.
                If not provided a (0, 0, 1) vector will be assigned.
        """
        vectors = vectors or ()
        points, vectors = match_data(points, vectors, (0, 0, 1))
        aps = tuple(AnalysisPoint(pt, v) for pt, v in izip(points, vectors))
        return cls(aps, name, window_groups)

    @classmethod
    def from_file(cls, file_path):
        """Create an analysis grid from a pts file.

        Args:
            file_path: Full path to points file
        """
        assert os.path.isfile(file_path), IOError("Can't find {}.".format(file_path))
        ap = AnalysisPoint  # load analysis point locally for better performance
        with open(file_path, 'rb') as inf:
            points = tuple(ap.from_raw_values(*l.split()) for l in inf)

        return cls(points)

    @property
    def isAnalysisGrid(self):
        """Return True for AnalysisGrid."""
        return True

    @property
    def name(self):
        """AnalysisGrid name."""
        return self._name

    @name.setter
    def name(self, n):
        self._name = n or random_name()

    @property
    def window_groups(self):
        """A list of window group names that are related to this analysis grid."""
        return self._wgroups

    @window_groups.setter
    def window_groups(self, wgs):
        self._wgroups = tuple(wg.name for wg in wgs)

    @property
    def points(self):
        """A generator of points as x, y, z."""
        return (ap.location for ap in self._analysis_points)

    @property
    def vectors(self):
        """Get generator of vectors as x, y , z."""
        return (ap.direction for ap in self._analysis_points)

    @property
    def analysis_points(self):
        """Return a list of analysis points."""
        return self._analysis_points

    @property
    def sources(self):
        """Get sorted list fo sources."""
        if not self._sources:
            return self.analysis_points[0].sources
        else:
            srcs = range(len(self._sources))
            for name, d in self._sources.iteritems():
                srcs[d['id']] = name
            return srcs

    @property
    def has_values(self):
        """Check if this analysis grid has result values."""
        return self.analysis_points[0].has_values

    @property
    def has_direct_values(self):
        """Check if direct values are available for this point.

        In point-in-time and 3phase recipes only total values are available.
        """
        return self.analysis_points[0].has_direct_values

    @property
    def hoys(self):
        """Return hours of the year for results if any."""
        return self.analysis_points[0].hoys

    @property
    def is_results_point_in_time(self):
        """Return True if the grid has the results only for an hour."""
        return len(self.hoys) == 1

    @property
    def result_files(self):
        """Return result files as a list [[total files], [direct files]]."""
        return self._totalFiles, self._directFiles

    def add_result_files(self, file_path, hoys, start_line=None, is_direct=False,
                         header=True, mode=0):
        """Add new result files to grid.

        Use this method if you want to get annual metrics without loading the values
        for each point. This method is only useful for cases with no window groups or
        dynamic blind states. After adding the files you can call the 'annual_metrics'
        method.
        """
        ResultFile = namedtuple(
            'ResultFile', ('path', 'hoys', 'start_line', 'header', 'mode'))

        inf = ResultFile(file_path, hoys, start_line, header, mode)

        if is_direct:
            self._directFiles.append(inf)
        else:
            self._totalFiles.append(inf)

    def set_values(self, hoys, values, source=None, state=None, is_direct=False):
        # assign the values to points
        for count, hourlyValues in enumerate(values):
            self.analysis_points[count].set_values(
                hourlyValues, hoys, source, state, is_direct)

    def parse_header(self, inf, start_line, hoys, check_point_count=False):
        """Parse radiance matrix header."""
        # read the header
        for i in xrange(40):
            line = inf.next()
            if line[:6] == 'FORMAT':
                inf.next()  # pass empty line
                break  # done with the header!
            elif start_line == 0 and line[:5] == 'NROWS':
                points_count = int(line.split('=')[-1])
                if check_point_count:
                    assert len(self._analysis_points) == points_count, \
                        "Length of points [{}] must match the number " \
                        "of rows [{}].".format(
                            len(self._analysis_points), points_count)

            elif start_line == 0 and line[:5] == 'NCOLS':
                hours_count = int(line.split('=')[-1])
                if hoys:
                    assert hours_count == len(hoys), \
                        "Number of hours [{}] must match the " \
                        "number of columns [{}]." \
                        .format(len(hoys), hours_count)
                else:
                    hoys = xrange(0, hours_count)

        return inf, hoys
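
    # The header that parse_header expects looks roughly like the following
    # (values are illustrative); reading stops at the FORMAT line and skips
    # the line after it:
    #
    #     NROWS=100
    #     NCOLS=8760
    #     FORMAT=ascii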

    def set_values_from_file(self, file_path, hoys=None, source=None, state=None,
                             start_line=None, is_direct=False, header=True,
                             check_point_count=True, mode=0):
        """Load values for test points from a file.

        Args:
            file_path: Full file path to the result file.
            hoys: A collection of hours of the year for the results. If None the
                default will be range(0, len(results)).
            source: Name of the source.
            state: Name of the state.
            start_line: Number of start lines after the header from 0 (default: 0).
            is_direct: A Boolean to declare if the results are direct illuminance
                (default: False).
            header: A Boolean to declare if the file has a header (default: True).
            mode: 0 > load the values as they are. 1 > load the values as binary;
                any non-zero value will be 1, which is useful for studies such as
                sunlight hours. 2 > load the values divided by the mode number;
                use this mode for daylight factor or radiation analysis.
        """

        if os.path.getsize(file_path) < 2:
            raise EmptyFileError(file_path)

        st = start_line or 0

        with open(file_path, 'rb') as inf:
            if header:
                inf, _ = self.parse_header(inf, st, hoys, check_point_count)

            self.add_result_files(file_path, hoys, st, is_direct, header, mode)

            for i in xrange(st):
                inf.next()

            end = len(self._analysis_points)
            if mode == 0:
                values = (tuple(int(float(r)) for r in inf.next().split())
                          for count in xrange(end))
            elif mode == 1:
                # binary 0-1
                values = (tuple(1 if float(r) > 0 else 0 for r in inf.next().split())
                          for count in xrange(end))
            else:
                # divide values by mode (useful for daylight factor calculation)
                values = (tuple(int(float(r) / mode) for r in inf.next().split())
                          for count in xrange(end))

            # assign the values to points
            for count, hourlyValues in enumerate(values):
                self.analysis_points[count].set_values(
                    hourlyValues, hoys, source, state, is_direct)

    def set_coupled_values_from_file(
            self, total_file_path, direct_file_path, hoys=None, source=None, state=None,
            start_line=None, header=True, check_point_count=True, mode=0):
        """Load direct and total values for test points from two files.

        Args:
            total_file_path: Full file path to the total result file.
            direct_file_path: Full file path to the direct result file.
            hoys: A collection of hours of the year for the results. If None the
                default will be range(0, len(results)).
            source: Name of the source.
            state: Name of the state.
            start_line: Number of start lines after the header from 0 (default: 0).
            header: A Boolean to declare if the file has a header (default: True).
            mode: 0 > load the values as they are. 1 > load the values as binary;
                any non-zero value will be 1, which is useful for studies such as
                sunlight hours. 2 > load the values divided by the mode number;
                use this mode for daylight factor or radiation analysis.
        """

        for file_path in (total_file_path, direct_file_path):
            if os.path.getsize(file_path) < 2:
                raise EmptyFileError(file_path)

        st = start_line or 0

        with open(total_file_path, 'rb') as inf, open(direct_file_path, 'rb') as dinf:
            if header:
                inf, _ = self.parse_header(inf, st, hoys, check_point_count)
                dinf, _ = self.parse_header(dinf, st, hoys, check_point_count)

            self.add_result_files(total_file_path, hoys, st, False, header, mode)
            self.add_result_files(direct_file_path, hoys, st, True, header, mode)

            for i in xrange(st):
                inf.next()
                dinf.next()

            end = len(self._analysis_points)

            if mode == 0:
                coupled_values = (
                    tuple((int(float(r)), int(float(d))) for r, d in
                          izip(inf.next().split(), dinf.next().split()))
                    for count in xrange(end))
            elif mode == 1:
                # binary 0-1
                coupled_values = (tuple(
                    (int(float(1 if float(r) > 0 else 0)),
                     int(float(1 if float(d) > 0 else 0)))
                    for r, d in izip(inf.next().split(), dinf.next().split()))
                    for count in xrange(end))
            else:
                # divide values by mode (useful for daylight factor calculation)
                coupled_values = (
                    tuple((int(float(r) / mode), int(float(d) / mode)) for r, d in
                          izip(inf.next().split(), dinf.next().split()))
                    for count in xrange(end))

            # assign the values to points
            for count, hourlyValues in enumerate(coupled_values):
                self.analysis_points[count].set_coupled_values(
                    hourlyValues, hoys, source, state)

    def combined_value_by_id(self, hoy=None, blinds_state_ids=None):
        """Get combined value from all sources based on state_id.

        Args:
            hoy: hour of the year.
            blinds_state_ids: List of state ids for all the sources for an hour. If you
                want a source to be removed set the state to -1.

        Returns:
            total, direct values.
        """
        if self.digit_sign == 1:
            self.load_values_from_files()

        hoy = hoy or self.hoys[0]

        return (p.combined_value_by_id(hoy, blinds_state_ids) for p in self)

    def combined_values_by_id(self, hoys=None, blinds_state_ids=None):
        """Get combined value from all sources based on state_ids.

        Args:
            hoys: A collection of hours of the year.
            blinds_state_ids: List of state ids for all the sources for input hoys. If
                you want a source to be removed set the state to -1.

        Returns:
            Return a generator for (total, direct) values.
        """
        if self.digit_sign == 1:
            self.load_values_from_files()

        return (p.combined_value_by_id(hoys, blinds_state_ids) for p in self)

    def sum_values_by_id(self, hoys=None, blinds_state_ids=None):
        """Get sum of value for all the hours.

        This method is mostly useful for radiation and solar access analysis.

        Args:
            hoys: A collection of hours of the year.
            blinds_state_ids: List of state ids for all the sources for input hoys. If
                you want a source to be removed set the state to -1.

        Returns:
            Return a collection of sum values as (total, direct) values.
        """
        if self.digit_sign == 1:
            self.load_values_from_files()

        return (p.sum_values_by_id(hoys, blinds_state_ids) for p in self)

    def max_values_by_id(self, hoys=None, blinds_state_ids=None):
        """Get maximum value for all the hours.

        Args:
            hoys: A collection of hours of the year.
            blinds_state_ids: List of state ids for all the sources for input hoys. If
                you want a source to be removed set the state to -1.

        Returns:
            Return a generator of maximum (total, direct) values.
        """
        if self.digit_sign == 1:
            self.load_values_from_files()

        return (p.max_values_by_id(hoys, blinds_state_ids) for p in self)

    def annual_metrics(self, da_threshhold=None, udi_min_max=None, blinds_state_ids=None,
                       occ_schedule=None):
        """Calculate annual metrics.

        Daylight autonomy, continuous daylight autonomy and useful daylight illuminance.

        Args:
            da_threshhold: Threshold for daylight autonomy in lux (default: 300).
            udi_min_max: A tuple of min, max value for useful daylight illuminance
                (default: (100, 3000)).
            blinds_state_ids: List of state ids for all the sources for input hoys. If
                you want a source to be removed set the state to -1.
            occ_schedule: An annual occupancy schedule.

        Returns:
            Daylight autonomy, Continuous daylight autonomy, Useful daylight illuminance,
            Less than UDI, More than UDI
        """
        results_loaded = True
        if not self.has_values and not self.result_files[0]:
            raise ValueError('No values are assigned to this analysis grid.')
        elif not self.has_values:
            # results are not loaded but are available
            assert len(self.result_files[0]) == 1, \
                ValueError(
                    'Annual recipe can currently only handle '
                    'a single merged result file.'
            )
            results_loaded = False
            print('Loading the results from result files.')

        res = ([], [], [], [], [])

        da_threshhold = da_threshhold or 300.0
        udi_min_max = udi_min_max or (100, 3000)
        hoys = self.hoys
        occ_schedule = occ_schedule or Schedule.eight_am_to_six_pm()

        if results_loaded:
            blinds_state_ids = blinds_state_ids or [[0] * len(self.sources)] * len(hoys)

            for sensor in self.analysis_points:
                for c, r in enumerate(sensor.annual_metrics(da_threshhold,
                                                            udi_min_max,
                                                            blinds_state_ids,
                                                            occ_schedule
                                                            )):
                    res[c].append(r)
        else:
            # This is a method for the annual recipe to load the results line by
            # line which, unlike the other method, doesn't load all the values
            # into memory at once.
            blinds_state_ids = [[0] * len(self.sources)] * len(hoys)
            calculate_annual_metrics = self.analysis_points[0]._calculate_annual_metrics

            for file_data in self.result_files[0]:
                file_path, hoys, start_line, header, mode = file_data

                # read the results line by line and calculate the values
                if os.path.getsize(file_path) < 2:
                    raise EmptyFileError(file_path)

                assert mode == 0, \
                    TypeError(
                        'Annual results can only be calculated from '
                        'illuminance studies.')

                st = start_line or 0

                with open(file_path, 'rb') as inf:
                    if header:
                        inf, _ = self.parse_header(inf, st, hoys, False)

                    for i in xrange(st):
                        inf.next()

                    end = len(self._analysis_points)

                    # load one line at a time
                    for count in xrange(end):
                        values = (int(float(r)) for r in inf.next().split())
                        for c, r in enumerate(
                            calculate_annual_metrics(
                                values, hoys, da_threshhold, udi_min_max,
                                blinds_state_ids, occ_schedule)):

                            res[c].append(r)

        return res

    def spatial_daylight_autonomy(self, da_threshhold=None, target_da=None,
                                  blinds_state_ids=None, occ_schedule=None):
        """Calculate Spatial Daylight Autonomy (sDA).

        Args:
            da_threshhold: Minimum illuminance threshold for daylight (default: 300).
            target_da: Minimum threshold for daylight autonomy in percentage
                (default: 50%).
            blinds_state_ids:  List of state ids for all the sources for input hoys. If
                you want a source to be removed set the state to -1.
            occ_schedule: An annual occupancy schedule.

        Returns:
            sDA: Spatial daylight autonomy as percentage of analysis points.
            DA: Daylight autonomy for each analysis point.
            Problematic points: List of problematic points.
        """
        results_loaded = True
        if not self.has_values and not self.result_files[0]:
            raise ValueError('No values are assigned to this analysis grid.')
        elif not self.has_values:
            # results are not loaded but are available
            assert len(self.result_files[0]) == 1, \
                ValueError(
                    'Annual recipe can currently only handle '
                    'a single merged result file.'
            )
            results_loaded = False
            print('Loading the results from result files.')

        res = ([], [])

        da_threshhold = da_threshhold or 300.0
        target_da = target_da or 50.0
        hoys = self.hoys
        occ_schedule = occ_schedule or Schedule.eight_am_to_six_pm()

        if results_loaded:
            blinds_state_ids = blinds_state_ids or [[0] * len(self.sources)] * len(hoys)

            for sensor in self.analysis_points:
                for c, r in enumerate(sensor.daylight_autonomy(da_threshhold,
                                                               blinds_state_ids,
                                                               occ_schedule
                                                               )):
                    res[c].append(r)
        else:
            # This is a method for the annual recipe to load the results line by
            # line which, unlike the other method, doesn't load all the values
            # into memory at once.
            blinds_state_ids = [[0] * len(self.sources)] * len(hoys)
            calculate_daylight_autonomy = \
                self.analysis_points[0]._calculate_daylight_autonomy

            for file_data in self.result_files[0]:
                file_path, hoys, start_line, header, mode = file_data

                # read the results line by line and calculate the values
                if os.path.getsize(file_path) < 2:
                    raise EmptyFileError(file_path)

                assert mode == 0, \
                    TypeError(
                        'Annual results can only be calculated from '
                        'illuminance studies.')

                st = start_line or 0

                with open(file_path, 'rb') as inf:
                    if header:
                        inf, _ = self.parse_header(inf, st, hoys, False)

                    for i in xrange(st):
                        inf.next()

                    end = len(self._analysis_points)

                    # load one line at a time
                    for count in xrange(end):
                        values = (int(float(r)) for r in inf.next().split())
                        for c, r in enumerate(
                            calculate_daylight_autonomy(
                                values, hoys, da_threshhold,
                                blinds_state_ids, occ_schedule)):

                            res[c].append(r)

        daylight_autonomy = res[0]
        problematic_points = []
        for pt, da in izip(self.analysis_points, daylight_autonomy):
            if da < target_da:
                problematic_points.append(pt)
        try:
            sda = (1 - len(problematic_points) / float(len(self.analysis_points))) * 100
        except ZeroDivisionError:
            sda = 0

        return sda, daylight_autonomy, problematic_points
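
    # A worked example of the sDA aggregation above, assuming hypothetical
    # per-point daylight autonomy values and the default 50% target:
    #
    #     daylight_autonomy = [62.0, 48.5, 71.2, 30.0]
    #     problematic = [da for da in daylight_autonomy if da < 50.0]          # 2 points
    #     sda = (1 - len(problematic) / float(len(daylight_autonomy))) * 100   # -> 50.0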

    def annual_sunlight_exposure(self, threshhold=None, blinds_state_ids=None,
                                 occ_schedule=None, target_hours=None, target_area=None):
        """Annual Solar Exposure (ASE)

        As per IES-LM-83-12 ase is the percent of sensors that are
        found to be exposed to more than 1000lux of direct sunlight for
        more than 250hrs per year. For LEED credits No more than 10% of
        the points in the grid should fail this measure.

        Args:
            threshhold: Threshold for solar exposure in lux (default: 1000).
            blinds_state_ids: List of state ids for all the sources for input hoys.
                If you want a source to be removed set the state to -1. ASE must
                be calculated without dynamic blinds, but you can use this option
                to study the effect of different blind states.
            occ_schedule: An annual occupancy schedule.
            target_hours: Minimum target hours for each point (default: 250).
            target_area: Minimum target area percentage for this grid (default: 10).

        Returns:
            Success as a Boolean, ase values for each point, Percentage area,
            Problematic points, Problematic hours for each point
        """
        results_loaded = True
        if not self.has_direct_values and not self.result_files[1]:
            raise ValueError(
                'Direct values are not available to calculate ASE.\nIn most of the cases'
                ' this is because you are using a point in time recipe or the three-'
                'phase recipe. You should use one of the daylight coefficient based '
                'recipes or the 5 phase recipe instead.')
        elif not self.has_direct_values:
            # results are not loaded but are available
            assert len(self.result_files[1]) == 1, \
                ValueError(
                    'Annual recipe can currently only handle '
                    'a single merged result file.'
            )
            results_loaded = False
            print('Loading the results from result files.')

        res = ([], [], [])
        threshhold = threshhold or 1000
        target_hours = target_hours or 250
        target_area = target_area or 10
        hoys = self.hoys
        occ_schedule = occ_schedule or set(hoys)

        if results_loaded:
            blinds_state_ids = blinds_state_ids or [[0] * len(self.sources)] * len(hoys)

            for sensor in self.analysis_points:
                for c, r in enumerate(sensor.annual_sunlight_exposure(threshhold,
                                                                      blinds_state_ids,
                                                                      occ_schedule,
                                                                      target_hours
                                                                      )):
                    res[c].append(r)
        else:
            # This is a method for the annual recipe to load the results line by
            # line which, unlike the other method, doesn't load all the values
            # into memory at once.
            blinds_state_ids = [[0] * len(self.sources)] * len(hoys)
            calculate_annual_sunlight_exposure = \
                self.analysis_points[0]._calculate_annual_sunlight_exposure

            for file_data in self.result_files[1]:
                file_path, hoys, start_line, header, mode = file_data

                # read the results line by line and calculate the values
                if os.path.getsize(file_path) < 2:
                    raise EmptyFileError(file_path)

                assert mode == 0, \
                    TypeError(
                        'Annual results can only be calculated from '
                        'illuminance studies.')

                st = start_line or 0

                with open(file_path, 'rb') as inf:
                    if header:
                        inf, _ = self.parse_header(inf, st, hoys, False)

                    for i in xrange(st):
                        inf.next()

                    end = len(self._analysis_points)

                    # load one line at a time
                    for count in xrange(end):
                        values = (int(float(r)) for r in inf.next().split())
                        for c, r in enumerate(
                            calculate_annual_sunlight_exposure(
                                values, hoys, threshhold, blinds_state_ids, occ_schedule,
                                target_hours)):

                            res[c].append(r)

        # calculate ase for the grid
        ap = self.analysis_points  # create a local copy of points for better performance
        problematic_point_count = 0
        problematic_points = []
        problematic_hours = []
        ase_values = []
        for i, (success, ase, pHours) in enumerate(izip(*res)):
            ase_values.append(ase)  # collect annual ase values for each point
            if success:
                continue
            problematic_point_count += 1
            problematic_points.append(ap[i])
            problematic_hours.append(pHours)

        per_problematic = 100 * problematic_point_count / len(ap)
        return per_problematic < target_area, ase_values, per_problematic, \
            problematic_points, problematic_hours

    def parse_blind_states(self, blinds_state_ids):
        """Parse input blind states.

        The method tries to convert each state to a tuple of a list. Use this method
        to parse the input from plugins.

        Args:
            blinds_state_ids: List of state ids for all the sources for an hour. If you
                want a source to be removed set the state to -1. If not provided
                a longest combination of states from sources (window groups) will
                be used. Length of each item in states should be equal to number
                of sources.
        """
        return self.analysis_points[0].parse_blind_states(blinds_state_ids)

    def load_values_from_files(self):
        """Load grid values from self.result_files."""
        # remove old results
        for ap in self._analysis_points:
            ap._sources = OrderedDict()
            ap._values = []
        r_files = self.result_files[0][:]
        d_files = self.result_files[1][:]
        self._totalFiles = []
        self._directFiles = []
        # pass
        if r_files and d_files:
            # both results are available
            for rf, df in izip(r_files, d_files):
                rfPath, hoys, start_line, header, mode = rf
                dfPath, hoys, start_line, header, mode = df
                fn = os.path.split(rfPath)[-1][:-4].split("..")
                source = fn[-2]
                state = fn[-1]
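                # e.g. a hypothetical 'room..south_window..open.ill' gives
                # source 'south_window' and state 'open'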
                print(
                    '\nloading total and direct results for {} AnalysisGrid'
                    ' from {}::{}\n{}\n{}\n'.format(
                        self.name, source, state, rfPath, dfPath))
                self.set_coupled_values_from_file(
                    rfPath, dfPath, hoys, source, state, start_line, header,
                    False, mode
                )
        elif r_files:
            for rf in r_files:
                rfPath, hoys, start_line, header, mode = rf
                fn = os.path.split(rfPath)[-1][:-4].split("..")
                source = fn[-2]
                state = fn[-1]
                print('\nloading the results for {} AnalysisGrid from {}::{}\n{}\n'
                      .format(self.name, source, state, rfPath))
                self.set_values_from_file(
                    rfPath, hoys, source, state, start_line, is_direct=False,
                    header=header, check_point_count=False, mode=mode
                )
        elif d_files:
            for rf in d_files:
                rfPath, hoys, start_line, header, mode = rf
                fn = os.path.split(rfPath)[-1][:-4].split("..")
                source = fn[-2]
                state = fn[-1]
                print('\nloading the results for {} AnalysisGrid from {}::{}\n{}\n'
                      .format(self.name, source, state, rfPath))
                self.set_values_from_file(
                    rfPath, hoys, source, state, start_line, is_direct=True,
                    header=header, check_point_count=False, mode=mode
                )

    def unload(self):
        """Remove all the sources and values from analysis_points."""
        self._totalFiles = []
        self._directFiles = []

        for ap in self._analysis_points:
            ap._sources = OrderedDict()
            ap._values = []

    def duplicate(self):
        """Duplicate AnalysisGrid."""
        aps = tuple(ap.duplicate() for ap in self._analysis_points)
        dup = AnalysisGrid(aps, self._name)
        dup._sources = aps[0]._sources
        dup._wgroups = self._wgroups
        return dup

    def to_rad_string(self):
        """Return analysis points group as a Radiance string."""
        return "\n".join((ap.to_rad_string() for ap in self._analysis_points))

    def ToString(self):
        """Overwrite ToString .NET method."""
        return self.__repr__()

    def to_json(self):
        """Create json object from analysisGrid."""
        analysis_points = [ap.to_json() for ap in self.analysis_points]
        return {
            "name": self._name,
            "analysis_points": analysis_points
        }

    def __add__(self, other):
        """Add two analysis grids and create a new one.

        This method won't duplicate the analysis points.
        """
        assert isinstance(other, AnalysisGrid), \
            TypeError('Expected an AnalysisGrid not {}.'.format(type(other)))

        assert self.hoys == other.hoys, \
            ValueError('Two analysis grids must have the same hoys.')

        if not self.has_values:
            self._sources.update(other._sources)
            sources = self._sources
        else:
            assert self._sources == other._sources, \
                ValueError(
                    'Two analysis grids with values must have the same window_groups.'
                )
            sources = self._sources

        points = self.analysis_points + other.analysis_points
        name = '{}+{}'.format(self.name, other.name)
        addition = AnalysisGrid(points, name)
        addition._sources = sources

        return addition

    def __len__(self):
        """Number of points in this group."""
        return len(self._analysis_points)

    def __getitem__(self, index):
        """Get value for an index."""
        return self._analysis_points[index]

    def __iter__(self):
        """Iterate points."""
        return iter(self._analysis_points)

    def __str__(self):
        """String repr."""
        return self.to_rad_string()

    @property
    def digit_sign(self):
        if not self.has_values:
            if len(self.result_files[0]) + len(self.result_files[1]) == 0:
                # only x, y, z data is available
                return 0
            else:
                # results are available but are not loaded yet
                return 1
        elif self.is_results_point_in_time:
            # results is loaded for a single hour
            return 2
        else:
            # results is loaded for multiple hours
            return 3

    @property
    def _sign(self):
        if not self.has_values:
            if len(self.result_files[0]) + len(self.result_files[1]) == 0:
                # only x, y, z data is available
                return '[.]'
            else:
                # results are available but are not loaded yet
                return '[/]'
        elif self.is_results_point_in_time:
            # results is loaded for a single hour
            return '[+]'
        else:
            # results is loaded for multiple hours
            return '[*]'

    def __repr__(self):
        """Return analysis points and directions."""
        return 'AnalysisGrid::{}::#{}::{}'.format(
            self._name, len(self._analysis_points), self._sign
        )
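
# A minimal usage sketch for AnalysisGrid, assuming the class and its
# AnalysisPoint dependency are importable from this module; the points and
# vectors are arbitrary illustrative values.
test_points = [(0, 0, 0.76), (1, 0, 0.76), (1, 1, 0.76)]
test_vectors = [(0, 0, 1)] * len(test_points)
grid = AnalysisGrid.from_points_and_vectors(test_points, test_vectors,
                                            name='test_room')
print repr(grid)   # -> AnalysisGrid::test_room::#3::[.]
print grid         # Radiance string: one point location and direction per line
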
Example #38
0
        print "[x] failed to find next module"
        break

    skip_bytes = sig_offset - 0x20
    data = data[skip_bytes:]
    global_offset += skip_bytes
    #print "\n>> skip 0x%x" % skip_bytes

    ilo_num += 1

print "\n-------------------------------------------------------------------------------"
print "[+] Modules summary (%d)" % (ilo_num + 1)

for i, mod in enumerate(mod_list):
    print "    %2x) %30s, type 0x%02x, size 0x%08x, crc 0x%08x" % (
        i, mod.module, mod.type, mod.decompressed_size, mod.img_crc)

#------------------------------------------------------------------------------
# output offsets map

print "\n[+] Firmware offset map"
for part, offset in offsets_map.iteritems():
    print "  > %30s at 0x%08x" % (part, offset)

with open(outdir + "/firmware.map", "wb") as fff:
    fff.write(
        json.dumps(offsets_map,
                   sort_keys=True,
                   indent=4,
                   separators=(',', ': ')))
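
# A minimal, self-contained sketch of the offset-map report and JSON dump
# above; the part names and offsets are made up for illustration.
import json
from collections import OrderedDict

offsets_map = OrderedDict([('HPIMAGE header', 0x0), ('bootloader', 0x20000)])
for part, offset in offsets_map.iteritems():
    print "  > %30s at 0x%08x" % (part, offset)
print json.dumps(offsets_map, sort_keys=True, indent=4, separators=(',', ': '))
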
Example #39
0
class SubsectionGrade(object):
    """
    Class for Subsection Grades.
    """
    def __init__(self, subsection):
        self.location = subsection.location
        self.display_name = block_metadata_utils.display_name_with_default_escaped(
            subsection)
        self.url_name = block_metadata_utils.url_name_for_block(subsection)

        self.format = getattr(subsection, 'format', '')
        self.due = getattr(subsection, 'due', None)
        self.graded = getattr(subsection, 'graded', False)

        self.course_version = getattr(subsection, 'course_version', None)
        self.subtree_edited_timestamp = subsection.subtree_edited_on

        self.graded_total = None  # aggregated grade for all graded problems
        self.all_total = None  # aggregated grade for all problems, regardless of whether they are graded
        self.locations_to_scores = OrderedDict()  # dict of problem locations to ProblemScore

    @property
    def scores(self):
        """
        List of all problem scores in the subsection.
        """
        return self.locations_to_scores.values()

    @property
    def attempted(self):
        """
        Returns whether any problem in this subsection
        was attempted by the student.
        """

        assert self.all_total is not None, (
            "SubsectionGrade not fully populated yet.  Call init_from_structure or init_from_model "
            "before use.")
        return self.all_total.attempted

    def init_from_structure(self, student, course_structure,
                            submissions_scores, csm_scores):
        """
        Compute the grade of this subsection for the given student and course.
        """
        for descendant_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=self.location,
        ):
            self._compute_block_score(descendant_key, course_structure,
                                      submissions_scores, csm_scores)

        self.all_total, self.graded_total = graders.aggregate_scores(
            self.scores)
        self._log_event(log.debug, u"init_from_structure", student)
        return self

    def init_from_model(self, student, model, course_structure,
                        submissions_scores, csm_scores):
        """
        Load the subsection grade from the persisted model.
        """
        for block in model.visible_blocks.blocks:
            self._compute_block_score(block.locator, course_structure,
                                      submissions_scores, csm_scores, block)

        self.graded_total = AggregatedScore(
            tw_earned=model.earned_graded,
            tw_possible=model.possible_graded,
            graded=True,
            attempted=model.first_attempted is not None,
        )
        self.all_total = AggregatedScore(
            tw_earned=model.earned_all,
            tw_possible=model.possible_all,
            graded=False,
            attempted=model.first_attempted is not None,
        )
        self._log_event(log.debug, u"init_from_model", student)
        return self

    @classmethod
    def bulk_create_models(cls, student, subsection_grades, course_key):
        """
        Saves the subsection grade in a persisted model.
        """
        return PersistentSubsectionGrade.bulk_create_grades(
            [
                subsection_grade._persisted_model_params(student)
                for subsection_grade in subsection_grades
            ],  # pylint: disable=protected-access
            course_key,
        )

    def create_model(self, student):
        """
        Saves the subsection grade in a persisted model.
        """
        self._log_event(log.debug, u"create_model", student)
        return PersistentSubsectionGrade.create_grade(
            **self._persisted_model_params(student))

    def update_or_create_model(self, student):
        """
        Saves or updates the subsection grade in a persisted model.
        """
        self._log_event(log.debug, u"update_or_create_model", student)
        return PersistentSubsectionGrade.update_or_create_grade(
            **self._persisted_model_params(student))

    def _compute_block_score(
        self,
        block_key,
        course_structure,
        submissions_scores,
        csm_scores,
        persisted_block=None,
    ):
        """
        Compute score for the given block. If persisted_block
        is provided, it is used for possible and weight.
        """
        block = course_structure[block_key]

        if getattr(block, 'has_score', False):
            problem_score = get_score(
                submissions_scores,
                csm_scores,
                persisted_block,
                block,
            )
            if problem_score:
                self.locations_to_scores[block_key] = problem_score

    def _persisted_model_params(self, student):
        """
        Returns the parameters for creating/updating the
        persisted model for this subsection grade.
        """
        return dict(user_id=student.id,
                    usage_key=self.location,
                    course_version=self.course_version,
                    subtree_edited_timestamp=self.subtree_edited_timestamp,
                    earned_all=self.all_total.earned,
                    possible_all=self.all_total.possible,
                    earned_graded=self.graded_total.earned,
                    possible_graded=self.graded_total.possible,
                    visible_blocks=self._get_visible_blocks,
                    attempted=self.attempted)

    @property
    def _get_visible_blocks(self):
        """
        Returns the list of visible blocks.
        """
        return [
            BlockRecord(location, score.weight, score.raw_possible,
                        score.graded)
            for location, score in self.locations_to_scores.iteritems()
        ]

    def _log_event(self, log_func, log_statement, student):
        """
        Logs the given statement, for this instance.
        """
        log_func(u"Persistent Grades: SG.{}, subsection: {}, course: {}, "
                 u"version: {}, edit: {}, user: {},"
                 u"total: {}/{}, graded: {}/{}".format(
                     log_statement,
                     self.location,
                     self.location.course_key,
                     self.course_version,
                     self.subtree_edited_timestamp,
                     student.id,
                     self.all_total.earned,
                     self.all_total.possible,
                     self.graded_total.earned,
                     self.graded_total.possible,
                 ))
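
A brief sketch of the intended call pattern for the class above: one path computes a fresh grade from the course structure, the other reloads a persisted grade. The collaborators (course_structure, the score dictionaries, the persisted model) come from the surrounding grading code and are only placeholders here.

# Fresh grade computed from the course structure:
#   grade = SubsectionGrade(subsection).init_from_structure(
#       student, course_structure, submissions_scores, csm_scores)
# Grade reloaded from a persisted model:
#   grade = SubsectionGrade(subsection).init_from_model(
#       student, model, course_structure, submissions_scores, csm_scores)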
Example #40
def plot_pie(filename):
    from itertools import chain
    import numpy as np
    from collections import OrderedDict, defaultdict
    import csv
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from map_test import get_json

    def flatten(listOfLists):
        "Flatten one level of nesting"
        return chain.from_iterable(listOfLists)

    category_map, categories = get_json()

    memo = []
    price = []
    finalmemo = []
    finalprice = []

    with open(filename, 'rU') as budget:
        reader = csv.reader(budget)
        for rows in reader:

            memo.append(rows[3:4])
            price.append(rows[4:5])

    flatmemo = flatten(memo)
    flatprice = flatten(price)

    for rows in flatmemo:
        finalmemo.append(rows)

    for rows in flatprice:
        finalprice.append(rows)

    del finalmemo[0]
    del finalprice[0]

    data = zip(finalmemo, finalprice)

    d = defaultdict(list)

    for k, v in data:
        if k != '' and v != '':
            d[k].append(v)

    orderddata = OrderedDict(d)

    cats = ['Total', 'Other']
    cats.extend(categories)
    category = OrderedDict.fromkeys(cats, 0.0)

    for key, val in orderddata.iteritems():
        if val:
            category["Total"] += abs(sum(map(float, val)))
        for cat_key in category_map.keys():
            if cat_key in key[-4:]:
                category[category_map[cat_key]] += abs(sum(map(float, val)))

    category["Other"] = category["Total"] - sum(category.values()[2:])

    category.pop("Total")

    for key, val in category.items():
        if val == 0:
            category.pop(key)

    sizes = category.values()
    labels = category.keys()

    N = int(len(sizes))

    cs = cm.Set1(np.arange(N) / float(N))

    percent = [str(round(100. * x / sum(sizes), 1)) for x in sizes]

    legendent = [j + " " + percent[i] + "%" for i, j in enumerate(labels)]

    patches, text = plt.pie(sizes,
                            colors=cs,
                            pctdistance=0.7,
                            shadow=True,
                            startangle=90)

    plt.legend(patches,
               legendent,
               fontsize=8,
               loc="upper left",
               bbox_to_anchor=(0.75, 1))
    plt.axis('equal')
    plt.show()
Example #41
def GenTreeCode(tree, objlist, multithread=4):
    """
    Args:
        tree (TTree): TTree to process
        objlist (dict): Dict mapping histogram class name to the list of histograms (TH1, TH2 etc.) to fill
        multithread (int): Number of threads to use (0 = no multithreading)

    Returns:
        str: Name of the function that was generated
    """
    fname = 'RunTree%i' % GenTreeCode.counter
    # static counter to ensure we can give
    # each function a unique name
    GenTreeCode.counter += 1

    objs_by_sel = defaultdict(list)
    indexed_wts = OrderedDict()

    # Figure out the list of variable names
    varnames = set()
    varregex = re.compile('[_a-zA-Z][_a-zA-Z0-9]*')

    classnames = list(objlist.keys())
    nobjs_total = 0
    for clname in classnames:
        nobjs_total += len(objlist[clname])
        for i, obj in enumerate(objlist[clname]):
            obj._draw_idx = i
            varnames.update(varregex.findall(obj.sel))
            for var in obj.var:
                varnames.update(varregex.findall(var))
            varnames.update(varregex.findall(obj.wt))
            objs_by_sel[obj.sel].append(obj)
            # Give each object an index for the weight expression
            if obj.wt not in indexed_wts:
                indexed_wts[obj.wt] = len(indexed_wts)
            obj._wt_idx = indexed_wts[obj.wt]

    code = CodeLines()
    if multithread:
        code.Add('R__LOAD_LIBRARY(libTreePlayer)')
    code.Add('void %s(TTree *tree, TObjArray *hists) {' % fname)
    code.idlevel += 1
    obj_offset = 0  # offset in the full list
    for clname in classnames:
        nobjs = len(objlist[clname])
        code.Add('std::vector<%s *> v_%s_hists(%i);' % (clname, clname, nobjs))
        code.Add('for (unsigned i = 0; i < %i; ++i) {' % nobjs)
        code.idlevel += 1
        code.Add(
            'v_%s_hists[i] = static_cast<%s*>(hists->UncheckedAt(i + %i));' %
            (clname, clname, obj_offset))
        code.idlevel -= 1
        code.Add('}')
        obj_offset += nobjs
    if multithread:
        code.Add('ROOT::EnableImplicitMT(%i);' % multithread)
        for clname in classnames:
            nobjs = len(objlist[clname])
            code.Add(
                'std::vector<std::unique_ptr<ROOT::TThreadedObject<%s>>> v_%s_t_hists(%i);'
                % (clname, clname, nobjs))
            code.Add('for (unsigned i = 0; i < %i; ++i) {' % nobjs)
            code.idlevel += 1
            code.Add(
                'v_%s_t_hists[i] = std::make_unique<ROOT::TThreadedObject<%s>>(*(v_%s_hists[i]));'
                % (clname, clname, clname))
            code.idlevel -= 1
            code.Add('}')
        code.Add('ROOT::TTreeProcessorMT tp(*tree);')
        code.Add('unsigned ncalled = 0;')
        code.Add('auto myFunction = [&](TTreeReader &reader) {')
        code.idlevel += 1
        code.Add('ncalled += 1;')
    else:
        code.Add('TTreeReader reader(tree);')

    branches = tree.GetListOfBranches()
    binfos = []
    for i in range(branches.GetEntriesFast()):
        branch = branches.UncheckedAt(i)
        bname = branch.GetName()
        btype = branch.GetListOfLeaves().UncheckedAt(0).GetTypeName()
        binfos.append((bname, btype))
        if bname in varnames:
            code.Add('TTreeReaderValue<%s> %s_(reader, "%s");' %
                     (btype, bname, bname))
    if multithread:
        for clname in classnames:
            nobjs = len(objlist[clname])
            code.Add('std::vector<std::shared_ptr<%s>> v_%s_l_hists(%i);' %
                     (clname, clname, nobjs))
            code.Add('for (unsigned i = 0; i < %i; ++i) {' % nobjs)
            code.idlevel += 1
            code.Add('v_%s_l_hists[i] = v_%s_t_hists[i]->Get();' %
                     (clname, clname))
            code.idlevel -= 1
            code.Add('}')

    code.Add('while (reader.Next()) {')
    code.idlevel += 1
    for bname, btype in binfos:
        if bname in varnames:
            code.Add('%s %s = *%s_;' % (btype, bname, bname))
    for wt_expr, wt_idx in indexed_wts.iteritems():
        code.Add('double weightexpr_%i_ = %s;' % (wt_idx, wt_expr))
    for sel, objs in objs_by_sel.iteritems():
        code.Add('if (%s) {' % sel)
        code.idlevel += 1
        for obj in objs:
            if multithread:
                code.Add('v_%s_l_hists[%i]->Fill(%s, weightexpr_%i_);' %
                         (obj.classname, obj._draw_idx, ', '.join(
                             obj.var), obj._wt_idx))
            else:
                code.Add('v_%s_hists[%i]->Fill(%s, weightexpr_%i_);' %
                         (obj.classname, obj._draw_idx, ', '.join(
                             obj.var), obj._wt_idx))
        code.idlevel -= 1
        code.Add('}')
    code.idlevel -= 1
    code.Add('}')
    if multithread:
        code.idlevel -= 1
        code.Add('};')
        # Sometimes it seems small Trees don't have proper clusters assigned (we end up splitting with 1 per event)
        # This seems to be the only way to detect this...
        code.Add('auto it = tree->GetClusterIterator(0); it.Next();')
        code.Add('if (it.GetNextEntry() == 1) {')
        code.idlevel += 1
        code.Add(
            'std::cout << ">> Tree does not have proper clusters, running single-threaded" << std::endl;'
        )
        code.Add('TTreeReader reader(tree);')
        code.Add('myFunction(reader);')
        code.idlevel -= 1
        code.Add('} else {')
        code.idlevel += 1
        code.Add('tp.Process(myFunction);')
        code.idlevel -= 1
        code.Add('}')
        for clname in classnames:
            nobjs = len(objlist[clname])
            code.Add('for (unsigned i = 0; i < %i; ++i) {' % nobjs)
            code.idlevel += 1
            code.Add('v_%s_t_hists[i]->Merge()->Copy(*(v_%s_hists[i]));' %
                     (clname, clname))
            code.idlevel -= 1
            code.Add('}')
        code.Add('ROOT::DisableImplicitMT();')
        code.Add(
            'std::cout << ">> Draw function was called " << ncalled << " times" << std::endl;'
        )
    code.idlevel -= 1
    code.Add('}')
    fullcode = '\n'.join(code.res)
    # If we want to dump the code for testing...
    # includes = [
    #     '#include "TTreeReader.h"',
    #     '#include "TTree.h"',
    #     '#include "TObjArray.h"',
    #     '#include "TH1F.h"',
    #     '#include "TH2F.h"',
    #     '#include "ROOT/TTreeProcessorMT.hxx"',
    #     '#include <memory>',
    #     ''
    # ]
    # with open('%s.cc' % fname, 'w') as out_file:
    #     out_file.write('\n'.join(includes + code.res))
    # print fullcode
    start = time.time()
    ROOT.gInterpreter.Declare(fullcode)
    end = time.time()
    print '>> JIT compiled function %s with %i objects in %.2g seconds (%i cores)' % (
        fname, nobjs_total, (end - start), multithread)
    return fname
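
GenTreeCode reads a GenTreeCode.counter attribute before incrementing it, so that attribute must exist before the first call. A minimal sketch of that initialisation and of invoking the JIT-compiled function afterwards; the tree/histogram variable names are placeholders, not part of the original source.

GenTreeCode.counter = 0  # assumed to be set once at module level

# fname = GenTreeCode(tree, objlist, multithread=4)
# The generated C++ function is now known to the ROOT interpreter and can be called:
# getattr(ROOT, fname)(tree, hists_tobjarray)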
Example #42
def pp_scal_s_mod_3(mydoc, file_pp, PRINT_HEADER = True):
    """
    post processing of data
    Arguments:
    - `output`:
    - `file_pp`:
    """
    # SCALE_TAG_LIST = [#'+-Solve',
    #                   '+-SLM',
    #                   '+-FMM',
    #                   'Merge',
    #                   '+-RefineTree',
    #                   '+-Balance21'
                      # ]

    # ppnode_title  = mydoc.np
    # solve_tavg_sum = 0.0
    # solve_favg_sum = 0.0
    # tn_counter = 0
    teval_sum = 0.0
    feval_sum = 0.0

    tmerge_sum = 0.0
    fmerge_avg_sum = 0.0

    tsort_sum = 0.0
    tcomm_sum = 0.0

    tref_sum = 0.0
    fref_avg_sum = 0.0

    tsolve_sum = 0.0
    fsolve_sum = 0.0
    fsolve_avg_sum = 0.0

    tfmm = 0.0
    ffmm = 0.0

    tslm = 0.0
    fslm = 0.0

    ppnode_values = OrderedDict()
    ppnode_values['NP'] = mydoc.np
    for node in mydoc.node_list:
        scale_tag = '+-RefineTree'
        if  scale_tag in node.title:
            tref_sum =  float(node.values['t_max'])
            fref_avg_sum =  float(node.values['f_avg'])

        scale_tag = 'Merge'
        if  scale_tag in node.title:
            # ppnode_values['T'+scale_tag.replace('+-','')] = node.values['t_max']
            tmerge_sum = tmerge_sum + float(node.values['t_max'])
            fmerge_avg_sum = fmerge_avg_sum + float(node.values['f_avg'])

        scale_tag = 'Solve'
        if  scale_tag in node.title:
            # ppnode_values['T'+scale_tag.replace('+-','')] = node.values['t_max']
            tsolve_sum = tsolve_sum + float(node.values['t_max'])
            fsolve_sum = fsolve_sum + float(node.values['f/s_total'])
            fsolve_avg_sum = fsolve_avg_sum + float(node.values['f_avg'])

        # FMM
        scale_tag = '+-FMM'
        if  scale_tag in node.title:
            tfmm = float(node.values['t_max'])
            ffmm = float(node.values['f/s_total'])

        # SLM
        scale_tag = '+-SLM'
        if  scale_tag in node.title:
            tslm = float(node.values['t_max'])
            fslm = float(node.values['f/s_total'])

        scale_tag = '+-InEvaluation'
        if  scale_tag in node.title:
            teval_sum = teval_sum + float(node.values['t_max'])
            feval_sum = feval_sum + float(node.values['f_avg'])
        scale_tag = '+-OutEvaluation'
        if  scale_tag in node.title:
            teval_sum = teval_sum + float(node.values['t_max'])
            feval_sum = feval_sum + float(node.values['f_avg'])

        scale_tag = '+-LclHQSort'
        if  scale_tag in node.title:
            tsort_sum = tsort_sum + float(node.values['t_max'])

        scale_tag = '+-OutScatterIndex'
        if  scale_tag in node.title:
            tcomm_sum = tcomm_sum + float(node.values['t_max'])
        scale_tag = '+-OutScatterForward'
        if  scale_tag in node.title:
            tcomm_sum = tcomm_sum + float(node.values['t_max'])
        scale_tag = '+-OutScatterReverse'
        if  scale_tag in node.title:
            tcomm_sum = tcomm_sum + float(node.values['t_max'])

    nn =    float( ppnode_values['NP'])
    ppnode_values['TREF']   = "{:.4f}".format(tref_sum)
    ppnode_values['TSORT']  = "{:.4f}".format(tsort_sum)

    ppnode_values['TEVAl']  = "{:.4f}".format(teval_sum)
    feval = feval_sum/teval_sum if teval_sum else 0
    ppnode_values['FEVAl']  = "{:.4f}".format(feval)
    # ppnode_values['FEVAlN']  = "{:.4f}".format(feval/nn if nn else 0)

    ppnode_values['TMRG']   = "{:.4f}".format(tmerge_sum)
    ppnode_values['TCOMM']  = "{:.4f}".format(tcomm_sum)

    ppnode_values['TRFMRG']   = "{:.4f}".format(tref_sum+tmerge_sum)

    ppnode_values['TFMM']   = "{:.4f}".format(tfmm)
    ppnode_values['FFMM']   = "{:.4f}".format(ffmm)
    ppnode_values['FFMMN']  = "{:.4f}".format(ffmm/nn if nn else 0)

    ppnode_values['TSLM']   = "{:.4f}".format(tslm)
    ppnode_values['FSLM']   = "{:.4f}".format(fslm)
    ppnode_values['FSLMN']  = "{:.4f}".format(fslm/nn if nn else 0)

    tsolve = tsolve_sum + tmerge_sum + tref_sum
    fsolve_sum = fsolve_avg_sum + fmerge_avg_sum + fref_avg_sum
    fsolve = fsolve_sum/tsolve if tsolve else 0

    ppnode_values['TSOLVE'] = "{:.4f}".format(tsolve)
    ppnode_values['FSOLVE'] = "{:.4f}".format(fsolve)
    # ppnode_values['FSOLVEN'] = "{:.4f}".format(fsolve/nn if nn else 0)

    header_format = ''
    values_format = ''

    for key, val in ppnode_values.iteritems():
        header_format += "{:>12}".format(key)
        values_format += "{:>12}".format(val)
    header_format += "\n"
    values_format += "\n"
    file_pp.write(header_format)
    file_pp.write(values_format)
Example #43
                    if deltaR(ev.GenVisTau_eta[igen], ev.GenVisTau_phi[igen],
                              ev.GenVisTau_eta[jgen],
                              ev.GenVisTau_phi[jgen]) < 0.5:
                        skipthis = 1
                        break
                if skipthis == 1: continue
            GoodGenTaus.append(igen)

            ###########################

        Matched, DoubleCountRate = MatchTausToRefs(DoubleCountRate,
                                                   GoodGenTaus)

        for igen in GoodGenTaus:
            # initialise before filling
            for k, v in tofill_tau.iteritems():
                tofill_tau[k] = -999

            # per event quantities
            for ibranch in branches_event:
                tofill_tau[ibranch.name()] = ibranch.value(ev)

            # per gen tau quantities
            for ibranch in branches_gen:
                tofill_tau[ibranch.name()] = ibranch.value(ev)[igen]

            # loop on all reco taus and match to gen taus
            #best_match_idx_tau = -1
            #dRmax = 0.5
            #for itau in range(ev.nTau):
            #    dR = deltaR(ev.Tau_eta[itau], ev.Tau_phi[itau], ev.GenVisTau_eta[igen], ev.GenVisTau_phi[igen])
Example #44
                     t2_template_res=t2_template_res,
                     **fmap_args)),
            ("FreeSurfer",
             partial(run_freesurfer,
                     path=args.output_dir,
                     subject="sub-%s" % subject_label,
                     n_cpus=args.n_cpus)),
            ("PostFreeSurfer",
             partial(run_post_freesurfer,
                     path=args.output_dir,
                     subject="sub-%s" % subject_label,
                     grayordinatesres=grayordinatesres,
                     lowresmesh=lowresmesh,
                     n_cpus=args.n_cpus))
        ])
        for stage, stage_func in struct_stages_dict.iteritems():
            if stage in args.stages:
                stage_func()

        bolds = [
            f.filename for f in layout.get(subject=subject_label,
                                           type='bold',
                                           extensions=["nii.gz", "nii"])
        ]
        for fmritcs in bolds:
            fmriname = "_".join(
                fmritcs.split("sub-")[-1].split("_")[1:]).split(".")[0]
            assert fmriname

            fmriscout = fmritcs.replace("_bold", "_sbref")
            if not os.path.exists(fmriscout):
Example #45
        "kwargs": {},
        "paths": [path.abspath(path.join("tests", "unit"))],
        "include_arg": "test_name"
    }),
    ("compiletest", {
        "kwargs": {
            "release": False
        },
        "paths": [path.abspath(path.join("tests", "compiletest"))],
        "include_arg": "test_name"
    })
])

TEST_SUITES_BY_PREFIX = {
    path: k
    for k, v in TEST_SUITES.iteritems() if "paths" in v for path in v["paths"]
}


def create_parser_wpt():
    parser = wptcommandline.create_parser()
    parser.add_argument('--release',
                        default=False,
                        action="store_true",
                        help="Run with a release build of servo")
    parser.add_argument(
        '--chaos',
        default=False,
        action="store_true",
        help="Run under chaos mode in rr until a failure is captured")
    parser.add_argument('--pref',
Example #46
    def test(self,
             params,
             render_mode=DEFAULT_RENDER_MODE,
             release=False,
             tidy_all=False,
             no_progress=False,
             self_test=False,
             all_suites=False):
        suites = copy.deepcopy(TEST_SUITES)
        suites["tidy"]["kwargs"] = {
            "all_files": tidy_all,
            "no_progress": no_progress,
            "self_test": self_test
        }
        suites["wpt"]["kwargs"] = {"release": release}
        suites["css"]["kwargs"] = {"release": release}
        suites["unit"]["kwargs"] = {}
        suites["compiletest"]["kwargs"] = {"release": release}

        selected_suites = OrderedDict()

        if params is None:
            if all_suites:
                params = suites.keys()
            else:
                print(
                    "Specify a test path or suite name, or pass --all to run all test suites.\n\nAvailable suites:"
                )
                for s in suites:
                    print("    %s" % s)
                return 1

        for arg in params:
            found = False
            if arg in suites and arg not in selected_suites:
                selected_suites[arg] = []
                found = True
            else:
                suite = self.suite_for_path(arg)
                if suite is not None:
                    if suite not in selected_suites:
                        selected_suites[suite] = []
                    selected_suites[suite].append(arg)
                    found = True
                    break

            if not found:
                print("%s is not a valid test path or suite name" % arg)
                return 1

        test_start = time()
        for suite, tests in selected_suites.iteritems():
            props = suites[suite]
            kwargs = props.get("kwargs", {})
            if tests:
                kwargs[props["include_arg"]] = tests

            Registrar.dispatch("test-%s" % suite,
                               context=self.context,
                               **kwargs)

        elapsed = time() - test_start

        print("Tests completed in %0.2fs" % elapsed)
Example #47
class CompileDBBackend(CommonBackend):
    def _init(self):
        CommonBackend._init(self)
        if not util.check_top_objdir(self.environment.topobjdir):
            raise Exception()

        # The database we're going to dump out to.
        self._db = OrderedDict()

        # The cache for per-directory flags
        self._flags = {}

        self._envs = {}
        self._includes = defaultdict(list)
        self._defines = defaultdict(list)
        self._local_flags = defaultdict(dict)
        self._per_source_flags = defaultdict(list)
        self._extra_includes = defaultdict(list)
        self._gyp_dirs = set()
        self._dist_include_testing = '-I%s' % mozpath.join(
            self.environment.topobjdir, 'dist', 'include', 'testing')

    def consume_object(self, obj):
        # Those are difficult directories, that will be handled later.
        if obj.relativedir in (
                'build/unix/elfhack',
                'build/unix/elfhack/inject',
                'build/clang-plugin',
                'build/clang-plugin/tests',
                'toolkit/crashreporter/google-breakpad/src/common'):
            return True

        consumed = CommonBackend.consume_object(self, obj)

        if consumed:
            return True

        if isinstance(obj, DirectoryTraversal):
            self._envs[obj.objdir] = obj.config
            for var in ('STL_FLAGS', 'VISIBILITY_FLAGS', 'WARNINGS_AS_ERRORS'):
                value = obj.config.substs.get(var)
                if value:
                    self._local_flags[obj.objdir][var] = value

        elif isinstance(obj, (Sources, GeneratedSources)):
            # For other sources, include each source file.
            for f in obj.files:
                self._build_db_line(obj.objdir, obj.relativedir, obj.config, f,
                                    obj.canonical_suffix)

        elif isinstance(obj, LocalInclude):
            self._includes[obj.objdir].append('-I%s' % mozpath.normpath(
                obj.path.full_path))

        elif isinstance(obj, Linkable):
            if isinstance(obj.defines, Defines): # As opposed to HostDefines
                for d in obj.defines.get_defines():
                    if d not in self._defines[obj.objdir]:
                        self._defines[obj.objdir].append(d)
            self._defines[obj.objdir].extend(obj.lib_defines.get_defines())
            if isinstance(obj, SimpleProgram) and obj.is_unit_test:
                if (self._dist_include_testing not in
                        self._extra_includes[obj.objdir]):
                    self._extra_includes[obj.objdir].append(
                        self._dist_include_testing)

        elif isinstance(obj, VariablePassthru):
            if obj.variables.get('IS_GYP_DIR'):
                self._gyp_dirs.add(obj.objdir)
            for var in ('MOZBUILD_CFLAGS', 'MOZBUILD_CXXFLAGS',
                        'MOZBUILD_CMFLAGS', 'MOZBUILD_CMMFLAGS',
                        'RTL_FLAGS', 'VISIBILITY_FLAGS'):
                if var in obj.variables:
                    self._local_flags[obj.objdir][var] = obj.variables[var]
            if (obj.variables.get('DISABLE_STL_WRAPPING') and
                    'STL_FLAGS' in self._local_flags[obj.objdir]):
                del self._local_flags[obj.objdir]['STL_FLAGS']
            if (obj.variables.get('ALLOW_COMPILER_WARNINGS') and
                    'WARNINGS_AS_ERRORS' in self._local_flags[obj.objdir]):
                del self._local_flags[obj.objdir]['WARNINGS_AS_ERRORS']

        elif isinstance(obj, PerSourceFlag):
            self._per_source_flags[obj.file_name].extend(obj.flags)

        return True

    def consume_finished(self):
        CommonBackend.consume_finished(self)

        db = []

        for (directory, filename, unified), cmd in self._db.iteritems():
            env = self._envs[directory]
            cmd = list(cmd)
            if unified is None:
                cmd.append(filename)
            else:
                cmd.append(unified)
            local_extra = list(self._extra_includes[directory])
            if directory not in self._gyp_dirs:
                for var in (
                    'NSPR_CFLAGS',
                    'NSS_CFLAGS',
                    'MOZ_JPEG_CFLAGS',
                    'MOZ_PNG_CFLAGS',
                    'MOZ_ZLIB_CFLAGS',
                    'MOZ_PIXMAN_CFLAGS',
                ):
                    f = env.substs.get(var)
                    if f:
                        local_extra.extend(f)
            variables = {
                'LOCAL_INCLUDES': self._includes[directory],
                'DEFINES': self._defines[directory],
                'EXTRA_INCLUDES': local_extra,
                'DIST': mozpath.join(env.topobjdir, 'dist'),
                'DEPTH': env.topobjdir,
                'MOZILLA_DIR': env.topsrcdir,
                'topsrcdir': env.topsrcdir,
                'topobjdir': env.topobjdir,
            }
            variables.update(self._local_flags[directory])
            c = []
            for a in cmd:
                a = expand_variables(a, variables).split()
                if not a:
                    continue
                if isinstance(a, types.StringTypes):
                    c.append(a)
                else:
                    c.extend(a)
            per_source_flags = self._per_source_flags.get(filename)
            if per_source_flags is not None:
                c.extend(per_source_flags)
            db.append({
                'directory': directory,
                'command': ' '.join(shell_quote(a) for a in c),
                'file': filename,
            })

        import json
        # Output the database (a JSON file) to objdir/compile_commands.json
        outputfile = os.path.join(self.environment.topobjdir, 'compile_commands.json')
        with self._write_file(outputfile) as jsonout:
            json.dump(db, jsonout, indent=0)

    def _process_unified_sources(self, obj):
        # For unified sources, only include the unified source file.
        # Note that unified sources are never used for host sources.
        for f in obj.unified_source_mapping:
            self._build_db_line(obj.objdir, obj.relativedir, obj.config, f[0],
                                obj.canonical_suffix)
            for entry in f[1]:
                self._build_db_line(obj.objdir, obj.relativedir, obj.config,
                                    entry, obj.canonical_suffix, unified=f[0])

    def _handle_idl_manager(self, idl_manager):
        pass

    def _handle_ipdl_sources(self, ipdl_dir, sorted_ipdl_sources,
                             unified_ipdl_cppsrcs_mapping):
        for f in unified_ipdl_cppsrcs_mapping:
            self._build_db_line(ipdl_dir, None, self.environment, f[0],
                                '.cpp')

    def _handle_webidl_build(self, bindings_dir, unified_source_mapping,
                             webidls, expected_build_output_files,
                             global_define_files):
        for f in unified_source_mapping:
            self._build_db_line(bindings_dir, None, self.environment, f[0],
                                '.cpp')

    COMPILERS = {
        '.c': 'CC',
        '.cpp': 'CXX',
        '.m': 'CC',
        '.mm': 'CXX',
    }

    CFLAGS = {
        '.c': 'CFLAGS',
        '.cpp': 'CXXFLAGS',
        '.m': 'CFLAGS',
        '.mm': 'CXXFLAGS',
    }

    def _build_db_line(self, objdir, reldir, cenv, filename,
                       canonical_suffix, unified=None):
        if canonical_suffix not in self.COMPILERS:
            return
        db = self._db.setdefault((objdir, filename, unified),
            cenv.substs[self.COMPILERS[canonical_suffix]].split() +
            ['-o', '/dev/null', '-c'])
        reldir = reldir or mozpath.relpath(objdir, cenv.topobjdir)

        def append_var(name):
            value = cenv.substs.get(name)
            if not value:
                return
            if isinstance(value, types.StringTypes):
                value = value.split()
            db.extend(value)

        if canonical_suffix in ('.mm', '.cpp'):
            db.append('$(STL_FLAGS)')

        db.extend((
            '$(VISIBILITY_FLAGS)',
            '$(DEFINES)',
            '-I%s' % mozpath.join(cenv.topsrcdir, reldir),
            '-I%s' % objdir,
            '$(LOCAL_INCLUDES)',
            '-I%s/dist/include' % cenv.topobjdir,
            '$(EXTRA_INCLUDES)',
        ))
        append_var('DSO_CFLAGS')
        append_var('DSO_PIC_CFLAGS')
        if canonical_suffix in ('.c', '.cpp'):
            db.append('$(RTL_FLAGS)')
        append_var('OS_COMPILE_%s' % self.CFLAGS[canonical_suffix])
        append_var('OS_CPPFLAGS')
        append_var('OS_%s' % self.CFLAGS[canonical_suffix])
        append_var('MOZ_DEBUG_FLAGS')
        append_var('MOZ_OPTIMIZE_FLAGS')
        append_var('MOZ_FRAMEPTR_FLAGS')
        db.append('$(WARNINGS_AS_ERRORS)')
        db.append('$(MOZBUILD_%s)' % self.CFLAGS[canonical_suffix])
        if canonical_suffix == '.m':
            append_var('OS_COMPILE_CMFLAGS')
            db.append('$(MOZBUILD_CMFLAGS)')
        elif canonical_suffix == '.mm':
            append_var('OS_COMPILE_CMMFLAGS')
            db.append('$(MOZBUILD_CMMFLAGS)')
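
Each entry written to compile_commands.json by consume_finished() above has the standard clang compilation-database shape; the values below are placeholders for illustration only.

# {
#     "directory": "/path/to/objdir/some/dir",
#     "command": "c++ -o /dev/null -c <expanded flags> SomeFile.cpp",
#     "file": "SomeFile.cpp"
# }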
Example #48
    def parse_authors(self, root):
        # Build a dict of authors with their contribution if any in values
        authors_elements = root.xpath(
            "//span[@title='%s']/preceding-sibling::node()" % u'출판사')

        if not authors_elements:
            return

        authors_type_map = OrderedDict()
        #authors_elements_len = len(authors_elements)
        authors_elements.reverse()

        # Walk the elements in reverse and assign the "role" (contribution) to each author.
        contrib = ''
        for el in authors_elements:
            # print div_authors[n-1]
            #el = authors_elements[n-1]
            self._removeTags(el, ["div", "script", "style"])
            if isinstance(el, lxml.html.HtmlElement) and contrib:
                if el.get("class") != "name": continue
                spliter = ","
                if re.search("detailViewEng", self.url): spliter = "/"
                authors_splits = re.sub("\s{2,}", " ",
                                        el.text_content().strip()).replace(
                                            "/", "/").split(spliter)
                authors_splits.reverse()
                for authors_split in authors_splits:
                    if '(' in authors_split:
                        #log.info('Stripping off series(')
                        authors_split = authors_split.rpartition('(')[0]
                    authors_split = re.sub("(\s외|\s편|著 |\[著\]|編 )", "",
                                           authors_split).strip()
                    if authors_split in authors_type_map.keys():
                        del authors_type_map[authors_split]
                    authors_type_map[authors_split] = contrib
            elif isinstance(el, lxml.etree._ElementUnicodeResult):
                if el.strip():
                    contrib = el.strip()
        item = authors_type_map.items()
        item.reverse()
        authors_type_map = OrderedDict(item)

        # User either requests all authors, or only the primary authors (latter is the default)
        # If only primary authors, only bring them in if:
        # 1. They have no author type specified
        # 2. They have an author type of 'Kyobobook Author'
        # 3. There are no authors from 1&2 and they have an author type of 'Editor'
        get_all_authors = cfg.plugin_prefs[cfg.STORE_NAME][
            cfg.KEY_GET_ALL_AUTHORS]
        authors = []
        valid_contrib = None
        for a, contrib in authors_type_map.iteritems():
            if get_all_authors:
                authors.append(a)
            else:
                if not contrib or contrib == u'지음' or contrib == u'저자':
                    authors.append(a)
                elif len(authors) == 0:
                    authors.append(a)
                    valid_contrib = contrib
                elif contrib == valid_contrib:
                    authors.append(a)
                else:
                    break
        return authors
Example #49
        Exception.__init__(self, msg)


# a dictionary that maps handler type in str to handler type class object
handler_type_mapping = OrderedDict([('Init', InitHandler), \
                        ('Pose', PoseHandler), \
                        ('LocomotionCommand', LocomotionCommandHandler), \
                        ('Drive', DriveHandler), \
                        ('MotionControl', MotionControlHandler), \
                        ('Sensor', SensorHandler), \
                        ('Actuator', ActuatorHandler)
                       ])

# update the dictionary to include bidirectional mapping
handler_type_mapping.update(
    OrderedDict([(v, k) for k, v in handler_type_mapping.iteritems()]))
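
Once the update above has run, the mapping works in both directions; a tiny sanity-check sketch, using one of the handler classes already imported by the original module:

# assert handler_type_mapping['Sensor'] is SensorHandler
# assert handler_type_mapping[SensorHandler] == 'Sensor'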


def getHandlerTypeClass(name):
    """
    Given a handler type name in string, return the corresponding handler class object
    """
    if not isinstance(name, str):
        raise TypeError("Expected handler type name as string")

    # if the name ends with `Handler`, remove it
    name = re.sub('Handler$', '', name)

    try:
        return handler_type_mapping[name]
    except KeyError:
Example #50
    params = OrderedDict()

    params = param_init_fflayer(params, _concat(ff, '1'), 28 * 28, 300)
    params = param_init_fflayer(params, _concat(ff, '2'), 300, 150)

    if train_rou == 'synthetic_gradients':
        params = param_init_sgmod(params, _concat(sg, '1'), 300)
        params = param_init_sgmod(params, _concat(sg, '2'), 150)

    params = param_init_fflayer(params, _concat(ff, 'o'), 150, 10)

else:
    params = np.load(sys.argv[2])

tparams = OrderedDict()
for key, val in params.iteritems():
    tparams[key] = theano.shared(val, name=key)

# Training graph
if len(sys.argv) < 2 or int(sys.argv[1]) == 0:
    print "Constructing the training graph"

    train_data = theano.shared(tri, name='train_data')
    train_labels = theano.shared(trl, name='train_labels')

    # pass a batch of indices
    img_ids = T.vector('ids', dtype='int64')
    img = train_data[img_ids, :]
    lbl = train_labels[img_ids, :]
    if train_rou == 'synthetic_gradients':
        lbl_one_hot = T.extra_ops.to_one_hot(lbl, 10, dtype='float32')
Example #51
                                  baseline=baseline,
                                  name=name,
                                  number=5,
                                  verbose=False)
                    results[name].append(t * 1000000)
                    timings.append("{:13.4f} us".format(t * 1000000))
                except RuntimeError:
                    results[name].append(np.inf)
                    timings.append("{:>16s}".format("recursion errror"))
            print "| " + " | ".join(timings) + " |"

    import pylab as py
    index = np.arange(len(benchmark_names))
    bar_width = 0.7 / len(results)
    colors = iter("brgy")
    for i, (m, result) in enumerate(results.iteritems()):
        py.bar(
            index + bar_width * i,
            result,
            bar_width,
            alpha=0.4,
            color=colors.next(),
            log=True,
            label=m,
        )

    py.xlim(xmax=index[-1] + bar_width * (i + 1))
    py.xlabel('Benchmark')
    py.ylabel('Time (us)')
    py.title('Benchmark of different tailcall optimization schemes')
    py.xticks(index + bar_width, benchmark_names, rotation=70)
Example #52
def delete_namespace(namespace_id, throttle=False, dry_run=False):
    """
    Delete all the data associated with a namespace from the database.
    USE WITH CAUTION.

    NOTE: This function is only called from bin/delete-account-data.
    It prints to stdout.

    Raises AccountDeletionErrror with message if there are problems
    """

    with session_scope(namespace_id) as db_session:
        try:
            account = db_session.query(Account).join(Namespace).filter(
                Namespace.id == namespace_id).one()
        except NoResultFound:
            raise AccountDeletionErrror('Could not find account in database')

        if not account.is_marked_for_deletion:
            raise AccountDeletionErrror(
                'Account is_marked_for_deletion is False. '
                'Change this to proceed with deletion.')
        account_id = account.id
        account_discriminator = account.discriminator

    log.info('Deleting account', account_id=account_id)
    start_time = time.time()

    # These filters are used to configure batch deletion in chunks for
    # specific tables that are prone to transaction blocking during
    # large concurrent write volume.  See _batch_delete
    # NOTE: ImapFolderInfo doesn't really fall into this category but
    # we include it here for simplicity anyway.

    filters = OrderedDict()
    for table in [
            'message', 'block', 'thread', 'transaction', 'actionlog',
            'contact', 'event', 'dataprocessingcache'
    ]:
        filters[table] = ('namespace_id', namespace_id)

    if account_discriminator == 'easaccount':
        filters['easuid'] = ('easaccount_id', account_id)
        filters['easfoldersyncstatus'] = ('account_id', account_id)
    else:
        filters['imapuid'] = ('account_id', account_id)
        filters['imapfoldersyncstatus'] = ('account_id', account_id)
        filters['imapfolderinfo'] = ('account_id', account_id)

    from inbox.ignition import engine_manager
    # Bypass the ORM for performant bulk deletion;
    # we do /not/ want Transaction records created for these deletions,
    # so this is okay.
    engine = engine_manager.get_for_id(namespace_id)

    for cls in filters:
        _batch_delete(engine,
                      cls,
                      filters[cls],
                      throttle=throttle,
                      dry_run=dry_run)

    # Use a single delete for the other tables. Rows from tables which contain
    # cascade-deleted foreign keys to other tables deleted here (or above)
    # are also not always explicitly deleted, except where needed for
    # performance.
    #
    # NOTE: Namespace, Account are deleted at the end too.

    query = 'DELETE FROM {} WHERE {}={};'

    filters = OrderedDict()
    for table in ('category', 'calendar'):
        filters[table] = ('namespace_id', namespace_id)
    for table in ('folder', 'label'):
        filters[table] = ('account_id', account_id)
    filters['namespace'] = ('id', namespace_id)

    for table, (column, id_) in filters.iteritems():
        log.info('Performing bulk deletion', table=table)
        start = time.time()

        if throttle and check_throttle():
            log.info("Throttling deletion")
            gevent.sleep(60)

        if not dry_run:
            engine.execute(query.format(table, column, id_))
        else:
            log.debug(query.format(table, column, id_))

        end = time.time()
        log.info('Completed bulk deletion', table=table, time=end - start)

    # Delete the account object manually to get rid of the various objects
    # associated with it (e.g: secrets, tokens, etc.)
    with session_scope(account_id) as db_session:
        account = db_session.query(Account).get(account_id)
        if dry_run is False:
            db_session.delete(account)
            db_session.commit()

    # Delete liveness data (heartbeats)
    log.debug('Deleting liveness data', account_id=account_id)
    clear_heartbeat_status(account_id)

    statsd_client.timing('mailsync.account_deletion.queue.deleted',
                         time.time() - start_time)
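
The helper _batch_delete used above is not shown in this snippet. Below is a minimal sketch of the chunked-deletion idea the surrounding comments describe, written as an assumption about its shape rather than the original implementation; it reuses log, check_throttle and gevent from the code above and deletes matching rows in fixed-size chunks so large tables do not hold long-running transactions.

CHUNK_SIZE = 1000  # assumed chunk size

def _batch_delete(engine, table, column_id_tuple, throttle=False, dry_run=False):
    column, id_ = column_id_tuple
    query = 'DELETE FROM {} WHERE {}={} LIMIT {};'.format(
        table, column, id_, CHUNK_SIZE)
    while True:
        if dry_run:
            log.debug(query)
            return
        if throttle and check_throttle():
            log.info("Throttling deletion")
            gevent.sleep(60)
        result = engine.execute(query)
        if result.rowcount == 0:
            return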
Example #53
def get_callers(candidates):

    cand_str = candidates.splitlines()
    cand_list = OrderedDict(
    )  # dictionary of address, and the candidate object

    # place all candidates into the above dictionary
    for candidate in cand_str:
        logging.debug("Candidate: {}".format(candidate))
        candidate = candidate.split()
        if isHex(
                candidate[3]
        ):  # skip call targets that aren't valid hex (broken instructions)
            callee_candidate = callee(int(candidate[0], 16),
                                      int(candidate[3], 16))
            cand_list[int(candidate[0], 16)] = callee_candidate

    logging.info("Found {} potential candidates for grouping.".format(
        len(cand_list)))

    # form groupings based off of "close" call groupings
    func = 0x0
    current = 0x0
    call = None
    callers = {}

    for address, candidate in cand_list.iteritems(
    ):  # iterate over items in-order (by address)

        #logging.info("Candidate func address: {}\nCurrent address: {}".format(address, current))

        if (func == 0x0
            ):  # no defined caller, make a new one starting at first address

            func = int(
                address or 0
            )  # current starts at the base address, though functions may start earlier
            current = func
            call = caller(func, candidate)
            call.push(
                address, candidate
            )  # push THIS candidate into the caller (first call of mass caller func)

        elif (
                abs(address - current) <= 0xA
        ):  # a candidate is "close" to another if it is within 10 of the next address
            call.push(address, candidate)  # push a candidate into the caller
            current = (address or 0)

        else:
            if (call.count >= 4
                ):  # only keep callers with enough grouped candidates; smaller groups are discarded
                callers[
                    func] = call  # save the caller object, otherwise overwrite it

            func = int(
                address or 0
            )  # current starts at the base address, though functions may start earlier
            call = caller(func, candidate)
            call.push(
                address, candidate
            )  # push THIS candidate into the caller (first call of mass caller func)
            current = func

    logging.info("Found {} groups of candidates.".format(len(callers)))

    return callers
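
The helpers isHex, callee and caller used above are defined elsewhere in the original script. A minimal sketch of what they might look like, based only on how they are called here; these are assumptions made to keep the grouping logic readable, not the original definitions.

def isHex(s):
    # True if the token parses as a hexadecimal number
    try:
        int(s, 16)
        return True
    except ValueError:
        return False

class callee(object):
    # one candidate call site: the address of the call and its parsed target
    def __init__(self, address, target):
        self.address = address
        self.target = target

class caller(object):
    # a group of nearby call sites, assumed to belong to one calling function
    def __init__(self, func, first_candidate):
        self.func = func
        self.calls = OrderedDict()
        self.count = 0

    def push(self, address, candidate):
        self.calls[address] = candidate
        self.count += 1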
Example #54
class Router(object):
    """ Represents a router of a chip, with a set of available links.\
        The router is iterable over the links, providing (source_link_id,\
        link) where:

            * source_link_id is the id of a link
            * link is the link with id source_link_id
    """
    ROUTER_DEFAULT_AVAILABLE_ENTRIES = 1024

    ROUTER_DEFAULT_CLOCK_SPEED = 150 * 1024 * 1024

    def __init__(
            self, links, emergency_routing_enabled,
            clock_speed=ROUTER_DEFAULT_CLOCK_SPEED,
            n_available_multicast_entries=ROUTER_DEFAULT_AVAILABLE_ENTRIES):
        """
        :param links: iterable of links
        :type links: iterable of :py:class:`spinn_machine.link.Link`
        :param emergency_routing_enabled: Determines if the router emergency\
                    routing is operating
        :type emergency_routing_enabled: bool
        :param clock_speed: The router clock speed in cycles per second
        :type clock_speed: int
        :param n_available_multicast_entries: The number of entries available\
                    in the routing table
        :type n_available_multicast_entries: int
        :raise spinn_machine.exceptions.SpinnMachineAlreadyExistsException: If\
                    any two links have the same source_link_id
        """
        self._links = OrderedDict()
        for link in links:
            self.add_link(link)

        self._emergency_routing_enabled = emergency_routing_enabled
        self._clock_speed = clock_speed
        self._n_available_multicast_entries = n_available_multicast_entries

    def add_link(self, link):
        """ Add a link to the router of the chip

        :param link: The link to be added
        :type link: :py:class:`spinn_machine.link.Link`
        :return: Nothing is returned
        :rtype: None
        :raise spinn_machine.exceptions.SpinnMachineAlreadyExistsException: If\
                    another link already exists with the same source_link_id
        """
        if link.source_link_id in self._links:
            raise SpinnMachineAlreadyExistsException(
                "link", str(link.source_link_id))
        self._links[link.source_link_id] = link

    def is_link(self, source_link_id):
        """ Determine if there is a link with id source_link_id.\
            Also implemented as __contains__(source_link_id)

        :param source_link_id: The id of the link to find
        :type source_link_id: int
        :return: True if there is a link with the given id, False otherwise
        :rtype: bool
        :raise None: No known exceptions are raised
        """
        return source_link_id in self._links

    def __contains__(self, source_link_id):
        """ See :py:meth:`is_link`
        """
        return self.is_link(source_link_id)

    def get_link(self, source_link_id):
        """ Get the link with the given id, or None if no such link.\
            Also implemented as __getitem__(source_link_id)

        :param source_link_id: The id of the link to find
        :type source_link_id: int
        :return: The link, or None if no such link
        :rtype: :py:class:`spinn_machine.link.Link`
        :raise None: No known exceptions are raised
        """
        if source_link_id in self._links:
            return self._links[source_link_id]
        return None

    def __getitem__(self, source_link_id):
        """ See :py:meth:`get_link`
        """
        return self.get_link(source_link_id)

    @property
    def links(self):
        """ The available links of this router

        :return: an iterable of available links
        :rtype: iterable of :py:class:`spinn_machine.link.Link`
        :raise None: does not raise any known exceptions
        """
        return self._links.itervalues()

    def __iter__(self):
        """ Get an iterable of source link ids and links in the router

        :return: an iterable of tuples of (source_link_id, link) where:
                    * source_link_id is the id of the link
                    * link is a router link
        :rtype: iterable of (int, :py:class:`spinn_machine.link.Link`)
        :raise None: does not raise any known exceptions
        """
        return self._links.iteritems()

    @property
    def emergency_routing_enabled(self):
        """ Indicator of whether emergency routing is enabled

        :return: True if emergency routing is enabled, False otherwise
        :rtype: bool
        :raise None: does not raise any known exceptions
        """
        return self._emergency_routing_enabled

    @property
    def clock_speed(self):
        """ The clock speed of the router in cycles per second

        :return: The clock speed in cycles per second
        :rtype: int
        :raise None: does not raise any known exceptions
        """
        return self._clock_speed

    @property
    def n_available_multicast_entries(self):
        """ The number of available multicast entries in the routing tables

        :return: The number of available entries
        :rtype: int
        :raise None: does not raise any known exceptions
        """
        return self._n_available_multicast_entries

    def __str__(self):
        return (
            "[Router: clock_speed={} MHz, emergency_routing={},"
            "available_entries={}, links={}]".format(
                (self._clock_speed / 1000000),
                self._emergency_routing_enabled,
                self._n_available_multicast_entries, self._links.values()))

    def __repr__(self):
        return self.__str__()

    def get_neighbouring_chips_coords(self):
        """utility method to convert links into x and y coords for placers

        :return: iterable list of destination coords as x and y dictionaries
        :rtype: iterable of dict
        :raise None: this method does not raise any known exception

        """
        next_hop_chips_coords = list()
        for link in self.links:
            next_hop_chips_coords.append(
                {'x': link.destination_x, 'y': link.destination_y})
        return next_hop_chips_coords
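
A small, self-contained usage sketch for the class above. FakeLink is a stand-in that only provides the attributes Router touches here (source_link_id, destination_x, destination_y); it is not the real spinn_machine.link.Link API.

class FakeLink(object):
    def __init__(self, source_link_id, destination_x, destination_y):
        self.source_link_id = source_link_id
        self.destination_x = destination_x
        self.destination_y = destination_y

router = Router([FakeLink(0, 1, 0), FakeLink(1, 1, 1)],
                emergency_routing_enabled=False)
print router.is_link(0)                       # True
print router.get_neighbouring_chips_coords()  # [{'x': 1, 'y': 0}, {'x': 1, 'y': 1}]
for source_link_id, link in router:           # (source_link_id, link) pairs
    print source_link_id, link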
Example #55
                                ]
                            }
    return outliers


pois_features_threshold, non_pois_features_threshold = get_thresholds(5.6)
outliers = detect_outliers(pois_features_threshold,
                           non_pois_features_threshold)
print outliers

for feature in features_list:
    od_pois = OrderedDict(
        sorted(pois.items(), key=lambda x: x[1][feature], reverse=False))
    #    [~np.isnan(A)].mean()
    od_non_pois = OrderedDict(
        sorted(non_pois.items(), key=lambda x: x[1][feature], reverse=False))
    #print "feature: ",feature
    for k, v in od_pois.iteritems():
        #print k
        #print v[feature]
        if k == feature:
            #print v
            m = True
#print od_pois
#my_dataset = od[len(od)*best_results[9]:len(od)*(1-best_results[9])]
#cleaned_pois = dict(od_pois.items()[int(len(od_pois)*best_results[9]):int(len(od_pois)*(1.0-best_results[9]))])
#cleaned_non_pois = dict(od_non_pois.items()[int(len(od_non_pois)*best_results[9]):int(len(od_non_pois)*(1.0-best_results[9]))])
#my_dataset = cleaned_pois.copy()
#my_dataset.update(cleaned_non_pois)
#print "len(my_dataset)",len(my_dataset)
Example #56
def allocations(request):
    allocations_by_hour = OrderedDict()
    filtered_allocations = Allocation.objects.filter()
    timetable = None
    r = request.REQUEST
    teacher = None
    classroom = None
    activity = None
    group = None
    if 'timetable' in r:
        timetable = Timetable.objects.get(id=r['timetable'])
        filtered_allocations = filtered_allocations.filter(timetable=timetable)
    if 'teacher' in r:
        teacher = Teacher.objects.get(id=r['teacher'])
        filtered_allocations = filtered_allocations.filter(
            activityRealization__teachers__exact=teacher)
    if 'classroom' in r:
        classroom = Classroom.objects.get(id=r['classroom'])
        filtered_allocations = filtered_allocations.filter(classroom=classroom)
    if 'activity' in r:
        activity = Activity.objects.get(id=r['activity'])
        filtered_allocations = filtered_allocations.filter(
            activity__activity=activity)
    groups_listed = [None]
    if 'group' in r:
        group = Group.objects.get(id=r['group'])
        groups_listed = group.children() + [group]

    # Limit the number of allocations in response to MAX_ALLOCATIONS
    filters = ['timetable', 'teacher', 'classroom', 'activity', 'group']
    if not any([filter in r for filter in filters]):
        # Request is not filtered
        # Limit response to 10 allocations
        MAX_ALLOCATIONS = 10
        filtered_allocations = filtered_allocations[:MAX_ALLOCATIONS]

    max_overlaps_day = []
    whdict = dict([(WORKHOURS[i][0], i) for i in range(len(WORKHOURS))])
    for hour in [i[0] for i in WORKHOURS]:
        allocations_by_hour[hour] = [[] for j in WEEKDAYS]
    space_taken_list = []
    Span = namedtuple('Span', ['start', 'duration', 'w', 'action'])
    for day in range(len(WEEKDAYS)):
        # create the set of all activities for this day
        sa = set()
        for g in groups_listed:
            fa = filtered_allocations.filter(day=WEEKDAYS[day][0])
            if g is not None:
                fa = fa.filter(activity__studyGroups__exact=g.id)
            for a in fa:
                if a not in sa:
                    sa.add(a)
        allocation_tab = []
        start_hours = []
        for a in sorted(sa, key=lambda x: whdict[x.start]):
            placed = False
            j = 0
            while (j < len(allocation_tab)) and not placed:
                last = allocation_tab[j][-1]
                last_end = last.start + last.duration
                new_start = whdict[a.start]
                if last_end <= new_start:
                    new_pre_last_duration = new_start - last_end
                    if new_pre_last_duration > 0:
                        allocation_tab[j].append(
                            Span(start=last_end,
                                 duration=new_pre_last_duration,
                                 w=1,
                                 action=None))
                    allocation_tab[j].append(
                        Span(start=new_start,
                             duration=a.duration,
                             w=1,
                             action=a))
                    placed = True
                j += 1
            if not placed:
                l = [
                    Span(start=whdict[a.start],
                         duration=a.duration,
                         w=1,
                         action=a)
                ]
                start_hours.append(whdict[a.start])
                allocation_tab.append(l)
        # expand the allocations and empty spaces
        # insert the heading and trailing empty spaces
        prev_duration = None
        # print start_hours
        if len(allocation_tab) < 1:
            allocation_tab = [[
                Span(start=0, duration=len(whdict), w=1, action=None)
            ]]
        else:
            to_set = None
            for i, l in enumerate(allocation_tab):
                if start_hours[i] > 0:
                    l.insert(
                        0,
                        Span(start=0,
                             duration=start_hours[i],
                             w=1,
                             action=None))
                last = l[-1]
                duration = len(whdict) - (last.start + last.duration)
                if duration > 0:
                    if prev_duration == duration:
                        # print to_set, l
                        s = allocation_tab[to_set][-1]
                        allocation_tab[to_set][-1] = Span(start=s.start,
                                                          duration=s.duration,
                                                          w=s.w + 1,
                                                          action=None)
                    else:
                        s = Span(start=last.start + last.duration,
                                 duration=duration,
                                 w=1,
                                 action=None)
                        l.append(s)
                        to_set = i
                prev_duration = duration
        # insert allocations from allocation_tab into allocations_by_hour
        for l in allocation_tab:
            for j in l:
                allocations_by_hour[WORKHOURS[j.start][0]][day].append(
                    (j.w, j.duration, j.action))
        space_taken_list.append(allocation_tab)
        max_overlaps_day.append(len(allocation_tab))
    daynames = {  # weekday display names (Slovenian)
        "MON": "Ponedeljek",
        "TUE": 'Torek',
        "WED": "Sreda",
        "THU": "Četrtek",
        "FRI": "Petek"
    }
    day_header = []
    for day in range(len(WEEKDAYS)):
        day_header.append((daynames[WEEKDAYS[day][0]], max_overlaps_day[day]))
    return render_to_response(
        'timetable/allocations.html', {
            'timetable': timetable,
            'teacher': teacher,
            'classroom': classroom,
            'activity': activity,
            'group': group,
            'day_header': day_header,
            'allocations_by_hour': allocations_by_hour.iteritems()
        })
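
# A minimal standalone sketch (not part of the view above) of the greedy
# column-packing idea used to build allocation_tab: each allocation goes into
# the first column whose last span ends at or before the new start; otherwise
# a new column is opened. All names below are illustrative only.
from collections import namedtuple

DemoSpan = namedtuple('DemoSpan', ['start', 'duration'])

def pack_columns(spans):
    columns = []
    for s in sorted(spans, key=lambda x: x.start):
        for col in columns:
            last = col[-1]
            if last.start + last.duration <= s.start:
                col.append(s)
                break
        else:
            columns.append([s])
    return columns

# Three overlapping spans fit into two parallel columns:
# [[DemoSpan(0, 2), DemoSpan(2, 1)], [DemoSpan(1, 2)]]
print(pack_columns([DemoSpan(0, 2), DemoSpan(1, 2), DemoSpan(2, 1)]))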
Example #57
def setup_scenarios(gen_data, generators, times):

    col = 'scenariosdirectory'
    scenario_values = pd.Panel()
    if user_config.deterministic_solve or user_config.perfect_solve or \
            (col not in gen_data.columns):
        # a deterministic problem
        return scenario_values

    gen_params = gen_data[gen_data[col].notnull()]
    if len(gen_params) > 1:
        raise NotImplementedError('more than one generator with scenarios.')

    gen = generators[gen_params.index[0]]
    gen.has_scenarios = True

    # directory of scenario values where each file is one day
    scenarios_directory = gen_params['scenariosdirectory'].values[0]

    searchstr = "*.csv"

    filenames = sorted(
        glob(
            joindir(user_config.directory,
                    joindir(scenarios_directory, searchstr))))
    if not filenames:
        raise IOError('no scenario files in "{}"'.format(scenarios_directory))

    alldata = OrderedDict()
    for f in filenames:
        data = _parse_scenario_day(f)
        # label scenarios for the day with the date
        date = Timestamp(data.columns.drop('probability')[0]).date()
        alldata[date] = data

    # TODO - assumes one hour intervals!!
    hrs = user_config.hours_commitment + user_config.hours_overlap

    # make scenarios into a pd.Panel with axes: day, scenario, {prob, [hours]}
    scenario_values = pd.Panel(
        items=alldata.keys(),
        major_axis=range(max([len(dat) for dat in alldata.values()])),
        minor_axis=['probability'] + range(hrs))

    for day, scenarios in alldata.iteritems():
        if 'probability' == scenarios.columns[-1]:
            # reorder so that probability is the first column
            scenarios = scenarios[scenarios.columns[:-1].insert(
                0, 'probability')]
        # rename the times into just hour offsets
        scenarios = scenarios.rename(columns=dict(
            zip(scenarios.columns, ['probability'] +
                range(len(scenarios.columns) - 1))))

        # and take the number of hours needed
        scenarios = scenarios[scenarios.columns[:1 + hrs]]

        scenario_values[day] = scenarios

    if user_config.wind_multiplier != 1.0:
        scenario_values *= user_config.wind_multiplier
        svt = scenario_values.transpose(2, 1, 0)
        svt['probability'] *= 1 / user_config.wind_multiplier
        scenario_values = svt.transpose(2, 1, 0)

    gen.scenario_values = scenario_values
    # defer scenario tree construction until actual time stage starts
    return scenario_values
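
# Hedged illustration (values and names made up, not from the code above) of
# the per-day scenario layout setup_scenarios assumes after renaming: one row
# per scenario, a 'probability' column plus one numeric column per hour.
import pandas as pd

day_scenarios = pd.DataFrame({
    'probability': [0.6, 0.4],
    0: [100.0, 80.0],  # hour-0 values for scenarios 0 and 1
    1: [110.0, 70.0],  # hour-1 values
})

# Probability-weighted expectation across scenarios for each hour.
expected = day_scenarios[[0, 1]].mul(day_scenarios['probability'], axis=0).sum()
print(expected)  # hour 0 -> 92.0, hour 1 -> 94.0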
Example #58
 def dumpbin( self ):
     """
          Return a binary dump of the NIST object, suitable for writing to a file opened in "wb" mode.
         
         :return: Binary representation of the NIST object.
         :rtype: str
     """
     debug.info( "Dumping NIST in binary" )
     
     self.clean()
     self.patch_to_standard()
     
     outnist = []
     
     for ntype in self.get_ntype():
         for idc in self.get_idc( ntype ):
             if ntype == 4:
                 outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 1 ] ), 4 * 8 ) )
                 outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 2 ] ), 1 * 8 ) )
                 outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 3 ] ), 1 * 8 ) )
                 outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 4 ] ), 1 * 8 ) )
                 outnist.append( ( chr( 0xFF ) * 5 ) )
                 outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 5 ] ), 1 * 8 ) )
                 outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 6 ] ), 2 * 8 ) )
                 outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 7 ] ), 2 * 8 ) )
                 outnist.append( int_to_binstring( int( self.data[ ntype ][ idc ][ 8 ] ), 1 * 8 ) )
                 outnist.append( self.data[ ntype ][ idc ][ 999 ] )
             else:
                 od = OrderedDict( sorted( self.data[ ntype ][ idc ].items() ) )
                 outnist.append( join( GS, [ tagger( ntype, tagid ) + value for tagid, value in od.iteritems() ] ) + FS )
     
     return "".join( outnist )
Example #59
class Environment(object):
    """Environment within which all agents operate."""

    valid_actions = [None, 'forward', 'left', 'right']
    valid_inputs = {
        'light': TrafficLight.valid_states,
        'oncoming': valid_actions,
        'left': valid_actions,
        'right': valid_actions
    }
    valid_headings = [(1, 0), (0, -1), (-1, 0), (0, 1)]  # E, N, W, S
    hard_time_limit = -100  # Set a hard time limit even if deadline is not enforced.

    def __init__(self, verbose=False, num_dummies=100, grid_size=(8, 6)):
        self.num_dummies = num_dummies  # Number of dummy driver agents in the environment
        self.verbose = verbose  # If debug output should be given

        # Initialize simulation variables
        self.done = False
        self.t = 0
        self.agent_states = OrderedDict()
        self.step_data = {}
        self.success = None

        # Road network
        self.grid_size = grid_size  # (columns, rows)
        self.bounds = (1, 2, self.grid_size[0], self.grid_size[1] + 1)
        self.block_size = 100
        self.hang = 0.6
        self.intersections = OrderedDict()
        self.roads = []
        for x in xrange(self.bounds[0], self.bounds[2] + 1):
            for y in xrange(self.bounds[1], self.bounds[3] + 1):
                self.intersections[(x, y)] = TrafficLight(
                )  # A traffic light at each intersection

        for a in self.intersections:
            for b in self.intersections:
                if a == b:
                    continue
                if (abs(a[0] - b[0]) +
                        abs(a[1] - b[1])) == 1:  # L1 distance = 1
                    self.roads.append((a, b))

        # Add environment boundaries
        for x in xrange(self.bounds[0], self.bounds[2] + 1):
            self.roads.append(
                ((x, self.bounds[1] - self.hang), (x, self.bounds[1])))
            self.roads.append(
                ((x, self.bounds[3] + self.hang), (x, self.bounds[3])))
        for y in xrange(self.bounds[1], self.bounds[3] + 1):
            self.roads.append(
                ((self.bounds[0] - self.hang, y), (self.bounds[0], y)))
            self.roads.append(
                ((self.bounds[2] + self.hang, y), (self.bounds[2], y)))

        # Create dummy agents
        for i in xrange(self.num_dummies):
            self.create_agent(DummyAgent)

        # Primary agent and associated parameters
        self.primary_agent = None  # to be set explicitly
        self.enforce_deadline = False

        # Trial data (updated at the end of each trial)
        self.trial_data = {
            'testing': False,  # if the trial is for testing a learned policy
            'initial_distance': 0,  # L1 distance from start to destination
            'initial_deadline': 0,  # given deadline (time steps) to start with
            'net_reward': 0.0,  # total reward earned in current trial
            'final_deadline':
            None,  # deadline value (time remaining) at the end
            'actions': {
                0: 0,
                1: 0,
                2: 0,
                3: 0,
                4: 0
            },  # violations and accidents
            'success': 0  # whether the agent reached the destination in time
        }

    def create_agent(self, agent_class, *args, **kwargs):
        """ When called, create_agent creates an agent in the environment. """

        agent = agent_class(self, *args, **kwargs)
        self.agent_states[agent] = {
            'location': random.choice(self.intersections.keys()),
            'heading': (0, 1)
        }
        return agent

    def set_primary_agent(self, agent, enforce_deadline=False):
        """ When called, set_primary_agent sets 'agent' as the primary agent.
            The primary agent is the smartcab that is followed in the environment. """

        self.primary_agent = agent
        agent.primary_agent = True
        self.enforce_deadline = enforce_deadline

    def reset(self, testing=False, trial=1):
        """ This function is called at the beginning of a new trial. """

        self.done = False
        self.t = 0

        # Reset status text
        self.step_data = {}

        # Reset traffic lights
        for traffic_light in self.intersections.itervalues():
            traffic_light.reset()

        # Pick a start and a destination
        start = random.choice(self.intersections.keys())
        destination = random.choice(self.intersections.keys())

        # Ensure starting location and destination are not too close
        while self.compute_dist(start, destination) < 4:
            start = random.choice(self.intersections.keys())
            destination = random.choice(self.intersections.keys())

        start_heading = random.choice(self.valid_headings)
        distance = self.compute_dist(start, destination)
        deadline = distance * 5  # 5 time steps per intersection away
        if self.verbose:  # Debugging
            print "Environment.reset(): Trial set up with start = {}, destination = {}, deadline = {}".format(
                start, destination, deadline)

        # Create a map of all possible initial positions
        positions = dict()
        for location in self.intersections:
            positions[location] = list()
            for heading in self.valid_headings:
                positions[location].append(heading)

        # Initialize agent(s)
        for agent in self.agent_states.iterkeys():

            if agent is self.primary_agent:
                self.agent_states[agent] = {
                    'location': start,
                    'heading': start_heading,
                    'destination': destination,
                    'deadline': deadline
                }
            # For dummy agents, make them choose one of the available
            # intersections and headings still in 'positions'
            else:
                intersection = random.choice(positions.keys())
                heading = random.choice(positions[intersection])
                self.agent_states[agent] = {
                    'location': intersection,
                    'heading': heading,
                    'destination': None,
                    'deadline': None
                }
                # Now delete the taken location and heading from 'positions'
                positions[intersection] = list(
                    set(positions[intersection]) - set([heading]))
                if not positions[intersection]:  # No headings available for intersection
                    del positions[intersection]  # Delete the intersection altogether

            agent.reset(destination=(destination
                                     if agent is self.primary_agent else None),
                        testing=testing,
                        trial=trial)
            if agent is self.primary_agent:
                # Reset metrics for this trial (step data will be set during the step)
                self.trial_data['testing'] = testing
                self.trial_data['initial_deadline'] = deadline
                self.trial_data['final_deadline'] = deadline
                self.trial_data['net_reward'] = 0.0
                self.trial_data['actions'] = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}
                self.trial_data['parameters'] = {
                    'e': agent.epsilon,
                    'a': agent.alpha
                }
                self.trial_data['success'] = 0

    def step(self):
        """ This function is called when a time step is taken turing a trial. """

        # Pretty print to terminal
        print ""
        print "/-------------------"
        print "| Step {} Results".format(self.t)
        print "\-------------------"
        print ""

        if self.verbose:  # Debugging
            print "Environment.step(): t = {}".format(self.t)

        # Update agents, primary first
        if self.primary_agent is not None:
            self.primary_agent.update()

        for agent in self.agent_states.iterkeys():
            if agent is not self.primary_agent:
                agent.update()

        # Update traffic lights
        for intersection, traffic_light in self.intersections.iteritems():
            traffic_light.update(self.t)

        if self.primary_agent is not None:
            # Agent has taken an action: reduce the deadline by 1
            agent_deadline = self.agent_states[
                self.primary_agent]['deadline'] - 1
            self.agent_states[self.primary_agent]['deadline'] = agent_deadline

            if agent_deadline <= self.hard_time_limit:
                self.done = True
                self.success = False
                if self.verbose:  # Debugging
                    print "Environment.step(): Primary agent hit hard time limit ({})! Trial aborted.".format(
                        self.hard_time_limit)
            elif self.enforce_deadline and agent_deadline <= 0:
                self.done = True
                self.success = False
                if self.verbose:  # Debugging
                    print "Environment.step(): Primary agent ran out of time! Trial aborted."

        self.t += 1

    def sense(self, agent):
        """ This function is called when information is requested about the sensor
            inputs from an 'agent' in the environment. """

        assert agent in self.agent_states, "Unknown agent!"

        state = self.agent_states[agent]
        location = state['location']
        heading = state['heading']
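        # The light is green when the agent's heading lies along the axis the
        # intersection currently allows (state True appears to mean the
        # north-south direction, given the heading convention above).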
        light = 'green' if (self.intersections[location].state
                            and heading[1] != 0) or (
                                (not self.intersections[location].state)
                                and heading[0] != 0) else 'red'

        # Populate oncoming, left, right
        oncoming = None
        left = None
        right = None
        for other_agent, other_state in self.agent_states.iteritems():
            if agent == other_agent or location != other_state['location'] or (
                    heading[0] == other_state['heading'][0]
                    and heading[1] == other_state['heading'][1]):
                continue
            # For dummy agents, ignore the primary agent
            # This is because the primary agent is not required to follow the waypoint
            if other_agent == self.primary_agent:
                continue
            other_heading = other_agent.get_next_waypoint()
            if (heading[0] * other_state['heading'][0] +
                    heading[1] * other_state['heading'][1]) == -1:
                if oncoming != 'left':  # we don't want to override oncoming == 'left'
                    oncoming = other_heading
            elif (heading[1] == other_state['heading'][0]
                  and -heading[0] == other_state['heading'][1]):
                if right != 'forward' and right != 'left':  # we don't want to override right == 'forward' or 'left'
                    right = other_heading
            else:
                if left != 'forward':  # we don't want to override left == 'forward'
                    left = other_heading

        return {
            'light': light,
            'oncoming': oncoming,
            'left': left,
            'right': right
        }

    def get_deadline(self, agent):
        """ Returns the deadline remaining for an agent. """

        return self.agent_states[agent][
            'deadline'] if agent is self.primary_agent else None

    def act(self, agent, action):
        """ Consider an action and perform the action if it is legal.
            Receive a reward for the agent based on traffic laws. """

        assert agent in self.agent_states, "Unknown agent!"
        assert action in self.valid_actions, "Invalid action!"

        state = self.agent_states[agent]
        location = state['location']
        heading = state['heading']
        light = 'green' if (self.intersections[location].state
                            and heading[1] != 0) or (
                                (not self.intersections[location].state)
                                and heading[0] != 0) else 'red'
        inputs = self.sense(agent)

        # Assess whether the agent can move based on the action chosen.
        # Either the action is okay to perform, or falls under 4 types of violations:
        # 0: Action okay
        # 1: Minor traffic violation
        # 2: Major traffic violation
        # 3: Minor traffic violation causing an accident
        # 4: Major traffic violation causing an accident
        violation = 0

        # Reward scheme
        # First initialize reward uniformly random from [-1, 1]
        reward = 2 * random.random() - 1

        # Create a penalty factor as a function of remaining deadline
        # Scales reward multiplicatively from [0, 1]
        fnc = self.t * 1.0 / (
            self.t + state['deadline']) if agent.primary_agent else 0.0
        gradient = 10

        # No penalty given to an agent that has no enforced deadline
        penalty = 0

        # If the deadline is enforced, give a penalty based on time remaining
        if self.enforce_deadline:
            penalty = (math.pow(gradient, fnc) - 1) / (gradient - 1)
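            # For example, with gradient = 10 the penalty is 0 when no time has
            # elapsed (fnc = 0), about 0.24 at the halfway point (fnc = 0.5),
            # and 1.0 once the remaining deadline reaches zero (fnc -> 1).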

        # Agent wants to drive forward:
        if action == 'forward':
            if light != 'green':  # Running red light
                violation = 2  # Major violation
                if inputs['left'] == 'forward' or inputs[
                        'right'] == 'forward':  # Cross traffic
                    violation = 4  # Accident

        # Agent wants to drive left:
        elif action == 'left':
            if light != 'green':  # Running a red light
                violation = 2  # Major violation
                if inputs['left'] == 'forward' or inputs[
                        'right'] == 'forward':  # Cross traffic
                    violation = 4  # Accident
                elif inputs[
                        'oncoming'] == 'right':  # Oncoming car turning right
                    violation = 4  # Accident
            else:  # Green light
                if inputs['oncoming'] == 'right' or inputs[
                        'oncoming'] == 'forward':  # Incoming traffic
                    violation = 3  # Accident
                else:  # Valid move!
                    heading = (heading[1], -heading[0])  # turn left: E -> N -> W -> S -> E

        # Agent wants to drive right:
        elif action == 'right':
            if light != 'green' and inputs[
                    'left'] == 'forward':  # Cross traffic
                violation = 3  # Accident
            else:  # Valid move!
                heading = (-heading[1], heading[0])  # turn right: E -> S -> W -> N -> E

        # Agent wants to perform no action:
        elif action is None:
            if light == 'green':
                violation = 1  # Minor violation

        # Did the agent attempt a valid move?
        if violation == 0:
            if action == agent.get_next_waypoint(
            ):  # Was it the correct action?
                reward += 2 - penalty  # (2, 1)
            elif action is None and light != 'green' and agent.get_next_waypoint(
            ) == 'right':
                # valid action but incorrect (idling at red light, when we should have gone right on red)
                reward += 1 - penalty  # (1, 0)
            elif action is None and light != 'green':  # Was the agent stuck at a red light?
                reward += 2 - penalty  # (2, 1)
            else:  # Valid but incorrect
                reward += 1 - penalty  # (1, 0)

            # Move the agent
            if action is not None:
                location = (
                    (location[0] + heading[0] - self.bounds[0]) %
                    (self.bounds[2] - self.bounds[0] + 1) + self.bounds[0],
                    (location[1] + heading[1] - self.bounds[1]) %
                    (self.bounds[3] - self.bounds[1] + 1) + self.bounds[1]
                )  # wrap-around
                state['location'] = location
                state['heading'] = heading
        # Agent attempted invalid move
        else:
            if violation == 1:  # Minor violation
                reward += -5
            elif violation == 2:  # Major violation
                reward += -10
            elif violation == 3:  # Minor accident
                reward += -20
            elif violation == 4:  # Major accident
                reward += -40

        # Did agent reach the goal after a valid move?
        if agent is self.primary_agent:
            if state['location'] == state['destination']:
                # Did agent get to destination before deadline?
                if state['deadline'] >= 0:
                    self.trial_data['success'] = 1

                # Stop the trial
                self.done = True
                self.success = True

                if self.verbose:  # Debugging
                    print "Environment.act(): Primary agent has reached destination!"

            if self.verbose:  # Debugging
                print "Environment.act() [POST]: location: {}, heading: {}, action: {}, reward: {}".format(
                    location, heading, action, reward)

            # Update metrics
            self.step_data['t'] = self.t
            self.step_data['violation'] = violation
            self.step_data['state'] = agent.get_state()
            self.step_data['deadline'] = state['deadline']
            self.step_data['waypoint'] = agent.get_next_waypoint()
            self.step_data['inputs'] = inputs
            self.step_data['light'] = light
            self.step_data['action'] = action
            self.step_data['reward'] = reward

            self.trial_data['final_deadline'] = state['deadline'] - 1
            self.trial_data['net_reward'] += reward
            self.trial_data['actions'][violation] += 1

            if self.verbose:  # Debugging
                print "Environment.act(): Step data: {}".format(self.step_data)
        return reward

    def compute_dist(self, a, b):
        """ Compute the Manhattan (L1) distance of a spherical world. """

        dx1 = abs(b[0] - a[0])
        dx2 = abs(self.grid_size[0] - dx1)
        dx = dx1 if dx1 < dx2 else dx2

        dy1 = abs(b[1] - a[1])
        dy2 = abs(self.grid_size[1] - dy1)
        dy = dy1 if dy1 < dy2 else dy2

        return dx + dy
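
# Standalone restatement (a sketch, not part of the class above) of the
# wrap-around distance: along each axis the shorter of the direct path and
# the wrapped path is taken, here for the default 8x6 grid.
def toroidal_l1(a, b, grid_size=(8, 6)):
    dx = min(abs(b[0] - a[0]), grid_size[0] - abs(b[0] - a[0]))
    dy = min(abs(b[1] - a[1]), grid_size[1] - abs(b[1] - a[1]))
    return dx + dy

print(toroidal_l1((1, 2), (8, 2)))  # -> 1: wrapping beats the 7-step direct path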
Example #60
def process_data(RMticket, Application, affiliate, Prod_status):
    # Connect to the LGDOP MongoDB instance
    connection = pymongo.MongoClient(
        'mongodb://' + clarify_mongo_user + ':' + clarify_mongo_pwd +
        '@mongodb:27017/libertyglobal-bss-clarify?ssl=false')
    db = connection['libertyglobal-bss-clarify']
    coll_name = affiliate + '-ci'
    print RMticket
    print coll_name
    collection_ci = db[coll_name]
    dict_output_list = OrderedDict()  # Final output
    # Get the list of files touched by RMticket from the CI collection
    list_files = []
    for doc in collection_ci.find():
        for each_rm in doc['RM_ID'].keys():
            if each_rm == RMticket:
                for each_file in doc['RM_ID'][each_rm]['component'].keys():
                    if each_file not in list_files:
                        list_files.append(each_file)
    # Get dependent RMs (those touching the same files) from the CI collection
    dict_ci = {}
    for doc in collection_ci.find():
        for each_rm in doc['RM_ID'].keys():
            for each_file in doc['RM_ID'][each_rm]['component'].keys():
                if each_file in list_files:
                    dict_ci[each_rm] = doc['RM_ID'][each_rm]['build_time']

    print dict_ci
    print db
    print affiliate
    sorted_patches_dict = sorted_dict(dict_ci)
    print 'sorted_patches_dict:-'
    print sorted_patches_dict
    prod_sorted_patches = Env_patches_list(db, affiliate, 'PROD',
                                           sorted_patches_dict)
    print 'prod_sorted_patches:-'
    print prod_sorted_patches

    for key in prod_sorted_patches:
        sorted_patches_dict.pop(key, None)

    uat_sorted_patches = Env_patches_list(db, affiliate, 'UAT',
                                          sorted_patches_dict)
    print 'uat_sorted_patches:-'
    print uat_sorted_patches

    for key in uat_sorted_patches:
        sorted_patches_dict.pop(key, None)

    sit_sorted_patches = Env_patches_list(db, affiliate, 'JIT',
                                          sorted_patches_dict)
    print 'sit_sorted_patches:-'
    print sit_sorted_patches

    for key in sit_sorted_patches:
        sorted_patches_dict.pop(key, None)

    print sorted_patches_dict

    if Prod_status == 'Yes':
        dict_output_list['PROD'] = prod_sorted_patches

    dict_output_list['UAT'] = uat_sorted_patches
    dict_output_list['JIT'] = sit_sorted_patches
    dict_output_list['Not Installed in JIT&UAT&PROD'] = sorted_patches_dict

    bolstatus = 0
    for key, value in dict_output_list.iteritems():
        if key in ('PROD', 'UAT', 'JIT'):
            for key1, value1 in value.iteritems():
                if key1 == RMticket:
                    bolstatus = 1
                    value[key1] = value1 + '-**-' + 'Current'
                else:
                    if bolstatus == 1:
                        value[key1] = value1 + '-**-' + 'Before'
                    else:
                        value[key1] = value1 + '-**-' + 'After'

    return dict_output_list
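
# Hedged sketch (the real helper is not shown in this example) of the
# sorted_dict() function process_data relies on; it is assumed to order the
# RM -> build_time mapping chronologically into an OrderedDict.
from collections import OrderedDict

def sorted_dict(d):
    return OrderedDict(sorted(d.items(), key=lambda kv: kv[1]))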