Example #1
def article(e):
    # get the data
    author = util.author_or_editor(e, 5)
    year = util.get(e, 'year', 'no year')
    title = util.get(e, 'title', '=no title=')
    journal = util.get(e, 'journal', '=no journal=')
    volume = util.get(e, 'volume', None)
    pages = util.get(e, 'pages', None)
    bibtex_key = unicode(e['ID'])
    bibtex_type = e['ENTRYTYPE']
    # build the string
    text = str()
    text += author
    text += ' '
    text += '(%s)' % year
    text += ' '
    text += "'%s'" % title
    text += ', '
    text += '%s' % journal
    if volume:
        text += ', '
        text += volume
    if pages:
        text += ', '
        text += 'pp. ' + pages
    text += ' '
    text += '@' + bibtex_key
    text += ' '
    text += '[%s]' % bibtex_type
    # remove latex markup crap
    text = util.remove_latex_crap(text)
    return text
Example #2
    def poll(self, timestamp, filters):
        logging.info('Polling feed "%s"' % self.url)
        result = []
        self.last_poll = timestamp
        username = util.decode_password(self.username)
        password = util.decode_password(self.password)
        d = util.parse(self.url, username, password, self.etag, self.modified)
        self.etag = util.get(d, 'etag', None)
        self.modified = util.get(d, 'modified', None)
        feed = util.get(d, 'feed', None)
        if feed:
            self.title = self.title or util.get(feed, 'title', '')
            self.link = self.link or util.get(feed, 'link', self.url)
        entries = util.get(d, 'entries', [])
        for entry in reversed(entries):
            id = create_id(entry)
            if id in self.id_set:
                continue
            self.item_count += 1
            self.id_list.append(id)
            self.id_set.add(id)
            item = Item(self, id)
            item.title = util.format(util.get(entry, 'title', ''), settings.POPUP_TITLE_LENGTH)
            item.description = util.format(util.get(entry, 'description', ''), settings.POPUP_BODY_LENGTH)
            item.timestamp = util.get_pubDate(entry, time.gmtime())  # default should be current time
            item.link = util.get(entry, 'link', '')
            item.author = util.format(util.get(entry, 'author', ''))  # TODO: max length
            if all(filter.filter(item) for filter in filters):
                result.append(item)
        self.clean_cache(settings.FEED_CACHE_SIZE)
        return result
Example #3
def book(e):
    # get the data
    author = util.author_or_editor(e, 5)
    year = util.get(e, 'year', 'no year')
    title = util.get(e, 'title', '=no title=')
    publisher = util.get(e, 'publisher', None)
    address = util.get(e, 'address', None)
    bibtex_key = unicode(e['ID'])
    bibtex_type = e['ENTRYTYPE']
    # build the string
    text = str()
    text += author
    text += ' '
    text += '(%s)' % year
    text += ' '
    text += '%s' % title
    if address or publisher:
        text += ', '
        if address:
            text += address
            text += ': '
        if publisher:
            text += publisher
    text += ' '
    text += '@' + bibtex_key
    text += ' '
    text += '[%s]' % bibtex_type
    # remove latex markup crap
    text = util.remove_latex_crap(text)
    return text
Example #4
    def _load_builds(self, conf, os):
        builds = {}

        os_conf = get(conf, os, None)
        if not os_conf:
            return

        default_build_found = False
        default_build_name = get(os_conf, 'default', None)
        if not default_build_name:
            raise CapyException("BDS is missing default build for '%s'" % os)

        for name, info in os_conf.iteritems():
            if name == 'default':
                continue

            info['build_dir'] = info.get('build_dir', conf['build_dir'])
            build = Build(os, name, info)
            self._validate_version(build)

            if name == default_build_name:
                build.is_default = True
                default_build_found = True
            builds[name] = build

        if not default_build_found:
            raise CapyException("'%s' default build '%s' was not found" % (os, default_build_name))

        self.builds[os] = builds
Example #5
def padding_xform(xf, isfinal):
    vars = {}
    xout = {'variations': vars, 'pre_affine': {'angle': 45}}
    if isfinal:
        xout.update(weight=0, color_speed=0)
    if get(xf, 45, 'pre_affine', 'spread') > 90:
        xout['pre_affine'] = {'angle': 135, 'spread': 135}
    if get(xf, 45, 'post_affine', 'spread') > 90:
        xout['post_affine'] = {'angle': 135, 'spread': 135}

    for k in xf.get('variations', {}):
        if k in hole_variations:
            # Attempt to correct for some known-ugly variations.
            xout['pre_affine']['angle'] += 180
            vars['linear'] = dict(weight=-1)
            return xout
        if k in ident_variations:
            # Try to use non-linear variations whenever we can
            vars[k] = dict([(vk, vv.default)
                            for vk, vv in variations.var_params[k].items()])

    if vars:
        n = float(len(vars))
        for k in vars:
            vars[k]['weight'] = 1 / n
    else:
        vars['linear'] = dict(weight=1)

    return xout
Example #6
def fight():
    global ct
    global itemUsed
    if itemUsed==False:
        if act.values()[0][0]>ct.values()[0][0]:
            global ch
            dif=(act.values()[0][0]-ct.values()[0][0])*10
            ch-=dif
        else:
            ch-=2.5
        enmDif=0
        enmDif=(ct.values()[0][0]-act.values()[0][0])+0.5
        if enmDif<0.5:
            enmDif=0.5
        act.values()[0][3]-=enmDif
    if ch<=0:
        return render_template('lose.html')
    elif act.values()[0][3]>0:
        ct=util.get(ct.keys()[0])
        return render_template('main.html',story=act.keys()[0],instruction='Use your tool or flee',ct=ct.keys()[0],att=ct.values()[0][0],spd=ct.values()[0][1],scav=ct.values()[0][2],opt='yes',health=ch,enmHealth=(act.values()[0][3])*10,photo=act.values()[0][4])
    else:
        tool=[]
        for i in range(3):#Selects 3 new random tools
            tool.append(util.tool().keys()[0])
        global event_number
        event_number+=1
        if event_number>=10:
            return redirect('/final')
        tool.append(ct.keys()[0])
        ct=util.get(ct.keys()[0])
        return render_template('main.html',story='Pick a new tool',tools=tool,ct=ct.keys()[0],att=ct.values()[0][0],spd=ct.values()[0][1],scav=ct.values()[0][2],health=ch,action="You win!",enmHealth=0.0)
Example #7
def write_results(m, tag=None):
    # format the tag to append to file names (if any)
    if tag is not None:
        t = "_"+str(tag)
    else:
        t = ""
        
    # write out results
    util.write_table(m, m.TIMEPOINTS,
        output_file=os.path.join(output_dir, "dispatch{t}.txt".format(t=t)), 
        headings=("timepoint_label",)+tuple(m.PROJECTS),
        values=lambda m, t: (m.tp_timestamp[t],) + tuple(
            get(m.DispatchProj, (p, t), 0.0)
            for p in m.PROJECTS
        )
    )
    util.write_table(
        m, m.LOAD_ZONES, m.TIMEPOINTS, 
        output_file=os.path.join(output_dir, "energy_sources{t}.txt".format(t=t)), 
        headings=
            ("load_zone", "timepoint_label")
            +tuple(m.FUELS)
            +tuple(m.NON_FUEL_ENERGY_SOURCES)
            +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES)
            +tuple(m.LZ_Energy_Components_Produce)
            +tuple(m.LZ_Energy_Components_Consume)
            +("marginal_cost",),
        values=lambda m, z, t: 
            (z, m.tp_timestamp[t]) 
            +tuple(
                sum(get(m.DispatchProj, (p, t), 0.0) for p in m.PROJECTS_BY_FUEL[f])
                for f in m.FUELS
            )
            +tuple(
                sum(get(m.DispatchProj, (p, t), 0.0) for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s])
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            +tuple(
                sum(
                    get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchProj, (p, t), 0.0) 
                    for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]
                )
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                    for component in m.LZ_Energy_Components_Produce)
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                    for component in m.LZ_Energy_Components_Consume)
            +(m.dual[m.Energy_Balance[z, t]]/m.bring_timepoint_costs_to_base_year[t],)
    )
    
    built_proj = tuple(set(
        pr for pe in m.PERIODS for pr in m.PROJECTS if value(m.ProjCapacity[pr, pe]) > 0.001
    ))
    util.write_table(m, m.PERIODS,
        output_file=os.path.join(output_dir, "capacity{t}.txt".format(t=t)), 
        headings=("period",)+built_proj,
        values=lambda m, pe: (pe,) + tuple(m.ProjCapacity[pr, pe] for pr in built_proj)
    )
Example #8
def blend(src, dst, edit={}):
    """
    Blend two nodes to produce an animation.

    ``src`` and ``dst`` are the source and destination node specs for the
    animation. These should be plain node dicts (hierarchical, pre-merged,
    and adjusted for loop temporal offset).

    ``edit`` is an edge dict, also hierarchical and pre-merged. (It can be
    empty, in violation of the spec, to support rendering straight from nodes
    without having to insert anything into the genome database.)

    Returns the animation spec as a plain dict.
    """
    # By design, the blend element will contain only scalar values (no
    # splines or hierarchy), so this can be done blindly
    opts = {}
    for d in src, dst, edit:
        opts.update(d.get('blend', {}))
    opts = Wrapper(opts, specs.blend)

    blended = merge_nodes(specs.node, src, dst, edit, opts.duration)
    name_map = sort_xforms(src['xforms'], dst['xforms'], opts.xform_sort,
                           explicit=opts.xform_map)

    blended['xforms'] = {}
    for (sxf_key, dxf_key) in name_map:
        bxf_key = (sxf_key or 'pad') + '_' + (dxf_key or 'pad')
        xf_edits = merge_edits(specs.xform,
                get(edit, {}, 'xforms', 'src', sxf_key),
                get(edit, {}, 'xforms', 'dst', dxf_key))
        sxf = src['xforms'].get(sxf_key)
        dxf = dst['xforms'].get(dxf_key)
        if sxf_key == 'dup':
            sxf = dxf
            xf_edits.setdefault('weight', []).extend([0, 0])
        if dxf_key == 'dup':
            dxf = sxf
            xf_edits.setdefault('weight', []).extend([1, 0])
        blended['xforms'][bxf_key] = blend_xform(
                src['xforms'].get(sxf_key),
                dst['xforms'].get(dxf_key),
                xf_edits, opts.duration)

    if 'final_xform' in src or 'final_xform' in dst:
        blended['final_xform'] = blend_xform(src.get('final_xform'),
                dst.get('final_xform'), edit.get('final_xform'),
                opts.duration, True)

    # TODO: write 'info' section
    # TODO: palflip
    blended['type'] = 'animation'
    blended.setdefault('time', {})['duration'] = opts.duration
    return blended
Example #9
    def __init__(self, conf):
        if not conf:
            conf = {}

        conf['output_dir'] = get(conf, 'output_dir', path.join(TMP_DIR))
        conf['env'] = get(conf, 'env', {})
        conf['before'] = get(conf, 'before', [])
        TestAction.validate(conf['before'])
        conf['after'] = get(conf, 'after', [])
        TestAction.validate(conf['after'])

        self.tests = self.load_tests(conf)
Example #10
    def __init__(self, name, conf):
        self.name = name
        self.output_dir = conf['output_dir']
        self.env = conf['env']

        self.run = get(conf, 'run', None)
        if not self.run:
            raise CapyException("Test '%s' is missing a 'run: ...'" % name)

        self.before = get(conf, 'before', [])
        TestAction.validate(self.before)
        self.after = get(conf, 'after', [])
        TestAction.validate(self.after)
Example #11
def create_id(entry):
    keys = ['id', 'link', 'title']
    values = list(util.get(entry, key, None) for key in keys)
    pubDate_value = util.get_pubDate(entry, None) # Default should be None
    values.append(pubDate_value)
    values = tuple(values)
    return values if any(values) else uuid.uuid4().hex
Example #12
def event():
    if 'item' in request.form:
        inv.append(request.form['item'])
    else:
        global ct
        ct=util.get(request.form['tool'])
    if request.method=="POST":
        global act
        act={}
        act=util.newEvent()#Makes a global variable with the current event
        ct=util.get(ct.keys()[0])
        return render_template('main.html',story=act.keys()[0],instruction='Use your tool or flee',ct=ct.keys()[0],att=ct.values()[0][0],spd=ct.values()[0][1],scav=ct.values()[0][2],opt='yes',health=ch,enmHealth=(act.values()[0][3])*10,photo=act.values()[0][4])
    elif request.method=="GET":#util.get() stores the current tool in util.py
        return redirect('/')
    else:
        return 'yo'
Example #13
def newtool():
    global ct
    if request.method=='GET':
        return redirect('/')
    if event_number>=15:
        return redirect('/final')
    global dif
    dif=0
    if request.form['choice']=='Run away':
        if act.values()[0][1]>ct.values()[0][1]:#If the user runs away and their speed is lower than the event's, they lose the difference between the event's speed and their speed, times 10
            global ch
            dif=(act.values()[0][1]-ct.values()[0][1])*10
            ch-=dif
    elif request.form['choice']=='Use tool':
        if act.values()[0][2]==-1:#If the event has no scavenging (is a fight), and the user uses tool, redirects to route "/fight"
            global itemUsed
            itemUsed=False
            return redirect("/fight")
        else:#If the event has scavenging (is a store) redirects user to /store route
            if act.values()[0][2]<=ct.values()[0][2]:
                return redirect('/store')
    elif request.form['choice']=='Use potion':
        return redirect("/inv")
    if ch<=0:
        return render_template('lose.html')
    tool=[]
    for i in range(3):#Selects 3 new random tools
        tool.append(util.tool().keys()[0])
    tool.append(ct.keys()[0])
    ct=util.get(ct.keys()[0])
    return render_template('main.html',story='Pick a new tool',tools=tool,ct=ct.keys()[0],att=ct.values()[0][0],spd=ct.values()[0][1],scav=ct.values()[0][2],health=ch,enmHealth=(act.values()[0][3])*10,action="You lost %s health"%dif)
Example #14
def write_results(m):
    outputs_dir = m.options.outputs_dir
    tag = filename_tag(m)
            
    avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES)
    last_bid = m.DR_BID_LIST.last()
    
    util.write_table(
        m, m.LOAD_ZONES, m.TIMEPOINTS,
        output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)), 
        headings=
            ("load_zone", "period", "timepoint_label")
            +tuple(m.FUELS)
            +tuple(m.NON_FUEL_ENERGY_SOURCES)
            +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES)
            +tuple(m.Zone_Power_Injections)
            +tuple(m.Zone_Power_Withdrawals)
            +("marginal_cost","final_marginal_cost","price","bid_load","peak_day","base_load","base_price"),
        values=lambda m, z, t: 
            (z, m.tp_period[t], m.tp_timestamp[t]) 
            +tuple(
                sum(get(m.DispatchGenByFuel, (p, t, f), 0.0) for p in m.GENS_BY_FUEL[f])
                for f in m.FUELS
            )
            +tuple(
                sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s])
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            +tuple(
                sum(
                    get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) 
                    for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s]
                )
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections)
            +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals)
            +(
                m.prev_marginal_cost[z, t],
                electricity_marginal_cost(m, z, t),
                m.dr_price[last_bid, z, t],
                m.dr_bid[last_bid, z, t],
                'peak' if m.ts_scale_to_year[m.tp_ts[t]] < 0.5*avg_ts_scale else 'typical',
                m.base_data_dict[z, t][0],
                m.base_data_dict[z, t][1],
            )
    )
Example #15
def sort_xforms(sxfs, dxfs, sortmethod, explicit=[]):
    # Walk through the explicit pairs, popping previous matches from the
    # forward (src=>dst) and reverse (dst=>src) maps
    fwd, rev = {}, {}
    for sx, dx in explicit:
        if sx not in ("pad", "dup") and sx in fwd:
            rev.pop(fwd.pop(sx, None), None)
        if dx not in ("pad", "dup") and dx in rev:
            fwd.pop(rev.pop(dx, None), None)
        fwd[sx] = dx
        rev[dx] = sx

    for sd in sorted(fwd.items()):
        yield sd

    # Classify the remaining xforms. Currently we classify based on whether
    # the pre- and post-affine transforms are flipped
    scl, dcl = {}, {}
    for (cl, xfs, exp) in [(scl, sxfs, fwd), (dcl, dxfs, rev)]:
        for k, v in xfs.items():
            if k in exp: continue
            xcl = (get(v, 45, 'pre_affine', 'spread') > 90,
                   get(v, 45, 'post_affine', 'spread') > 90)
            cl.setdefault(xcl, []).append(k)

    def sort(keys, dct, snd=False):
        if sortmethod in ('weight', 'weightflip'):
            sortf = lambda k: dct[k].get('weight', 0)
        elif sortmethod == 'color':
            sortf = lambda k: dct[k].get('color', 0)
        else:
            # 'natural' key-based sort
            sortf = halfhearted_human_sort_key
        return sorted(keys, key=sortf)

    for cl in set(scl.keys() + dcl.keys()):
        ssort = sort(scl.get(cl, []), sxfs)
        dsort = sort(dcl.get(cl, []), dxfs)
        if sortmethod == 'weightflip':
            dsort = reversed(dsort)
        for sd in izip_longest(ssort, dsort):
            yield sd
Example #16
def getCarPersonMappings(ignore):
    """Uses the GET on car-people:car-people resource to get all car-persons entry in the store using RESTCONF

    <note>
        This also returns the dummy entry created for routed RPC
        with personId being user0
    </note>
    """
    resp = util.get(settings.getCarPersonUrl(), "admin", "admin")
    print(resp)
    return resp
Example #17
def getPersons(ignore):
    """Uses the GET on people:people resource to get all persons in the store using RESTCONF

    <note>
        This also returns the dummy entry created for routed RPC
        with personId being user0
    </note>
    """
    resp = util.get(settings.getPersonsUrl(), "admin", "admin")
    print(resp)
    return resp
Example #18
def with_game(response):
    game_id = get("data.game_id", response)
    requestPool.request(Routes.join_game, {
        "user_id": 5,
        "game_id": game_id
    }, print)
    requestPool.request(Routes.move, {
        "user_id": 2,
        "game_id": game_id,
        "column_id": 0
    }, print)
    requestPool.request(Routes.move, {
        "user_id": 5,
        "game_id": game_id,
        "column_id": 0
    }, print)
    requestPool.request(Routes.move, {
        "user_id": 2,
        "game_id": game_id,
        "column_id": 0
    }, print)
    requestPool.request(
        Routes.check,
        {
            "game_id":
            game_id,
            "board": [
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 0, 0],
                [2, 0, 0, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 0, 0],
            ],
        },
        print,
    )
    requestPool.request(
        Routes.check,
        {
            "game_id":
            game_id,
            "board": [
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [2, 0, 0, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 0, 0],
            ],
        },
        print,
    )
Example #19
def get_contacts(url, api_token):
    logger.debug("url: %s", (url,))
    l = list()
    if url and api_token:
        d = json.loads(util.get(url, api_token))
        for contact in d['contacts']:
            l.append(Contact(id=contact['id'],
                             first_name=contact['firstName'],
                             last_name=contact['lastName'],
                             email=contact['email']))

    return l
Example #20
    def __init__(self, conf, os_list):
        if not conf:
            raise CapyException('BDS configuration is missing')

        conf['build_dir'] = conf.get('build_dir', path.join(TMP_DIR + 'builds/'))
        self.token = get(conf, 'token', None)  # don't check token until it's needed
        self.customer = self._load(conf, 'customer')
        self.project = self._load(conf, 'project')

        self.builds = {}
        for os in os_list:
            self._load_builds(conf, os=os)
Example #21
def default(e):
    # get the data
    author = util.author_or_editor(e, 5)
    year = util.get(e, 'year', 'no year')
    title = util.get(e, 'title', '=no title=')
    bibtex_key = unicode(e['ID'])
    bibtex_type = e['ENTRYTYPE']
    # build the string
    text = str()
    text += author
    text += ' '
    text += '(%s)' % year
    text += ' '
    text += "'%s'" % title
    text += ' '
    text += '@' + bibtex_key
    text += ' '
    text += '[%s]' % bibtex_type
    # remove latex markup crap
    text = util.remove_latex_crap(text)
    return text
Example #22
def get_lists(url, api_token):
    logger.debug("url: %s", (url,))
    l = list()
    if url and api_token:
        d = json.loads(util.get(url, api_token))
        for _l in d['lists']:
            l.append(List(id=_l['id'],
                          name=_l['name'],
                          stringid=_l['stringid'],
                          sender_url=_l['sender_url'],
                          sender_reminder=_l['sender_reminder']))
    return l
Example #23
        def __init__(self, element):
            date_str = element.getElementsByTagName('trn:date-posted')[0].\
                getElementsByTagName('ts:date')[0].firstChild.data

            self.date = datetime.datetime.strptime(
                date_str, '%Y-%m-%d %H:%M:%S %z').replace(tzinfo=None)
            self.desc = util.get(element, 'trn:description')
            if self.desc is None:
                self.desc = ''

            split_elements = element.getElementsByTagName('trn:split')
            self.splits = [Split(self, x) for x in split_elements]
Example #24
    def drop_piece(Self, state, column):
        if not util.get("game.game_started", state):
            return state

        board = util.get("game.board", state)
        player = util.get("game.player_turn", state)
        if not logic.valid_move(column, board):
            return None
        util.path_set("game.board", logic.drop_piece(player, column, board),
                      state)
        util.path_set("game.player_turn", 1 if player == 2 else 2, state)

        new_board = util.get("game.board", state)
        winner = logic.check_win(new_board)
        valid_moves = logic.valid_moves(new_board)
        if winner or not len(valid_moves):
            util.path_set("game.winner", winner if winner else 3, state)
            util.path_set("game.game_started", False, state)

        if (not util.get("game.2_player_game", state)
                and util.get("game.player_turn", state) == 2):
            move = ai.generate_move(new_board)
            if move == -1:
                return None
            Self.drop_piece(state, move)

        return state
Example #25
    def train(self, training_data: TrainingData) -> None:
        x_train, y_train, x_val, y_val, vocab, class_to_i, i_to_class = preprocess_dataset(training_data, full_question=args.full_question, create_runs=args.create_runs)
        self.class_to_i = class_to_i
        self.i_to_class = i_to_class

        print('Batchifying data')
        train_batches = batchify(x_train, y_train, shuffle=True)
        val_batches = batchify(x_val, y_val, shuffle=False)
        self.model = ElmoModel(len(i_to_class), dropout=self.dropout)
        self.model = self.model.to(self.device)
        
        print(f'Model:\n{self.model}')
        parameters = list(self.model.classifier.parameters())
        for mix in self.model.elmo._scalar_mixes:
            parameters.extend(list(mix.parameters()))
        self.optimizer = Adam(parameters)
        self.criterion = nn.CrossEntropyLoss()
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=5, verbose=True, mode='max')


        temp_prefix = get_tmp_filename()
        self.model_file = f'{temp_prefix}.pt'

        print(f'Saving model to: {self.model_file}')
        log = get(__name__)
        manager = TrainingManager([
            BaseLogger(log_func=log.info), TerminateOnNaN(), EarlyStopping(monitor='test_acc', patience=10, verbose=1),
            MaxEpochStopping(100), ModelCheckpoint(create_save_model(self.model), self.model_file, monitor='test_acc')
        ])

        log.info('Starting training')

        epoch = 0
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_batches)
            random.shuffle(train_batches)

            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(val_batches, train=False)

            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc,
                test_time, test_loss, test_acc
            )

            if stop_training:
                log.info(' '.join(reasons))
                break
            else:
                self.scheduler.step(test_acc)
            epoch += 1
Example #26
def create_id(entry):
    """Build a stable identifier for a feed entry.

    Arguments:
        entry {dict} -- parsed feed entry

    Returns:
        tuple or str -- the (id, link, title) values when any are present,
        otherwise a random UUID hex string
    """

    keys = ['id', 'link', 'title']
    values = tuple(util.get(entry, key, None) for key in keys)
    return values if any(values) else uuid.uuid4().hex
Example #27
def store():
    global ct
    try:
        ct
    except NameError:
        return redirect('/')
    item=[]
    scav=ct.values()[0][2]-act.values()[0][2]#sets the value of scav to the user's scavenging minus the event's scavenging and generates that number of options for potions
    item.append('HPmk0')
    for i in range(int(scav)+1):
        item.append("HPmk"+str(i+1))
    ct=util.get(ct.keys()[0])
    return render_template('main.html',items=item,ct=ct.keys()[0],att=ct.values()[0][0],spd=ct.values()[0][1],scav=ct.values()[0][2],health=ch,enmHealth=(act.values()[0][3])*10)
Example #28
def get_campaigns(url, api_token):
    logger.debug("url: %s", (url,))
    l = list()
    if url and api_token:
        d = json.loads(util.get(url, api_token))
        for campaign in d['campaigns']:
            l.append(Campaign(id=campaign['id'],
                              type=campaign['type'],
                              schedule_date=campaign['sdate'],
                              status=campaign['status'],
                              public=campaign['public'],
                              track_links=campaign['tracklinks']))

    return l
Example #29
def get_datas(url):

    soup = BeautifulSoup(util.get(url, None))
    summary = soup.find("div", attrs={"class": "qur_r"})
    datas = {}
    lis = summary.findAll('li')

    for li in lis:
        key = li.get_text()
        data = li["data"].replace("@", "_")
        url = "http://www.ccb.com/cn/personal/interestv3/%s.html" % data
        value = get_rate(key, url)
        datas[key] = value

    return datas
Example #30
def get_messages(url, api_token):
    logger.debug("url: %s", (url,))
    l = list()
    if url and api_token:
        d = json.loads(util.get(url, api_token))
        for message in d['messages']:
            l.append(Message(id=message['id'],
                             from_name=message['fromname'],
                             from_email=message['fromemail'],
                             reply2=message['reply2'],
                             subject=message['subject'],
                             preheader_text=message['preheader_text'],
                             message=message['text']))

    return l
Example #31
        def __init__(self, parent_transaction, split_element):

            account = util.get(split_element, 'split:account')
            value = util.get(split_element, 'split:value')
            id = util.get(split_element, 'split:id')

            self.transaction = parent_transaction
            self.account = account
            amount_match = value.split('/')

            numerator = float(amount_match[0])
            denominator = float(amount_match[1])
            self.decimal_places = amount_match[1].count('0')
            self.amount = round(numerator / denominator, self.decimal_places)

            assertion_desc_match = re.search(
                '(' + args.assertion_amount_regex + ')', self.transaction.desc)

            if assertion_desc_match:
                assertion_amount_string = assertion_desc_match.group(0)
                self.assertion_amount = float(assertion_amount_string)
            else:
                self.assertion_amount = None
                return  # don't bother with the rest

            self.assertion_start = pandas.to_datetime('1900-01-01')

            if args.assertion_start_regex is not None:
                assertion_start_match = re.search(
                    '(' + args.assertion_start_regex + ')',
                    self.transaction.desc)

                if assertion_start_match:
                    assertion_start_string = assertion_start_match.group(0)
                    self.assertion_start = pandas.to_datetime(
                        assertion_start_string)
Example #32
async def text_to_audio(request):
    upper_req_params = parse_query_str_to_dict(request.query_string)

    upper_req_params["time_stamp"] = str(int(time.time()))
    upper_req_params["nonce_str"] = get_rand_str(12)

    app_key = upper_req_params["app_key"]
    del upper_req_params["app_key"]

    upper_req_params["sign"] = get_request_sign(upper_req_params, app_key)

    res = json.loads(
        get("https://api.ai.qq.com/fcgi-bin/aai/aai_tts", upper_req_params))
    print(res)
    res["data"]["speech"] = base64.b64decode(res["data"]["speech"])
    return web.json_response(res)
Example #33
def execute(appconfig, page, args):
    navigation = bconfig.load_config(page)
    if navigation['id'] == 'brick':
        return {'error': 'no action for %s' % (page)}

    tabs = navigation['tabs']
    ret = {}

    try:
        module = importlib.import_module(page)
    except Exception as e:
        print(e)
        print('[INFO] no module %s function found' % (page))
        module = None

    try:
        for tab in tabs:
            tabid = util.get(args, 'tabid', None)
            if tabid and tabid != tab['id']: continue

            for fetcher in ['sql', 'api', 'hive']:
                if fetcher not in tab: continue

                query = copy.deepcopy(tab[fetcher])

                if module and hasattr(module, 'preexecute'):
                    query, args = module.preexecute(appconfig, query, args)

                if module and hasattr(module, 'execute'):
                    data = module.execute(appconfig, query, args, fetcher)
                else:
                    data = _execute(appconfig, query, args, fetcher)

                if module and hasattr(module, 'postexecute'):
                    data = module.postexecute(appconfig, query, args, data)

            ret[tabid] = data
            break

    except Exception as e:
        print("[ERROR]", e)
        traceback.print_exc()

    return ret
Example #34
    def mpi_wrapper(contextMap):
        """

        :return:
        """
        cmd = ('%(mpirunBin)s '
               '-np %(np)s '
               '--allow-run-as-root '
               '%(wrapperBin)s '
               '%(appBin)s '
               '--config_file=%(yamlFile)s ')
        wrapperBin = contextMap['wrapperBin']
        appBin = contextMap['appBin']
        yamlFile = contextMap['yamlFile']
        np = contextMap['np']
        mpirunBin = os.path.join(util.get('TESTLIB_DIR'), 'bin/mpirun')
        cmd = cmd % locals()
        proc = Process()
        return proc.runXP(cmd)
Example #35
    def get(self, node_id):
        node = util.get(node_id)
        others = []
        likes = []
        dislikes = []

        if node:
            for rel in node.relationships.all():
                if rel.end != node:
                    if rel.type == 'likes':
                        likes.append(rel.end)
                    elif rel.type == 'dislikes':
                        dislikes.append(rel.end)
                    else:
                        others.append(rel.end)
        print likes, others
        self.write({
            'results': self.render_string('template/detail.html', node=node, others=others, likes=likes, dislikes=dislikes)
        })
Example #36
def invBoss():
    global ct
    if inv==[]:
        inv.append("HPmk0")
    if request.method=="POST":
        global ch
        heal=request.form['itemUsed']#If an item was just used, increases your health
        inv.remove(heal)
        restore=float(heal[-1])*5
        if restore != 0:
            ch+=restore+10
        if ch>100.0:
            ch=100.0#Ensures health does not go above 100
        global itemUsed
        itemUsed=True
        return redirect("/final")
    elif request.method=="GET":
        ct=util.get(ct.keys()[0])
        return render_template('final.html',itemInv=inv,ct=ct.keys()[0],health=ch,att=ct.values()[0][0],spd=ct.values()[0][1],scav=ct.values()[0][2],enmHealth=(act.values()[0][3])*10)
Example #37
    def run(self, failonerror=True, autoidentity=True, logcheck=True):
        """

        :return:
        """
        retStatus = True
        for idx, (taskFunc, taskParam) in enumerate(self.taskList):
            try:
                ret = taskFunc(**taskParam)
                msg = 'Func=%s, Param=%s, ret=%s' % (str(taskFunc),
                                                     str(taskParam), str(ret))
                log.info(msg[:1000])
                # ret = False if 0 == ret else True if 1 == ret else ret
            except Exception as e:
                msg = 'Task start fail, ' \
                  'message=%s, ' \
                  'args=%s, ' \
                  'param=%s' % (e.message, str(e.args), str(taskParam))
                log.info(msg)
                if failonerror:
                    raise Exception(msg)
                else:
                    retStatus = False
            if self.rtidbClient.scan == taskFunc:
                self._scan_parser(ret)
            else:
                if not ret:
                    if failonerror:
                        raise Exception('%s run error:%s' %
                                        (str(taskFunc), str(taskParam)))
                    else:
                        retStatus = False
        # common check
        self._common_check(autoidentity, retStatus, logcheck)

        # save log file
        logfile = util.get('RTIDB_LOG')
        logfileBackup = logfile + '.' + util.inspect_get(
            self.stack, 'function')
        FileUtils.cp(logfile, logfileBackup)

        return retStatus
Example #38
def add_robot(platid, mid, num):
    '''
    @brief Add robots
    @platid: platform ID
    @followuid: user MID
    @svidtype
    @num: number of robots
    '''
    url = util.AUTO_TEST_URL
    postdata = {
        "platid":platid,
        "followuid": mid,
        "svidtype":0,
        "num": num,
    }
    # print url
    # print postdata
    result = util.get(url, postdata)
    # print result
    return util.check_response(result)
Example #39
def incollection(e):
    # get the data
    author = util.author(e, 5)
    year = util.get(e, 'year', 'no year')
    title = util.get(e, 'title', '=no title=')
    publisher = util.get(e, 'publisher', None)
    address = util.get(e, 'address', None)
    editor = util.editor(e, 3)
    pages = util.get(e, 'pages', None)
    booktitle = util.get(e, 'booktitle', '=no booktitle=')
    bibtex_key = unicode(e['ID'])
    bibtex_type = e['ENTRYTYPE']
    # build the string
    text = str()
    text += author
    text += ' '
    text += '(%s)' % year
    text += ' '
    text += '%s' % title
    if editor != '=no editor=' or booktitle != '=no booktitle=':
        text += ' in'
        if editor != '=no editor=':
            text += ' '
            text += editor
            text += ' '
            if len(e['editor']) == 1:
                text += '(Ed.)'
            else:
                text += '(Eds.)'
        text += ' '
        text += booktitle
    if address or publisher:
        text += ', '
        if address:
            text += address
            text += ': '
        if publisher:
            text += publisher
    if pages:
        text += ', '
        text += 'pp. ' + pages
    text += ' '
    text += '@' + bibtex_key
    text += ' '
    text += '[%s]' % bibtex_type
    # remove latex markup crap
    text = util.remove_latex_crap(text)
    return text
Example #40
async def text_to_audio(request):
    upper_req_params = parse_query_str_to_dict(request.query_string)

    time_stamp = str(int(time.time()))
    upper_req_params["time_stamp"] = time_stamp
    upper_req_params["nonce_str"] = get_rand_str(12)

    app_key = upper_req_params["app_key"]
    del upper_req_params["app_key"]

    upper_req_params["sign"] = get_request_sign(upper_req_params, app_key)

    res = json.loads(
        get("https://api.ai.qq.com/fcgi-bin/aai/aai_tts", upper_req_params))
    print(res)

    audio_file = base64.b64decode(res["data"]["speech"])
    file_path = "../audio/" + time_stamp + ".wav"
    with open(file_path, 'wb') as f:
        f.write(audio_file)
    #text=mp3_file,content_type="audio/wav"
    return web.FileResponse(path=file_path)
Example #41
def finaltest():
    global ct
    global ch
    global dh
    ct=util.get('scythe')
    ch=100
    act={'Dragon':[3.0,3.0,-1,25.0]}
    if request.method=='GET':
        dh=act.values()[0][3]
        return render_template('final.html',health=ch,ct=ct.keys()[0],att=ct.values()[0][0],spd=ct.values()[0][1],scav=ct.values()[0][2],opt='yes',message='yes',enmHealth=dh*10)
    elif request.method=='POST':
        dif=0
        if request.form['choice']=='Run away':
            if act.values()[0][1]>ct.values()[0][1]:#If the user runs away and their speed is lower than the event's, they lose the difference between the event's speed and their speed, times 10
                dif=(act.values()[0][1]-ct.values()[0][1])*10
                ch-=dif
        elif request.form['choice']=='Use tool':#If the user uses tool, and the user's attack is lower than the event's, then the user loses the difference between the event's attack and theirs
            if act.values()[0][0]>ct.values()[0][0]:
                dif=(act.values()[0][0]-ct.values()[0][0])*10
                ch-=dif
            else:
                enmDif=(ct.values()[0][0]-act.values()[0][0])+0.5
                if enmDif<0.5:
                    enmDif=0.5
                dh-=enmDif
                ch-=2.5
        elif request.form['choice']=="Use potion":
            return redirect("/invBoss")
        if ch<=0:
            return render_template('lose.html')
        elif dh<=0:
            return render_template("final.html",killDrag='yes')
        return render_template('final.html',health=ch,ct=ct.keys()[0],att=ct.values()[0][0],spd=ct.values()[0][1],scav=ct.values()[0][2],message='yes',opt='yes',enmHealth=dh*10,killDrag='no')
Example #42
    def poll(self, timestamp, filters):
        logging.info('Polling feed "%s"' % self.url)
        result = []
        self.last_poll = timestamp
        username = util.decode_password(self.username)
        password = util.decode_password(self.password)
        d = util.parse(self.url, username, password, self.etag, self.modified)
        self.etag = util.get(d, 'etag', None)
        self.modified = util.get(d, 'modified', None)
        feed = util.get(d, 'feed', None)
        if feed:
            self.title = self.title or util.get(feed, 'title', '')
            self.link = self.link or util.get(feed, 'link', self.url)
        entries = util.get(d, 'entries', [])
        for entry in reversed(entries):
            id = create_id(entry)
            if id in self.id_set:
                continue
            self.item_count += 1
            self.id_list.append(id)
            self.id_set.add(id)
            item = Item(self, id)
            item.timestamp = calendar.timegm(
                util.get(entry, 'date_parsed', time.gmtime()))
            item.title = util.format(util.get(entry, 'title', ''),
                                     settings.POPUP_TITLE_LENGTH)
            item.description = util.format(util.get(entry, 'description', ''),
                                           settings.POPUP_BODY_LENGTH)
            item.link = util.get(entry, 'link', '')
            item.author = util.format(util.get(entry, 'author',
                                               ''))  # TODO: max length
            if all(filter.filter(item) for filter in filters):
                result.append(item)
        self.clean_cache(settings.FEED_CACHE_SIZE)
        return result
Example #43
def create_id(entry):
    keys = ['id', 'link', 'title']
    values = tuple(util.get(entry, key, None) for key in keys)
    return values if any(values) else uuid.uuid4().hex
Example #44
def getPersons(ignore):
    resp = util.get(settings.getPersonsUrl(),"admin","admin")
    print (resp)
    return resp
Example #45
"""

"""
import os
import sys
import inspect
import copy
import hashlib
import random
import string
import time
import atest.log as log
import util as util
from common_utils.file_util import FileUtils
from common_utils.process import Process
sys.path.append(os.path.dirname(util.get('RTIDB_CLIENT_PY')))
from rtidb_client import RtidbClient

CTIME = int(time.time())
NAME_GEN = lambda stack: util.inspect_get(stack, 'function')


class ClientContext(object):
    """

    """
    name = lambda self, stack: NAME_GEN(stack)
    tid = lambda self, stack: int(''.join(
        filter(str.isdigit,
               hashlib.md5(NAME_GEN(stack)).hexdigest()))[6]) + 1
    pid = lambda self, pid: pid
Example #46
# See the License for the specific language governing permissions and
# limitations under the License.

# -*- coding:utf-8 -*-
import os
import re
import json
import hashlib
import difflib
import pprint
from common_utils import yaml
from common_utils.file_util import FileUtils
from common_utils.process import Process
import util as util

BASE_BIN_DIR = os.path.join(util.get('PICO_ROOT'), 'build')
COMPARE_BIN_DIR = util.get('JENKINS_VER0_BIN_DIR')


class DiffFunc(object):
    """

    """
    @staticmethod
    def json_diff(baseFile,
                  compareFile,
                  baseJunkJpaths=None,
                  compareJunkJpaths=None):
        """

        :param baseFile:
Example #47
def get_by_name(name):
    """
One of the main ``clusto`` operations. Parameters:

* Required path parameter: ``name`` - The name you're looking for
* Optional: ``driver`` - If provided, a driver check will be added to
  ensure the resulting object is the type you're expecting

.. note:: This function returns expanded objects by default in order
  to reduce the amount of required custom headers. Therefore, the header
  is not required to receive expanded objects.

Examples:

.. code:: bash

    $ ${get} ${server_url}/by-name/nonserver
    "Object \"nonserver\" not found (nonserver does not exist.)"
    HTTP: 404
    Content-type: application/json

    $ ${get} -H 'Clusto-Mode: compact' ${server_url}/by-name/testserver1
    "/basicserver/testserver1"
    HTTP: 200
    Content-type: application/json

    $ ${get} ${server_url}/by-name/testserver1
    {
        "attrs": [],
        "contents": [],
        "driver": "basicserver",
        "name": "testserver1",
        "parents": [
            "/pool/singlepool",
            "/pool/multipool"
        ]
    }
    HTTP: 200
    Content-type: application/json

    $ ${get} -d 'driver=pool' ${server_url}/by-name/testserver1
    "The driver for object \"testserver1\" is not \"pool\""
    HTTP: 409
    Content-type: application/json

    $ ${get} -d 'driver=nondriver' ${server_url}/by-name/testserver1
    "The driver \"nondriver\" is not a valid driver"
    HTTP: 412
    Content-type: application/json

"""

    driver = bottle.request.params.get('driver', default=None)
    mode = bottle.request.headers.get('Clusto-Mode', default='expanded')
    obj, status, msg = util.get(name, driver)
    if not obj:
        return util.dumps(msg, status)
    try:
        return util.dumps(util.show(obj, mode))
    except TypeError as te:
        return util.dumps('%s' % (te,), 409)
Example #48
def get_VIEWSTATE(url):
    soup = BeautifulSoup(util.get(url, None))
    data = soup.find("input", attrs={"name": "__VIEWSTATE"})
    return data["value"]
Example #49
def feedme(feed="", type=""):
    h = HTMLParser.HTMLParser()
    colour = [
        "black", "white", "gray", "blue", "teal", "fuchsia", "indigo",
        "turquoise", "cyan", "greenyellow", "lime", "green", "olive", "gold",
        "yellow", "lavender", "pink", "magenta", "purple", "maroon",
        "chocolate", "orange", "red", "brown"
    ]
    parameters = util.parseParameters()

    #util.logError(str(parameters))

    try:
        mode = int(parameters["mode"])
    except:
        mode = None

    try:
        offsite = ast.literal_eval(parameters['extras'])
        #util.logError(str(offsite))
        if "site_xml" in offsite:
            feed = offsite['site_xml']
            type = "url"
    except:
        #not set, dont worry about it
        pass

    if mode == None or mode == 0:
        # if we get here list the sites found in the json file
        menu = []
        bits = util.getFile(feed, type)
        counter = 0

        if str(len(bits['sites'])) == "1" and 'folder' not in bits['sites']:
            mode = 1
            parameters['extras'] = str({"site": 0})
        else:
            try:
                folder = ast.literal_eval(parameters['extras'])
                folder = folder['folder']
                for site in bits['sites']:
                    try:
                        if site['folder'].lower() == folder.lower():
                            extras = {}
                            try:
                                extras['site_xml'] = offsite['site_xml']
                            except:
                                pass
                            extras['site'] = counter
                            menu.append({
                                "title": site['name'],
                                "url": site['name'],
                                "mode": "1",
                                "poster": site['poster'],
                                "icon": site['poster'],
                                "fanart": site['fanart'],
                                "type": ADDON_TYPE,
                                "plot": "",
                                "isFolder": True,
                                "extras": extras
                            })

                    except:
                        # site not in a folder
                        pass
                    counter = counter + 1
            except:
                if "folders" in bits:
                    for site in bits['folders']:
                        extras = {}
                        try:
                            extras['site_xml'] = offsite['site_xml']
                        except:
                            pass
                        extras['site'] = counter
                        folder_extras = {}
                        folder_extras['folder'] = site['name']
                        if "url" in site:
                            folder_extras['site_xml'] = site['url']
                            del (folder_extras['folder'])
                        menu.append({
                            "title": site['name'],
                            "url": site['name'],
                            "mode": "0",
                            "poster": site['poster'],
                            "icon": site['poster'],
                            "fanart": site['fanart'],
                            "type": ADDON_TYPE,
                            "plot": "",
                            "isFolder": True,
                            "extras": folder_extras
                        })
                for site in bits['sites']:
                    if "folder" not in site:
                        extras = {}
                        try:
                            extras['site_xml'] = offsite['site_xml']
                        except:
                            pass
                        extras['site'] = counter
                        menu.append({
                            "title": site['name'],
                            "url": site['name'],
                            "mode": "1",
                            "poster": site['poster'],
                            "icon": site['poster'],
                            "fanart": site['fanart'],
                            "type": ADDON_TYPE,
                            "plot": "",
                            "isFolder": True,
                            "extras": extras
                        })
                    counter = counter + 1
            util.addMenuItems(menu)
    if mode == 1:
        # first level within a site, show Latest, Search and any Tags within the specified site
        menu = []
        extras = ast.literal_eval(parameters['extras'])

        try:
            extras['site_xml'] = offsite['site_xml']
        except:
            pass

        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        if "search_url" not in site and "tags" not in site and len(
                site['items']) == 1:
            mode = 2
            for item in site['items']:
                parameters['url'] = site['items'][item][0]['site_url']
                break

        else:
            for item in site['items'].iterkeys():
                if item.lower() != "search":
                    try:
                        poster = parameters['poster']
                    except:
                        try:
                            poster = site['items'][item][0]['folder_poster']
                            if "http" not in poster and "https" not in poster:
                                poster = os.path.join(HOME, '', poster)
                        except:
                            poster = ""
                    try:
                        fanart = parameters['fanart']
                    except:
                        try:
                            fanart = site['items'][item][0]['folder_fanart']
                            if "http" not in fanart and "https" not in fanart:
                                fanart = os.path.join(HOME, '', fanart)
                        except:
                            fanart = ""
                    extras['level'] = item

                    menu.append({
                        "title":
                        item,
                        "url":
                        urllib.quote_plus(site['items'][item][0]['site_url']),
                        "mode":
                        "2",
                        "poster":
                        poster,
                        "icon":
                        poster,
                        "fanart":
                        fanart,
                        "type":
                        ADDON_TYPE,
                        "plot":
                        "",
                        "isFolder":
                        True,
                        "extras":
                        str(extras)
                    })

            try:
                counter = 0
                for tag in site['tags']:
                    try:
                        poster = parameters['poster']
                    except:
                        poster = ""

                    try:
                        fanart = parameters['fanart']
                    except:
                        fanart = ""
                    extras['tag'] = counter
                    menu.append({
                        "title": tag['name'],
                        "url": tag['url'],
                        "mode": "4",
                        "poster": poster,
                        "icon": poster,
                        "fanart": fanart,
                        "type": ADDON_TYPE,
                        "plot": "",
                        "isFolder": True,
                        "extras": str(extras)
                    })
                    counter = counter + 1
            except:
                pass
            if "search_url" in site:
                try:
                    poster = parameters['poster']
                except:
                    poster = ""

                try:
                    fanart = parameters['fanart']
                except:
                    fanart = ""
                menu.append({
                    "title": "Search",
                    "url": "",
                    "mode": "3",
                    "poster": poster,
                    "icon": poster,
                    "fanart": fanart,
                    "type": ADDON_TYPE,
                    "plot": "",
                    "isFolder": True,
                    "extras": str(extras)
                })
            util.addMenuItems(menu)
    if mode == 2:
        # load the first level of relevant video information
        menu = []
        extras = ast.literal_eval(parameters['extras'])

        try:
            extras['site_xml'] = offsite['site_xml']
        except:
            pass

        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        if 'pos' in extras:
            pos = extras['pos']
        else:
            pos = 0

        if 'level' in extras:
            level = extras['level']
        else:
            for item in site['items']:
                level = item
                break

        if len(site['items'][level]) > pos + 1:
            # another level is needed
            extras['pos'] = pos + 1
            newMode = "2"
            isFolder = True
        else:
            # on a level where next move is to check for sources
            try:
                if site['items'][level][pos]['play_media'] == "multiple":
                    newMode = "113"
                    isFolder = True
                else:
                    newMode = "111"  # find source
                    isFolder = False
            except:
                # default to play first found
                newMode = "111"  # find source
                isFolder = False

        #util.alert(newMode)
        page = util.get(h.unescape(parameters['url']))
        next = page
        """if parameters['name']=="Next Page >":
            util.logError(str(next))"""

        try:
            if site['items'][level][pos]['global'] != "":
                regex = util.prepare(site['items'][level][pos]['global'])
                matches = re.findall(regex, page)
                if matches:
                    page = matches[0]
        except:
            pass

        regex = util.prepare(site['items'][level][pos]['pattern'])
        matches = re.findall(regex, page)
        if matches:
            counter = 0
            for match in matches:
                try:
                    title = h.unescape(
                        util.replaceParts(
                            site['items'][level][pos]['name'],
                            matches[counter]).replace('\n', '').replace(
                                '\t', '').replace("\\", "").lstrip())
                except:
                    title = ""
                #try:
                #    util.alert(site['items'][level][pos]['url'])
                url = urllib.quote_plus(
                    util.replaceParts(site['items'][level][pos]['url'],
                                      matches[counter]))
                #    util.alert(">>"+url)
                #except:
                #    url=""
                try:
                    poster = util.replaceParts(
                        site['items'][level][pos]['poster'],
                        matches[counter]).encode('utf-8')
                except:
                    poster = ""
                try:
                    fanart = util.replaceParts(
                        site['items'][level][pos]['fanart'],
                        matches[counter]).encode('utf-8')
                except:
                    fanart = ""
                try:
                    plot = util.replaceParts(site['items'][level][pos]['plot'],
                                             matches[counter]).encode('utf-8')
                except:
                    plot = ""

                if isFolder:
                    menu.append({
                        "title": title,
                        "url": url,
                        "mode": newMode,
                        "poster": poster,
                        "icon": poster,
                        "fanart": fanart,
                        "type": ADDON_TYPE,
                        "plot": plot,
                        "isFolder": isFolder,
                        "extras": str(extras)
                    })
                else:
                    menu.append({
                        "title": title,
                        "url": url,
                        "mode": newMode,
                        "poster": poster,
                        "icon": poster,
                        "fanart": fanart,
                        "type": ADDON_TYPE,
                        "plot": plot,
                        "isFolder": isFolder,
                        "isPlayable": "True",
                        "extras": str(extras)
                    })
                counter = counter + 1
        try:
            regex = util.prepare(site['items'][level][pos]['next_pattern'])
            matches = re.findall(regex, next)
            if matches:
                parts = []
                if len(matches) > 1:
                    for match in matches:
                        parts.append(match)
                else:
                    match = matches

                #nextlink=util.execPy(util.replaceParts(site['items'][level][pos]['next_url'], match))
                nextlink = util.replaceParts(
                    site['items'][level][pos]['next_url'], match)
                extras['pos'] = pos

                menu.append({
                    "title": "Next Page >",
                    "url": urllib.quote_plus(nextlink),
                    "mode": "2",
                    "poster": "",
                    "icon": "",
                    "fanart": "",
                    "type": ADDON_TYPE,
                    "plot": plot,
                    "isFolder": True,
                    "extras": str(extras)
                })
        except Exception as e:
            util.logError(str(e))
            pass
        util.addMenuItems(menu)
    elif mode == 3:
        # display the Search dialog and build search results
        menu = []
        extras = ast.literal_eval(parameters['extras'])

        try:
            extras['site_xml'] = offsite['site_xml']
        except:
            pass

        term = util.searchDialog()

        if term:
            bits = util.getFile(feed, type)
            site = bits['sites'][extras['site']]
            pos = 0

            for item in site['items']:
                level = item
                extras['level'] = level
                break

            if len(site['items'][extras['level']]) > pos + 1:
                # another level is needed
                extras['pos'] = 1
                newMode = "2"
                isFolder = True
                isPlayable = True
            else:
                # on a level where next move is to check for sources
                if site['items'][
                        extras['level']][pos]['play_media'] == "multiple":
                    newMode = "113"
                    isFolder = True
                    isPlayable = False
                else:
                    newMode = "111"  # find source
                    isFolder = False
                    isPlayable = True
            if "{{" in site['search_url'] and "}}" in site['search_url']:
                url = util.execPy(site['search_url'].replace("{%}", term))
            else:
                url = site['search_url'].replace("{%}", term)
            util.logError(url)
            page = util.get(url)
            next = page

            try:
                if site['item']['global'] != "":
                    regex = util.prepare(site['item']['global'])
                    matches = re.findall(regex, page)
                    if matches:
                        page = matches[0]
            except:
                pass

            regex = util.prepare(site['items'][level][pos]['pattern'])
            matches = re.findall(regex, page)

            if matches:
                counter = 0
                for match in matches:
                    try:
                        title = h.unescape(
                            util.replaceParts(
                                site['items'][level][pos]['name'],
                                matches[counter]).replace('\n', '').replace(
                                    '\t', '').lstrip().encode('utf-8'))
                    except:
                        title = ""
                    try:
                        url = util.replaceParts(
                            site['items'][level][pos]['url'],
                            matches[counter]).encode('utf-8')
                        #util.logError(url)
                    except:
                        url = ""
                    try:
                        poster = util.replaceParts(
                            site['items'][level][pos]['poster'],
                            matches[counter]).encode('utf-8')
                    except:
                        poster = ""
                    try:
                        fanart = util.replaceParts(
                            site['items'][level][pos]['fanart'],
                            matches[counter]).encode('utf-8')
                    except:
                        fanart = ""
                    try:
                        plot = util.replaceParts(
                            site['items'][level][pos]['plot'],
                            matches[counter]).encode('utf-8')
                    except:
                        plot = ""

                    if isFolder:
                        menu.append({
                            "title": title,
                            "url": url,
                            "mode": newMode,
                            "poster": poster,
                            "icon": poster,
                            "fanart": fanart,
                            "type": ADDON_TYPE,
                            "plot": plot,
                            "isFolder": isFolder,
                            "extras": str(extras)
                        })
                    else:
                        menu.append({
                            "title": title,
                            "url": url,
                            "mode": newMode,
                            "poster": poster,
                            "icon": poster,
                            "fanart": fanart,
                            "type": ADDON_TYPE,
                            "plot": plot,
                            "isFolder": isFolder,
                            "isPlayable": "True",
                            "extras": str(extras)
                        })
                    counter = counter + 1
            try:
                regex = util.prepare(site['items'][level][pos]['next_pattern'])
                matches = re.findall(regex, next)
                if matches:
                    parts = []
                    """for match in matches:
                        parts.append(match)"""

                    if len(matches) > 1:
                        for match in matches:
                            parts.append(match)
                    else:
                        match = matches

                    #nextlink=util.execPy(util.replaceParts(site['items'][level][pos]['next_url'], match))
                    nextlink = util.replaceParts(
                        site['items'][level][pos]['next_url'], match)
                    menu.append({
                        "title": "Next Page >",
                        "url": nextlink,
                        "mode": "2",
                        "poster": "",
                        "icon": "",
                        "fanart": "",
                        "type": ADDON_TYPE,
                        "plot": plot,
                        "isFolder": True,
                        "extras": str(extras)
                    })
            except:
                pass
            util.addMenuItems(menu)
        else:
            return False
    elif mode == 4:
        # show relevant Tag video results
        menu = []

        extras = ast.literal_eval(parameters['extras'])

        try:
            extras['site_xml'] = offsite['site_xml']
        except:
            pass

        bits = util.getFile(feed, type)

        site = bits['sites'][extras['site']]['tags'][extras['tag']]

        page = util.get(parameters['url'])
        next = page

        try:
            if site['item']['global'] != "":
                regex = util.prepare(site['item']['global'])
                matches = re.findall(regex, page)
                if matches:
                    page = matches[0]
        except:
            pass

        regex = util.prepare(site['item']['pattern'])
        matches = re.findall(regex, page)
        if matches:
            counter = 0
            for match in matches:
                try:
                    title = h.unescape(
                        util.replaceParts(site['item']['name'],
                                          matches[counter]).encode('utf-8'))
                except:
                    title = ""
                try:
                    url = util.replaceParts(site['item']['url'],
                                            matches[counter]).encode('utf-8')
                except:
                    url = ""
                try:
                    poster = util.replaceParts(
                        site['item']['poster'],
                        matches[counter]).encode('utf-8')
                except:
                    poster = ""
                try:
                    fanart = util.replaceParts(
                        site['item']['fanart'],
                        matches[counter]).encode('utf-8')
                except:
                    fanart = ""
                try:
                    plot = util.replaceParts(site['item']['plot'],
                                             matches[counter]).encode('utf-8')
                except:
                    plot = ""

                menu.append({
                    "title": title,
                    "url": url,
                    "mode": "2",
                    "poster": poster,
                    "icon": poster,
                    "fanart": fanart,
                    "type": ADDON_TYPE,
                    "plot": plot,
                    "isFolder": True,
                    "extras": str(extras)
                })
                counter = counter + 1
        util.addMenuItems(menu)
    elif mode == 5:
        pass
    elif mode == 111:
        # find playable sources in url
        #util.alert(parameters['url'])

        extras = ast.literal_eval(parameters['extras'])
        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        try:
            pos = extras['pos']
        except:
            pos = 0

        try:
            selected_video = int(
                site['items'][extras['level']][pos]['play_media']) - 1
        except:
            selected_video = 0

        page = util.get(parameters['url'])

        link = False
        try:
            link = urlresolver.resolve(parameters['url'])
        except Exception as e:
            if str(e).lower() == "sign in to confirm your age":
                util.notify("YouTube Error: Login to confirm age.")
                return False
            else:
                util.notify(str(e))
                return False

        if link:
            # play if url resolver reports true
            util.playMedia(parameters['name'],
                           parameters['poster'],
                           link,
                           force=True)
        elif any(ext in parameters['url'] for ext in filetypes):
            # play if url has a video extension
            util.playMedia(parameters['name'],
                           parameters['poster'],
                           parameters['url'],
                           force=True)
        else:
            #search for video urls
            if "urlresolver" in site and site['urlresolver'].lower(
            ) == "false":
                regex = "\"([^\s]*?\.(:?" + "|".join(filetypes) + "))\""
                matches = re.findall(regex, page)
            else:
                regex = "(\/\/.*?\/embed.*?)[\?\"]"
                matches = re.findall(regex, page)
                regex = "\"((?:http:|https:)?\/\/.*?\/watch.*?)[\"]"
                matches = matches + re.findall(regex, page)
                matches2 = urlresolver.scrape_supported(page)
                #util.alert(str(matches))
                """regex="\"(https?://("+"|".join(supports)+")\..*?)\""
                matches2 = re.findall(regex, page)
                regex="\"((?:http:|https:)?\/\/.*?\/watch.*?)[\"]"
                matches3 = re.findall(regex, page)
                regex = 'https?://(.*?(?:\.googlevideo|(?:plus|drive|get|docs)\.google|google(?:usercontent|drive|apis))\.com)/(.*?(?:videoplayback\?|[\?&]authkey|host/)*.+)'
                matches4 = re.findall(regex, page)
                
                matches2=[ x for x in matches2 if any(sup in x for sup in supports) ]
                matches3=[ x for x in matches3 if any(sup in x for sup in supports) ]"""

                matches = matches + matches2
            util.logError(
                "''''''''''''''''''''''''''''''''''''''''''''''''''''''")
            util.logError(">>>>" + str(matches))
            if isinstance(matches[selected_video], tuple):
                url = matches[selected_video][0]
            else:
                url = matches[selected_video]
            #util.alert(url)
            if "http" not in url:
                url = "http:" + url

            link = urlresolver.resolve(url)

            if link == False:
                link = url

            util.playMedia(parameters['name'], parameters['poster'], link)

    elif mode == 112:
        extras = ast.literal_eval(parameters['extras'])
        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        page = util.get(parameters['url'])
        """if "urlresolver" in site and site['urlresolver'].lower()=="false":
            regex="\"(.*?\.mp4)\""
            matches = re.findall(regex, page)
            if matches:
                link=matches[0]
        else:"""
        regex = "\"(//\S*?(:?" + ("|".join(filetypes)) + ")\S*?)\""
        matches = re.findall(regex, page)
        if matches:
            # take the first match; group 0 of the tuple is the full URL capture
            link = matches[0][0]
            if "http" not in link:
                link = "http:" + link
        else:
            link = urlresolver.resolve(parameters['url'])
            if not link:
                try:
                    regex = "(\/\/.*?\/embed.*?)[\?\"]"
                    matches = re.findall(regex, page)
                    regex = "\"((?:http:|https:)?\/\/.*?\/watch.*?)[\"]"
                    matches = matches + re.findall(regex, page)
                    regex = 'https?://(.*?(?:\.googlevideo|(?:plus|drive|get|docs)\.google|google(?:usercontent|drive|apis))\.com)/(.*?(?:videoplayback\?|[\?&]authkey|host/)*.+)'
                    matches = matches + re.findall(regex, page)
                    if matches:
                        matches = [
                            x for x in matches
                            if any(sup in x for sup in supports)
                        ]
                        if matches:
                            link = urlresolver.resolve("http:" + matches[0])
                except Exception as e:
                    util.notify(str(e))
        if link:
            import downloader
            downloader.download(
                link,
                os.path.join(xbmcaddon.Addon().getSetting('folder'),
                             parameters['name'] + ".mp4"))
        else:
            util.notify("No video found")
    elif mode == 113:
        menu = []
        extras = ast.literal_eval(parameters['extras'])
        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        page = util.get(parameters['url'])

        matches = urlresolver.scrape_supported(page)
        #regex="(//\S*?(:?"+("|".join(filetypes))+")\S*?)"
        #matches2 = re.findall(regex, page)
        """regex="(\/\/.*?\/embed.*?)[\?\"]"
        matches2 = re.findall(regex, page)
        regex="\"(https?://("+"|".join(supports)+")\..*?)\""
        matches3 = re.findall(regex, page)
        regex = 'https?://(.*?(?:\.googlevideo|(?:plus|drive|get|docs)\.google|google(?:usercontent|drive|apis))\.com)/(.*?(?:videoplayback\?|[\?&]authkey|host/)*.+)'
        matches4 = re.findall(regex, page)
        
        matches2=[ x for x in matches2 if any(sup in x for sup in supports) ]
        matches3=[ x for x in matches3 if any(sup in x for sup in supports) ]
        
        matches=matches+matches2+matches3+matches4"""

        unique = []
        for match in matches:  #+matches2:
            if isinstance(match, tuple):
                unique.append(match[0])
            else:
                unique.append(match)

        matches = list(set(unique))

        if matches:
            for match in matches:
                if "http" not in match:
                    rl = "http:" + match
                else:
                    rl = match

                menu.append({
                    "title": rl,
                    "url": rl,
                    "mode": "114",
                    "poster": parameters['poster'],
                    "icon": parameters['icon'],
                    "fanart": parameters['fanart'],
                    "type": "",
                    "plot": "",
                    "isFolder": False,
                    "isPlayable": False,
                    "extras": str(extras)
                })
            util.addMenuItems(menu)
    elif mode == 114:
        # find playable sources in url
        #util.alert(parameters['url'])
        urlresolver.relevant_resolvers()
        try:
            link = urlresolver.resolve(str(parameters['url']))
        except Exception as e:
            util.notify(str(e))
            exit()
        if link:
            try:
                util.playMedia(parameters['name'], parameters['poster'], link)
            except:
                util.playMedia(parameters['name'], parameters['poster'],
                               parameters['url'])
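
The mode 111 branch above tries urlresolver first, then a direct file-extension match, and only then scrapes the page for embed/watch links. Below is a minimal sketch of that fallback order; filetypes, the page HTML and the play() callback are stand-ins for the addon's real settings and util.playMedia, not the original code.

import re
import urlresolver  # Kodi script.module.urlresolver, as used in the example above

filetypes = ["mp4", "m3u8", "flv"]  # assumed list of playable extensions

def pick_source(url, page_html, play):
    # 1) let urlresolver handle known hoster URLs directly
    try:
        link = urlresolver.resolve(url)
    except Exception:
        link = False
    if link:
        return play(link)
    # 2) play the URL as-is if it already points at a video file
    if any(ext in url for ext in filetypes):
        return play(url)
    # 3) otherwise scrape the page for embed links and resolvable hoster URLs
    matches = re.findall(r'(//.*?/embed.*?)[?"]', page_html)
    matches += urlresolver.scrape_supported(page_html)
    if matches:
        candidate = matches[0][0] if isinstance(matches[0], tuple) else matches[0]
        if "http" not in candidate:
            candidate = "http:" + candidate
        return play(urlresolver.resolve(candidate) or candidate)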
Exemple #50
0
 def poll(self, timestamp, filters):
     try:
         logging.info('Polling feed "%s"' % self.url)
         result = []
         self.last_poll = timestamp
         username = util.decode_password(self.username)
         password = util.decode_password(self.password)
         d = util.parse(self.url, username, password, self.etag, self.modified)
         self.etag = util.get(d, 'etag', None)
         self.modified = util.get(d, 'modified', None)
         feed = util.get(d, 'feed', None)
         if feed:
             self.title = self.title or util.get(feed, 'title', '')
             self.link = self.link or util.get(feed, 'link', self.url)
         entries = util.get(d, 'entries', [])
         for entry in reversed(entries):
             found = False
             id = create_id(entry)
             for i in self.id_set:
                 # compare only the link element - it should be unique; the whole id
                 # tuples can no longer be compared because the timestamp element always differs
                 if id[1] == i[1]: 
                     found = True
                     break
             if found:
                 continue # The entry is not new, move on to the next one.
                 
             self.id_set.add(id)
             
             self.item_count += 1
             item = Item(self, id)
             item.timestamp = calendar.timegm(util.get(entry, 'date_parsed', time.gmtime()))
             item.title = util.format(util.get(entry, 'title', ''), settings.POPUP_TITLE_LENGTH)
             item.description = util.format(util.get(entry, 'description', ''), settings.POPUP_BODY_LENGTH)
             item.link = util.get(entry, 'link', '')
             item.author = util.format(util.get(entry, 'author', '')) # TODO: max length
             if all(filter.filter(item) for filter in filters):
                 result.append(item)
             
         # determine if there are any entries in self.id_set that are older than CACHE_AGE_LIMIT
         # if there are, remove them from the list (they aged out of the cache)
         idsToRemove = set()
         now = datetime.fromtimestamp(time.time())
         for tempId in self.id_set:
             diff = now - datetime.fromtimestamp(tempId[3])
             if abs(diff.days) > settings.CACHE_AGE_LIMIT:
                 idsToRemove.add(tempId)  # can't modify self.id_set while iterating over it
         for i in idsToRemove:
             logging.info('Removing %s because it is too old' % i[1])
             self.id_set.remove(i)
         
         return result
     except Exception, e:
         logging.info('Error during poll: %s' % e)
         raise        
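
The age-based pruning at the end of this poll() can be exercised on its own. A small sketch, assuming each cached id is the tuple produced by create_id in the next example (its 4th element is a time.time() stamp) and that the age limit is a number of days, as settings.CACHE_AGE_LIMIT appears to be:

import time
from datetime import datetime

def prune_old_ids(id_set, max_age_days):
    # drop cached ids whose stored timestamp (4th tuple element) exceeds the age limit
    now = datetime.fromtimestamp(time.time())
    too_old = set(i for i in id_set
                  if abs((now - datetime.fromtimestamp(i[3])).days) > max_age_days)
    return id_set - too_old

# usage sketch: self.id_set = prune_old_ids(self.id_set, settings.CACHE_AGE_LIMIT)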
Exemple #51
0
def create_id(entry):
    keys = ['id', 'link', 'title']
    # 4th param is the time used for determining the age of the cached item in our local cache
    values = util.get(entry,keys[0],None), util.get(entry,keys[1],None),util.get(entry,keys[2],None),time.time()
    return values if any(values) else uuid.uuid4().hex
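
Worth noting: create_id only falls back to a random uuid when every field is missing, so the same entry keeps the same id tuple across polls. A quick illustration, assuming util.get(d, key, default) behaves like a plain dict lookup with a default (as in the other examples here):

import time
import uuid

def get(d, key, default=None):
    # assumed behaviour of util.get for flat dicts
    return d.get(key, default)

entry = {"link": "http://example.com/post/1", "title": "Hello"}
values = get(entry, 'id'), get(entry, 'link'), get(entry, 'title'), time.time()
print(values if any(values) else uuid.uuid4().hex)  # stable tuple, not a random uuid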
Exemple #52
0
def write_summary_table(scenario_name, instance):
    m = instance

    # note: this gets overwritten after every solve. It would be more efficient to
    # write it only after the last solve, but there's no hook in phsolverserver for that
    # (it may not even be called at that point)
    make_outputs_dir()

    # the build variables may be pinned to match a particular file;
    # if so, we use that as part of the filename
    build_file = os.environ.get('PHA_FIX_BUILD_VARS_FILE')
    if build_file is None:
        build_tag = ""
    else:
        # build_file typically looks like
        # "outputs/build_JJJJJJ_xhat.tsv" or "outputs/build_JJJJJJ_iter0_Scenario_nnnn.tsv"
        # we shorten this to tag like "bJJJJJJ_xhat" or "bJJJJJJ_iter0_Scenario_nnnn"
        # before prepending the current job id and appending the evaluated scenario ID.
        if os.path.dirname(build_file) == "outputs":
            # remove outputs dir if specified, otherwise keep dir name in there
            build_tag = os.path.basename(build_file)
        else:
            # remove path separators from the build file name
            build_tag = build_file.replace(os.sep, '_')
        if build_tag.startswith("build_"):
            build_tag = 'b' + build_tag[6:]  # shorten "build_" to "b"
        if build_tag.endswith(".tsv"):
            build_tag = build_tag[:-4]
        build_tag = "_" + build_tag

    output_file = os.path.join(
        "outputs", "summary_{}{}_{}.tsv".format(jobid, build_tag,
                                                scenario_name))

    print "writing {}...".format(output_file)

    period_duration = {
        pe: sum(m.tp_weight[tp] for tp in m.PERIOD_TPS[pe])
        for pe in m.PERIODS
    }

    values = []
    demand_components = [
        c for c in ('lz_demand_mw', 'DemandResponse', 'ChargeEVs')
        if hasattr(m, c)
    ]

    #  total cost / kWh generated in each period
    # (both discounted to today, so the discounting cancels out)
    values.extend([
        ("cost_per_kwh", pe, m.SystemCostPerPeriod[pe] /
         sum(m.bring_timepoint_costs_to_base_year[tp] * 1000.0 * sum(
             getattr(m, c)[lz, tp] for c in demand_components
             for lz in m.LOAD_ZONES) for tp in m.PERIOD_TPS[pe]))
        for pe in m.PERIODS
    ])
    # Renewable energy share
    if hasattr(m, 'RPSEligiblePower'):
        # renewable share in each period
        values.extend([("renewable_share", pe,
                        m.RPSEligiblePower[pe] / m.RPSTotalPower[pe])
                       for pe in m.PERIODS])
    # average production from each fuel during each period
    values.extend([
        (f, pe,
         sum(
             get(m.DispatchProjByFuel, (pr, tp, f), 0.0) * m.tp_weight[tp]
             for pr in m.PROJECTS_BY_FUEL[f]
             for tp in m.PERIOD_TPS[pe]) / period_duration[pe])
        for f in m.FUELS for pe in m.PERIODS
    ])
    # total production from each non-fuel source
    values.extend([(s, pe,
                    sum(
                        get(m.DispatchProj, (pr, tp), 0.0) * m.tp_weight[tp]
                        for pr in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]
                        for tp in m.PERIOD_TPS[pe]) / period_duration[pe])
                   for s in m.NON_FUEL_ENERGY_SOURCES for pe in m.PERIODS])
    # curtailments
    values.extend([("curtail_" + s, pe,
                    sum((get(m.DispatchUpperLimit, (pr, tp), 0.0) -
                         get(m.DispatchProj, (pr, tp), 0.0)) * m.tp_weight[tp]
                        for pr in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]
                        for tp in m.PERIOD_TPS[pe]) / period_duration[pe])
                   for s in m.NON_FUEL_ENERGY_SOURCES for pe in m.PERIODS])
    # all LZ_Energy_Components
    values.extend([(component, pe,
                    sum(
                        getattr(m, component)[lz, tp] * m.tp_weight[tp]
                        for lz in m.LOAD_ZONES
                        for tp in m.PERIOD_TPS[pe]) / period_duration[pe])
                   for component in m.LZ_Energy_Components_Produce
                   for pe in m.PERIODS])
    values.extend([(component, pe,
                    sum(
                        getattr(m, component)[lz, tp] * m.tp_weight[tp]
                        for lz in m.LOAD_ZONES
                        for tp in m.PERIOD_TPS[pe]) / period_duration[pe])
                   for component in m.LZ_Energy_Components_Consume
                   for pe in m.PERIODS])

    with open(output_file, 'w') as f:
        f.writelines("\t".join((key, str(per), str(value(val)))) + "\n"
                     for (key, per, val) in values)
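
The build_tag derivation in this example is easiest to follow with a concrete input. A standalone sketch of just that transformation, using the file naming convention the comments describe:

import os

def make_build_tag(build_file):
    # "outputs/build_JJJJJJ_xhat.tsv" -> "_bJJJJJJ_xhat"
    if os.path.dirname(build_file) == "outputs":
        tag = os.path.basename(build_file)
    else:
        tag = build_file.replace(os.sep, '_')
    if tag.startswith("build_"):
        tag = 'b' + tag[6:]
    if tag.endswith(".tsv"):
        tag = tag[:-4]
    return "_" + tag

print(make_build_tag("outputs/build_JJJJJJ_xhat.tsv"))  # _bJJJJJJ_xhat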
Exemple #53
0
def getCarPersonMappings(ignore):
    resp = util.get(settings.getCarPersonUrl(), "admin", "admin")
    print(resp)
    return resp
Exemple #54
0
def define_components(m):
    """Define RPS targets, eligibility and enforcement components (plus related
    wind and distributed-PV options) for the model."""
    ###################
    # RPS calculation
    ##################

    m.f_rps_eligible = Param(m.FUELS, within=Binary)

    m.RPS_ENERGY_SOURCES = Set(initialize=lambda m:
        [s for s in m.NON_FUEL_ENERGY_SOURCES if s != 'Battery'] + [f for f in m.FUELS if m.f_rps_eligible[f]]
    )

    m.RPS_YEARS = Set(ordered=True)
    m.rps_target = Param(m.RPS_YEARS)

    def rps_target_for_period_rule(m, p):
        """find the last target that is in effect before the _end_ of the period"""
        latest_target = max(y for y in m.RPS_YEARS if y < m.period_start[p] + m.period_length_years[p])
        return m.rps_target[latest_target]
    m.rps_target_for_period = Param(m.PERIODS, initialize=rps_target_for_period_rule)

    # maximum share of (bio)fuels in rps
    # note: using Infinity as the upper limit causes the solution to take forever
    # m.rps_fuel_limit = Param(default=float("inf"), mutable=True)
    m.rps_fuel_limit = Param(initialize=m.options.biofuel_limit, mutable=True)

    # calculate amount of pre-existing capacity in each generation project;
    # used when we want to restrict expansion
    m.gen_pre_existing_capacity = Expression(
        m.GENERATION_PROJECTS,
        rule=lambda m, g: (
            m.GenCapacity[g, m.PERIODS.first()]
            - get(m.BuildGen, (g, m.PERIODS.first()), 0)
        )
    )

    # Define DispatchGenRenewableMW, which shows the amount of power produced
    # by each project from each fuel during each time step.
    define_DispatchGenRenewableMW(m)

    # calculate amount of power produced from renewable fuels during each period
    m.RPSFuelPower = Expression(m.PERIODS, rule=lambda m, per:
        sum(
            m.DispatchGenRenewableMW[g, tp] * m.tp_weight[tp]
            for g in m.FUEL_BASED_GENS
            for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per]
        )
    )

    # Note: this rule ignores pumped hydro and batteries, so it could be gamed by producing extra
    # RPS-eligible power and burning it off in storage losses; on the other hand,
    # it also neglects the (small) contribution from net flow of pumped hydro projects.
    # TODO: incorporate pumped hydro into this rule, maybe change the target to refer to
    # sum(getattr(m, component)[z, t] for z in m.LOAD_ZONES) for component in m.Zone_Power_Injections)

    # power production that can be counted toward the RPS each period
    m.RPSEligiblePower = Expression(m.PERIODS, rule=lambda m, per:
        m.RPSFuelPower[per]
        +
        sum(
            m.DispatchGen[g, tp] * m.tp_weight[tp]
            for f in m.NON_FUEL_ENERGY_SOURCES if f in m.RPS_ENERGY_SOURCES
            for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[f]
            for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per]
        )
    )

    # total power production each period (against which RPS is measured)
    # note: we exclude production from storage
    m.RPSTotalPower = Expression(m.PERIODS, rule=lambda m, per:
        sum(
            m.DispatchGen[g, tp] * m.tp_weight[tp]
            for g in m.GENERATION_PROJECTS if g not in getattr(m, 'STORAGE_GENS', [])
            for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per]
        )
    )

    if m.options.rps_level == 'activate':
        # we completely skip creating the constraint if the RPS is not activated.
        # this makes it easy for other modules to check whether there's an RPS in effect
        # (if we deactivated the RPS after it is constructed, then other modules would
        # have to postpone checking until then)
        m.RPS_Enforce = Constraint(m.PERIODS, rule=lambda m, per:
            m.RPSEligiblePower[per] >= m.rps_target_for_period[per] * m.RPSTotalPower[per]
        )
    elif m.options.rps_level == 'no_new_renewables':
        # prevent construction of any new exclusively-renewable projects, but allow
        # replacement of existing ones
        # (doesn't ban use of biofuels in existing or multi-fuel projects, but that could
        # be done with --biofuel-limit 0)
        m.No_New_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr:
            (m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g])
            if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES
            else Constraint.Skip
        )

    wind_energy_sources = {'WND'}
    if m.options.rps_no_new_wind:
        # limit wind to existing capacity
        m.No_New_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr:
            (m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g])
            if m.gen_energy_source[g] in wind_energy_sources
            else Constraint.Skip
        )
    if m.options.rps_no_wind:
        # don't build any new capacity or replace existing
        m.No_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr:
            (m.BuildGen[g, bld_yr] == 0.0)
            if m.gen_energy_source[g] in wind_energy_sources
            else Constraint.Skip
        )

    if m.options.rps_prefer_dist_pv:
        m.DIST_PV_GENS = Set(initialize=lambda m: [
            g for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE['SUN']
            if 'DistPV' in m.gen_tech[g]
        ])
        m.LARGE_PV_GENS = Set(initialize=lambda m: [
            g for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE['SUN']
            if g not in m.DIST_PV_GENS
        ])
        # LargePVAllowed must be 1 to allow large PV to be built
        m.LargePVAllowed = Var(m.PERIODS, within=Binary)
        # LargePVAllowed can only be 1 if 90% of the available rooftop PV has been built
        m.Set_LargePVAllowed = Constraint(
            m.PERIODS,
            rule=lambda m, p:
                sum(m.GenCapacity[g, p] for g in m.DIST_PV_GENS)
                >=
                m.LargePVAllowed[p]
                * 0.9
                * sum(m.gen_capacity_limit_mw[g] for g in m.DIST_PV_GENS)
        )
        m.Apply_LargePVAllowed = Constraint(
            m.LARGE_PV_GENS, m.PERIODS,
            rule=lambda m, g, p:
                m.GenCapacity[g, p]
                <=
                m.LargePVAllowed[p] * m.gen_capacity_limit_mw[g]
                + m.gen_pre_existing_capacity[g]
        )

    # Don't allow (bio)fuels to provide more than a certain percentage of the system's energy
    # Note: when the system really wants to use more biofuel, it is possible to "game" this limit by
    # cycling power through batteries, pumped storage, transmission lines or the hydrogen system to
    # burn off some
    # extra non-fuel energy, allowing more biofuel into the system. (This doesn't typically happen
    # with batteries due to high variable costs -- e.g., it has to cycle 4 kWh through a battery to
    # consume 1 kWh of non-biofuel power, to allow 0.05 kWh of additional biofuel into the system.
    # Even if this can save $0.5/kWh, if battery cycling costs $0.15/kWh, that means $0.60 extra to
    # save $0.025. It also doesn't happen in the hydrogen scenario, since storing intermittent power
    # directly as hydrogen can directly displace biofuel consumption. But it could happen if batteries
    # have low efficiency or low cycling cost, or if transmission losses are significant.)
    # One solution would be to only apply the RPS to the predefined load (not generation), but then
    # transmission and battery losses could be served by fossil fuels.
    # Alternatively: limit fossil fuels to (1-rps) * standard loads
    # and limit biofuels to (1-bio)*standard loads. This would force renewables to be used for
    # all losses, which is slightly inaccurate.
    # TODO: fix the problem noted above; for now we don't worry too much because there are no
    # transmission losses, the cycling costs for batteries are too high and pumped storage is only
    # adopted on a small scale.

    m.RPS_Fuel_Cap = Constraint(m.PERIODS, rule = lambda m, per:
        m.RPSFuelPower[per] <= m.rps_fuel_limit * m.RPSTotalPower[per]
    )
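
The rps_target_for_period rule above selects the last target year that takes effect strictly before the end of each period. A minimal numeric sketch with made-up target years and period bounds (not data from the model):

rps_target = {2020: 0.30, 2030: 0.40, 2040: 0.70, 2045: 1.00}

def rps_target_for_period(period_start, period_length_years):
    # last target year that is in effect before the end of the period
    latest = max(y for y in rps_target if y < period_start + period_length_years)
    return rps_target[latest]

print(rps_target_for_period(2037, 8))  # 0.7 -- the 2040 target; 2045 is not yet in effect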
Exemple #55
0
 def get(self, path):
     return util.get(path, self.__state)
Exemple #56
0
def getCars(ignore):
    """Uses the GET on car:cars resource to get all cars in the store using RESTCONF"""
    resp = util.get(settings.getCarsUrl(), "admin", "admin")
    print(resp)
    return resp
Exemple #57
0
def create_id(entry):
    keys = ['id', 'link', 'title']
    values = tuple(util.get(entry, key, None) for key in keys)
    return values if any(values) else uuid.uuid4().hex
Exemple #58
0
 def __init__(self, data):
     data = data["flight"]
     self.id = util.get(data, ["identification", "row"])
     self.flight_num = util.get(data,
                                ["identification", "number", "default"])
     self.status_detail = util.get(data, ["status", "text"])
     self.aircraft_code = util.get(data, ["aircraft", "model", "code"])
     self.aircraft_text = util.get(data, ["aircraft", "model", "text"])
     self.aircraft_reg = util.get(data, ["aircraft", "registration"])
     self.aircraft_co2 = util.get(data, ["aircraft", "country", "alpha2"])
     self.aircraft_restr = util.get(data, ["aircraft", "restricted"])
     self.owner = util.get(data, ["owner", "name"])
     self.owner_iata = util.get(data, ["owner", "code", "iata"])
     self.owner_icao = util.get(data, ["owner", "code", "icao"])
     self.airline = util.get(data, ["airline", "name"])
     self.airline_iata = util.get(data, ["airline", "code", "iata"])
     self.airline_icao = util.get(data, ["airline", "code", "icao"])
     self.origin_offset = util.get(
         data, ["airport", "origin", "timezone", "offset"])
     self.origin_offset_abbr = util.get(
         data, ["airport", "origin", "timezone", "abbr"])
     self.origin_offset_dst = util.get(
         data, ["airport", "origin", "timezone", "isDst"])
     self.origin_terminal = util.get(
         data, ["airport", "origin", "info", "terminal"])
     self.origin_gate = util.get(data,
                                 ["airport", "origin", "info", "gate"])
     self.dest_iata = util.get(data,
                               ["airport", "destination", "code", "iata"])
     self.dest_icao = util.get(data,
                               ["airport", "destination", "code", "icao"])
     self.dest_name = util.get(data, ["airport", "destination", "name"])
     self.dest_city = util.get(
         data, ["airport", "destination", "position", "region", "city"])
     self.dest_country = util.get(
         data, ["airport", "destination", "position", "country", "name"])
     self.dest_country_code = util.get(
         data, ["airport", "destination", "position", "country", "code"])
     self.dest_offset = util.get(
         data, ["airport", "destination", "timezone", "offset"])
     self.dest_offset_abbr = util.get(
         data, ["airport", "destination", "timezone", "abbr"])
     self.dest_offset_dst = util.get(
         data, ["airport", "destination", "timezone", "isDst"])
     self.dest_terminal = util.get(
         data, ["airport", "destination", "info", "terminal"])
     self.dest_gate = util.get(data,
                               ["airport", "destination", "info", "gate"])
     self.sched_dep = util.get(data, ["time", "scheduled", "departure"])
     self.sched_arr = util.get(data, ["time", "scheduled", "arrival"])
     self.real_dep = util.get(data, ["time", "real", "departure"])
     self.real_arr = util.get(data, ["time", "real", "arrival"])
     self.dest_lat = util.get(
         data, ["airport", "destination", "position", "latitude"])
     self.dest_lng = util.get(
         data, ["airport", "destination", "position", "longitude"])
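
This flight parser relies on util.get walking a list of keys through nested dicts and returning None when any step is missing. A minimal sketch of such a helper, assumed here rather than taken from the original module:

def get(data, path, default=None):
    # walk nested dicts along the key path, returning the default on any miss
    for key in path:
        if not isinstance(data, dict) or key not in data:
            return default
        data = data[key]
    return data

flight = {"identification": {"number": {"default": "UA123"}}}
print(get(flight, ["identification", "number", "default"]))  # UA123
print(get(flight, ["airline", "code", "iata"]))               # None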
Exemple #59
0
def get_by_names():
    """
One of the main ``clusto`` operations. Parameters:

* Required parameter: At least one ``name`` parameter

Returns ``HTTP: 404`` when none of the requested entities exist and
``HTTP: 206`` when only some of the requested entities exist.

Examples:

.. code:: bash

    $ ${get} ${server_url}/by-names
    "Provide at least one name to get data from"
    HTTP: 412
    Content-type: application/json

    $ ${get} -d 'name=nonserver' ${server_url}/by-names
    [
        null
    ]
    HTTP: 404
    Content-type: application/json

    $ ${get} -d 'name=testserver1' -d 'name=nonserver' ${server_url}/by-names
    [
        "/basicserver/testserver1",
        null
    ]
    HTTP: 206
    Content-type: application/json

    $ ${get} -H 'Clusto-Mode: expanded' -d 'name=testserver1' -d 'name=testserver2' ${server_url}/by-names
    [
        {
            "attrs": [],
            "contents": [],
            "driver": "basicserver",
            "name": "testserver1",
            "parents": [
                "/pool/singlepool",
                "/pool/multipool"
            ]
        },
        {
            "attrs": [],
            "contents": [],
            "driver": "basicserver",
            "name": "testserver2",
            "parents": [
                "/pool/multipool"
            ]
        }
    ]
    HTTP: 200
    Content-type: application/json

    $ ${get} -d 'name=nonserver1' -d 'name=nonserver2' ${server_url}/by-names
    [
        null,
        null
    ]
    HTTP: 404
    Content-type: application/json

"""

    objs = []
    names = bottle.request.params.getall('name')
    if not names:
        return util.dumps('Provide at least one name to get data from', 412)

    mode = bottle.request.headers.get('Clusto-Mode', default='compact')
    for name in names:
        obj, status, msg = util.get(name)
        try:
            objs.append(util.show(obj, mode) if obj else None)
        except TypeError as te:
            return util.dumps('%s' % (te,), 409)

    return util.dumps(objs, 200 if all(objs) else 206 if any(objs) else 404)
Exemple #60
0
def symmetry(con, threshold=75):
    data = {}

    cur = con.cursor()
    cur.execute(
        "SELECT from_station_name, to_station_name, count(trip_id) num "
        "FROM trips WHERE from_station_name != to_station_name GROUP BY from_station_name, to_station_name HAVING num >= ?",
        (threshold, ))
    while True:
        rows = cur.fetchmany(1000)
        if not rows: break
        for row in rows:
            put_val(data, (row["from_station_name"], row["to_station_name"]),
                    "num", row["num"])

    print "have %d routes with at least %d trips" % (len(data), threshold)

    # drop pairs where either direction is at or below the trip-count threshold,
    # or where no reverse-direction trips exist at all
    for pair in list(data):
        rev_pair = tuple(reversed(pair))
        if (get(data, pair, "num", leaf=0) <= threshold or rev_pair not in data
                or get(data, rev_pair, "num", leaf=0) <= threshold):
            del data[pair]
            continue

    print "have %d routes remaining after filtering for reverse trip meeting threshold" % len(
        data)

    i = 0
    for pair in data:
        durs = []
        cur.execute(
            "SELECT trip_duration FROM trips WHERE from_station_name = ? "
            "AND to_station_name = ?", pair)
        while True:
            rows = cur.fetchmany(1000)
            if not rows: break
            for row in rows:
                durs.append(row["trip_duration"])

        durs_a = array(durs)
        put_val(data, pair, "mean", mean(durs_a))
        put_val(data, pair, "median", median(durs_a))
        put_val(data, pair, "std", std(durs_a))

        #print pair, data[pair]
        i += 1
        if i % 100 == 0:
            print "getting duration stats; %d routes remaining" % (len(data) -
                                                                   i)

    cur.close()

    plot_data = {}

    done = set()
    for pair in data:
        rev_pair = tuple(reversed(pair))
        if rev_pair in done: continue
        done.add(rev_pair)

        for param in ["num", "mean", "median", "std"]:
            val1 = get(data, pair, param)
            val2 = get(data, rev_pair, param)

            put_list(plot_data, param, "x", min(val1, val2))
            put_list(plot_data, param, "y", max(val1, val2))

    for param in ["num", "mean", "median", "std"]:
        fig = plt.figure()
        xs = plot_data[param]["x"]
        ys = plot_data[param]["y"]
        plt.scatter(xs, ys)
        plt.xlabel("min(%s)" % param)
        plt.ylabel("max(%s)" % param)
        plt.savefig("symmetry_%s.pdf" % param, bbox_inches="tight")
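
Exemple #60 uses three small helpers, get, put_val and put_list, over a dict keyed by (from_station, to_station) pairs. Plausible implementations consistent with how they are called above (assumptions, not the original module):

def put_val(data, key, field, value):
    # store one named value under a top-level key
    data.setdefault(key, {})[field] = value

def put_list(data, key, field, value):
    # append a value to a named list under a top-level key
    data.setdefault(key, {}).setdefault(field, []).append(value)

def get(data, key, field, leaf=None):
    # fetch a named value, falling back to the leaf default
    return data.get(key, {}).get(field, leaf)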