Example #1
 def get_user(self):
     request = self.request
     user = None
     if request.vars.connection_token:
         auth_url = "https://%s.api.oneall.com/connections/%s.json"  % \
                    (self.domain, request.vars.connection_token)
         auth_pw = "%s:%s" % (self.public_key, self.private_key)
         auth_pw = base64.b64encode(auth_pw)
         headers = dict(Authorization="Basic %s" % auth_pw)
         try:
             auth_info_json = fetch(auth_url, headers=headers)
             auth_info = json.loads(auth_info_json)
             data = auth_info['response']['result']['data']
             if data['plugin']['key'] == 'social_login':
                 if data['plugin']['data']['status'] == 'success':
                     userdata = data['user']
                     self.profile = userdata['identity']
                     source = self.profile['source']['key']
                     mapping = self.mappings.get(source,
                                                 self.mappings['default'])
                     user = mapping(self.profile)
         except (JSONDecodeError, KeyError):
             pass
         if user is None and self.on_login_failure:
             redirect(self.on_login_failure)
     return user
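Note: the snippet above authenticates the OneAll request by hand-building a Basic Authorization header and passing it to gluon.tools.fetch. A minimal standalone sketch of that pattern (the helper name and arguments are illustrative, not part of the example's project):

import base64
import json
from gluon.tools import fetch

def fetch_json_with_basic_auth(url, public_key, private_key):
    # fetch() returns the response body as a string; headers is a plain dict
    credentials = base64.b64encode("%s:%s" % (public_key, private_key))
    body = fetch(url, headers={'Authorization': 'Basic %s' % credentials})
    return json.loads(body)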
Example #2
File: cpe.py Project: v0re/Kvasir
def process_xml(filename=None, download=False, wipe=False):
    """
    Process the CPE data through an uploaded file or have it download directly
    from the MITRE webserver
    """
    try:
        from lxml import etree
    except ImportError:
        try:
            import xml.etree.cElementTree as etree
        except ImportError:
            try:
                import xml.etree.ElementTree as etree
            except:
                raise Exception("Unable to find valid ElementTree module.")

    if download:
        # grab cpe data from http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.2.xml
        from gluon.tools import fetch
        import sys
        try:
            logger.info(
                "Downloading http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.gz... Please wait..."
            )
            gz_cpedata = fetch(
                'http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.gz'
            )
            logger.info("Download complete. %s bytes received" %
                        (sys.getsizeof(gz_cpedata)))
        except Exception, e:
            raise Exception("Error downloading CPE XML file: %s" % (e))
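The snippet stops right after the download. A hedged sketch of the step that would typically follow, decompressing the gzipped bytes in memory before handing them to the etree module imported at the top of process_xml (standard-library calls only; gz_cpedata is the variable from the example):

import gzip
import xml.etree.ElementTree as etree   # or lxml.etree, as in process_xml above
from StringIO import StringIO

# gz_cpedata holds the raw .gz bytes returned by fetch(); decompress in memory
cpedata = gzip.GzipFile(fileobj=StringIO(gz_cpedata)).read()
root = etree.fromstring(cpedata)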
Example #3
def up_man(order):
    if order.tab != 'men':
        return 'not tab == "men"'

    # this is the payment for a level upgrade
    man = db.men[order.ref_id]
    if not man:
        return 'not found man'

    if man.up_order != order.id:
        return 'up_order != order.id'

    from gluon.tools import fetch
    url = LITEcash.url + LITEcash.check % ('%s.%s' %
                                           (order.bill_id, order.skey))
    #print url
    resp = fetch(url)
    #print resp
    import gluon.contrib.simplejson as sj
    res = sj.loads(resp)  # {'bill': bill_id }
    err = res.get('error')
    if err:
        return '%s' % err

    status = res['status']
    if status == 'CLOSED':
        from decimal import Decimal
        # update only once when this status is reached, then drop the order right away
        man.update_record(up_order=None, trust=man.trust + Decimal(1))
    elif status in ['EXPIRED', 'INVALID']:
        # forget this order
        man.update_record(up_order=None)

    order.update_record(status=status)
    return status
Example #4
def fetch_current_TNRS_context_names(request):
    try:
        # fetch the latest contextName values as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getContextsJSON_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        # as usual, this needs to be a POST (pass empty fetch_args)
        contextnames_response = fetch(fetch_url, data='')

        contextnames_json = simplejson.loads(contextnames_response)
        # start with LIFE group (incl. 'All life'), and add any other ordered suggestions
        ordered_group_names = unique_ordered_list(
            ['LIFE', 'PLANTS', 'ANIMALS'] + [g for g in contextnames_json])
        context_names = []
        for gname in ordered_group_names:
            # allow for eventual removal or renaming of expected groups
            if gname in contextnames_json:
                context_names += [
                    n.encode('utf-8') for n in contextnames_json[gname]
                ]

        # draftTreeName = str(ids_json['draftTreeName']).encode('utf-8')
        return (context_names)

    except Exception, e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
Example #5
def update_data():
    data = loads_json(fetch(configuration.get('app.cases_data_source_url')))
    casos = data['casos']
    dias = casos['dias']

    # db.casos.fecha.requires = IS_DATE(format=('%Y/%m/%d'))
    # db.casos.arribo_a_cuba_foco.requires = IS_DATE(format=('%Y/%m/%d'))
    # db.casos.consulta_medico.requires = IS_DATE(format=('%Y/%m/%d'))

    for dia in dias.keys():
        fecha = dias[dia]['fecha']
        if 'diagnosticados' not in dias[dia]:
            continue
        diagnosticados_dia = dias[dia]['diagnosticados']
        for diagnosticado in diagnosticados_dia:
            diagnosticado['arribo_a_cuba_foco'] = diagnosticado['arribo_a_cuba_foco'].replace('/', '-') if diagnosticado['arribo_a_cuba_foco'] else diagnosticado['arribo_a_cuba_foco']
            diagnosticado['consulta_medico'] = diagnosticado['consulta_medico'].replace('/', '-') if diagnosticado['consulta_medico'] else diagnosticado['consulta_medico']

            diagnosticado['codigo'] = diagnosticado['id']
            del diagnosticado['id']
            diagnosticado['fecha'] = fecha.replace('/', '-') if fecha else fecha
            diagnosticado['dia'] = dia
            db.casos.update_or_insert(db.casos.codigo == diagnosticado['codigo'], **diagnosticado)
            # db.casos.insert(**diagnosticado)
    db.commit()
    return True


    # print('akakak', data['casos'])
Example #6
def _fetch_duplicate_study_ids(study_DOI=None, study_ID=None):
    # Use the oti (docstore index) service to see if there are other studies in
    # the collection with the same DOI; return the IDs of any duplicate studies
    # found, or an empty list if there are no dupes.
    if not study_DOI:
        # if no DOI exists, there are no known duplicates
        return [ ]
    oti_base_url = api_utils.get_oti_base_url(request)
    fetch_url = '%s/singlePropertySearchForStudies' % oti_base_url
    try:
        dupe_lookup_response = fetch(
            fetch_url,
            data={
                "property": "ot:studyPublication",
                "value": study_DOI,
                "exact": False
            }
        )
    except:
        raise HTTP(400, traceback.format_exc())
    dupe_lookup_response = unicode(dupe_lookup_response, 'utf-8') # make sure it's Unicode!
    response_json = anyjson.loads(dupe_lookup_response)
    duplicate_study_ids = [x['ot:studyId'] for x in response_json['matched_studies']]
    # Remove this study's ID; any others that remain are duplicates
    try:
        duplicate_study_ids.remove(study_ID)
    except ValueError:
        # ignore error, if oti is lagging and doesn't have this study yet
        pass
    return duplicate_study_ids
Example #7
def fetch_current_TNRS_context_names(request):
    try:
        # fetch the latest contextName values as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getContextsJSON_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        # as usual, this needs to be a POST (pass empty fetch_args)
        contextnames_response = fetch(fetch_url, data='')

        contextnames_json = simplejson.loads( contextnames_response )
        # start with LIFE group (incl. 'All life'), and add any other ordered suggestions
        ordered_group_names = unique_ordered_list(['LIFE','PLANTS','ANIMALS'] + [g for g in contextnames_json])
        context_names = [ ]
        for gname in ordered_group_names:
            # allow for eventual removal or renaming of expected groups
            if gname in contextnames_json:
                context_names += [n.encode('utf-8') for n in contextnames_json[gname] ]

        # draftTreeName = str(ids_json['draftTreeName']).encode('utf-8')
        return (context_names)

    except Exception, e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
Example #8
 def get_user(self):
     request = self.request
     if request.vars.assertion:
         audience = self.audience
         issuer = self.issuer
         assertion = XML(request.vars.assertion, sanitize=True)
         verify_data = {"assertion": assertion, "audience": audience}
         auth_info_json = fetch(self.verify_url, data=verify_data)
         j = json.loads(auth_info_json)
         epoch_time = int(time.time() * 1000)  # we need 13 digit epoch time
         if (
             j["status"] == "okay"
             and j["audience"] == audience
             and j["issuer"].endswith(issuer)
             and j["expires"] >= epoch_time
         ):
             return dict(email=j["email"])
         elif self.on_login_failure:
             # print "status:  ", j["status"]=="okay", j["status"]
             # print "audience:", j["audience"]==audience, j["audience"], audience
             # print "issuer:  ", j["issuer"]==issuer, j["issuer"], issuer
             # print "expires:  ", j["expires"] >= epoch_time, j["expires"], epoch_time
             redirect(self.on_login_failure)
         else:
             redirect("https://login.persona.org")
     return None
Example #9
def process_xml(filename=None, download=False, wipe=False):
    """
    Process the CPE data through an uploaded file or have it download directly
    from the MITRE webserver
    """
    try:
        from lxml import etree
    except ImportError:
        try:
            import xml.etree.cElementTree as etree
        except ImportError:
            try:
                import xml.etree.ElementTree as etree
            except:
                raise Exception("Unable to find valid ElementTree module.")

    if download:
        # grab cpe data from http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.2.xml
        from gluon.tools import fetch
        import sys
        try:
            logger.info("Downloading http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.gz... Please wait...")
            gz_cpedata = fetch('http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.gz')
            logger.info("Download complete. %s bytes received" % (sys.getsizeof(gz_cpedata)))
        except Exception, e:
            raise Exception("Error downloading CPE XML file: %s" % (e))
Example #10
def from_livecoin(db, exchg):
    exchg_id = exchg.id
    for pair in db_client.get_exchg_pairs(db, exchg_id):
        if not pair.used: continue
        t1 = get_ticker(db, exchg_id, pair.curr1_id).upper()
        t2 = get_ticker(db, exchg_id, pair.curr2_id).upper()

        print t1, t2,  # 'pair.ticker:', pair.ticker,
        if Test: continue

        try:
            #if True:
            '''
            https://api.livecoin.net/exchange/ticker?currencyPair=EMC/BTC
            {"last":0.00051000,"high":0.00056000,"low":0.00042690,"volume":21150.15056000,
                "vwap":0.00049384964581547641,"max_bid":0.00056000,"min_ask":0.00042690,"best_bid":0.00046000,"best_ask":0.00056960} '''
            cryp_url = 'https://' + exchg.url + '/' + exchg.API + '?currencyPair=' + t1 + '/' + t2
            print cryp_url
            res = fetch(cryp_url)
            ## res = {"best_bid":0.00046000,"best_ask":0.00056960}
            res = json.loads(res)
            if type(res) != dict:
                continue
            if not res.get('best_bid'): continue
            buy = float(res['best_ask'])
            sell = float(res['best_bid'])
            #return dict(buy= buy, sell= sell)
            print sell, buy
            db_common.store_rates(db, pair, sell, buy)
        except Exception as e:
            #else:
            msg = "serv_rates %s :: %s" % (exchg.url, e)
            print msg
            continue
    db.commit()
Example #11
def from_btc_e_3(db, exchg):
    exchg_id = exchg.id
    pairs = []
    for pair in db_client.get_exchg_pairs(db, exchg_id):
        if not pair.used: continue
        t1 = get_ticker(db, exchg_id, pair.curr1_id)
        t2 = get_ticker(db, exchg_id, pair.curr2_id)
        if t1 and t2:
            pairs.append(t1 + '_' + t2)
    pairs = '-'.join(pairs)
    url = 'http://btc-e.com/api/3/ticker/' + pairs
    print url
    resp = fetch(url)
    res = json.loads(resp)
    for k, v in res.iteritems():
        print k[:3], k[4:],  # v
        curr1 = get_curr(db, exchg_id, k[:3])
        curr2 = get_curr(db, exchg_id, k[4:])
        if not curr1 or not curr2:
            print 'not curr found for serv rate'
            continue
        pair = db((db.exchg_pairs.curr1_id == curr1.id)
                  & (db.exchg_pairs.curr2_id == curr2.id)).select().first()
        if not pair:
            print 'pair not found in get_exchg_pairs'
            continue
        db_common.store_rates(db, pair, v['sell'], v['buy'])
        print 'updates:', v['sell'], v['buy']
    db.commit()
Example #12
def discover():

    from gluon.tools import fetch
    from gluon.tools import geocode
    address = '310 Mcmasters Street, Chapel Hill, NC, USA'
    (latitude, longitude) = geocode(address)
    url = 'http://openstates.org/api/v1//legislators/geo/?lat=' + str(latitude) + '&long=' + str(longitude) + '&apikey=c16a6c623ee54948bac2a010ea6fab70'
    url1 = 'http://congress.api.sunlightfoundation.com//legislators/locate?latitude=' + str(latitude) + '&longitude=' + str(longitude) + '&apikey=c16a6c623ee54948bac2a010ea6fab70'
    page = fetch(url)
    page1 = fetch(url1)

    import gluon
    data_list = gluon.contrib.simplejson.loads(page)
    data_list1 = gluon.contrib.simplejson.loads(page1)

    return dict(data_list=data_list,data_list1=data_list1,)
Example #13
    def request(self, path, args=None, post_args=None):
        """Fetches the given path in the Graph API.

        We translate args to a valid query string. If post_args is given,
        we send a POST request to the given path with the given arguments.
        """
        logging.info("in facebook request")
        if not args: args = {}
        if self.access_token:
            if post_args is not None:
                post_args["access_token"] = self.access_token
            else:
                args["access_token"] = self.access_token
        post_data = None if post_args is None else urllib.urlencode(post_args)
        logging.info("about to open url")
        #file = urllib.urlopen("https://graph.facebook.com/" + path + "?" +
        #                      urllib.urlencode(args), post_data)
        s=fetch("https://graph.facebook.com/" + path + "?" +
                              urllib.urlencode(args), post_args)
        logging.info("opened URL")
        try:
            logging.info("parsing")
            response = _parse_json(s) #file.read())
        finally:
            logging.info("closing")
            #file.close()
        if response.get("error"):
            raise GraphAPIError(response["error"]["code"],
                                response["error"]["message"])
        logging.info("returning " + repr(response))
        return response
Example #14
 def get_user(self):
     request = self.request
     user = None
     if request.vars.connection_token:
         auth_url = "https://%s.api.oneall.com/connections/%s.json"  % \
                    (self.domain, request.vars.connection_token)
         auth_pw = "%s:%s" % (self.public_key,self.private_key)
         auth_pw = base64.b64encode(auth_pw)
         headers = dict(Authorization="Basic %s" % auth_pw)
         try:
             auth_info_json = fetch(auth_url,headers=headers)
             auth_info = json.loads(auth_info_json)
             data = auth_info['response']['result']['data']
             if data['plugin']['key'] == 'social_login':
                 if data['plugin']['data']['status'] == 'success':
                     userdata = data['user']
                     self.profile = userdata['identity']
                     source = self.profile['source']['key']
                     mapping = self.mappings.get(source,self.mappings['default'])
                     user = mapping(self.profile)
         except (JSONDecodeError, KeyError):
             pass
         if user is None and self.on_login_failure:
                 redirect(self.on_login_failure)
     return user
Example #15
def door():
	#if not requires_user():
	#	redirect(URL('home'))
	requires_user()
	response.menu = []
	form=FORM()
	led_selection=""
	if form.accepts(request.vars,formname='gate'):
		if request.vars.led1 is None:
			request.vars.led1="0"
		else:
			request.vars.led1="1"
		if request.vars.led2 is None:
			request.vars.led2="0"
		else:
			request.vars.led2="1"
		if request.vars.led3 is None:
			request.vars.led3="0"
		else:
			request.vars.led3="1"
		if request.vars.led4 is None:
			request.vars.led4="0"
		else:
			request.vars.led4="1"
		if request.vars.led5 is None:
			request.vars.led5="0"
		else:
			request.vars.led5="1"
		led_selection=request.vars.led1 + request.vars.led2 + request.vars.led3 + request.vars.led4 + request.vars.led5
		#response.flash = 'form accepted'
		shacconfigs = db(ShacConfig.myindex==1).select()
		if len(shacconfigs) == 0:
			ShacConfig.insert(myindex=1,shac_url='http://192.168.1.80')
			shac_url='http://192.168.1.80'
		else:
			for config in shacconfigs:
				shac_url=config.shac_url
		import re
		from gluon.tools import fetch
		role_code = str(rolling_code())
		try:
			#mypage = fetch("http://127.0.0.1/gate?" + role_code + "&" + led_selection)
			#mypage = fetch("http://192.168.1.80/gate?" + role_code + "&" + led_selection)
			#mypage = fetch(shac_url + "/gate?" + role_code + "&" + led_selection)
			mypage = fetch(shac_url + "/door")
		except IOError:
			response.flash=T('Arduino SHAC or link to it down?! Code: ' + role_code + '&' + led_selection)
		else:
			p = re.compile('[a-z]+')
			if re.match( r'(.*)Welcome the gate(\.*)', mypage, ):
				response.flash=T('Gate access granted! Code: ' + role_code + '&' + led_selection)
			# else it failed
			else:
				response.flash=T('Gate access denied! Code: ' + role_code + '&' + led_selection)
	else:
		response.flash=T('Select LED sequence and click the gate to open')
	return dict(form=form, ledsequence=led_selection)
Example #16
def from_cryptsy(db, exchg):
    exchg_id = exchg.id
    ##print conn
    for pair in db_client.get_exchg_pairs(db, exchg_id):
        if not pair.used: continue
        t1 = get_ticker(db, exchg_id, pair.curr1_id)
        t2 = get_ticker(db, exchg_id, pair.curr2_id)
        '''
        v1 http://www.cryptsy.com/api.php?method=singlemarketdata&marketid=132
        v2 https://www.cryptsy.com/api/v2/markets/132
        pubapi2.cryptsy.com - (Amsterdam, Netherlands)
        DOGE - 132
        {"success":1,"return":{"markets":{"DOGE":{"marketid":"132","label":"DOGE\/BTC","lasttradeprice":"0.00000071","volume":"102058604.42108892","lasttradetime":"2015-07-07 09:30:31","primaryname":"Dogecoin","primarycode":"DOGE","secondaryname":"BitCoin","secondarycode":"BTC","recenttrades":[{"id":"98009366","time":"2015-07-07 
        '''
        print t1, t2, 'pair.ticker:', pair.ticker,
        if Test: continue

        try:
            #if True:
            #params = {'method': 'singlemarketdata', 'marketid': pair.ticker }
            cryp_url = 'https://' + exchg.url + '/api/v2/markets/' + pair.ticker + '/ticker'
            print cryp_url
            res = fetch(cryp_url)
            res = json.loads(res)
            if type(res) != dict:
                continue
            if not res.get('success'): continue
            if True:
                # v2
                # {"success":true,"data":{"id":"132","bid":4.7e-7,"ask":4.9e-7}}
                res = res['data']
                buy = res['ask']
                sell = res['bid']
            else:
                # v1
                rr = res['return']['markets'].get('DOGE')
                if not rr:
                    continue
                ll = rr['label']
                pair_ll = t1 + '/' + t2
                if ll.lower() != pair_ll.lower():
                    print 'll.lower() != pair_ll.lower()', ll.lower(
                    ), pair_ll.lower()
                    continue

                # the order book here is reversed, so buy/sell are swapped
                buy = rr['sellorders'][0]['price']
                sell = rr['buyorders'][0]['price']
            #return dict(buy= buy, sell= sell)
            print sell, buy
            db_common.store_rates(db, pair, sell, buy)
        except Exception as e:
            #else:
            msg = "serv_rates %s :: %s" % (exchg.url, e)
            print msg
            continue
    db.commit()
Example #17
 def fetch(self, url, callback, **kwargs):
     # Replace kwarg keys.
     kwargs['data'] = kwargs.pop('body', None)
     try:
         html = fetch(url, **kwargs)
         #@TODO: what if there was an error from fetch?
         callback(Storage({'body':html, 'error':None}))
     except Exception, e:
         logging.error("Exception during fetch", exc_info=True)
         callback(Storage({'body':None, 'error':e}))
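A short usage sketch for the callback-style wrapper above (caller and handler names are illustrative): the wrapper maps a Tornado-style body= keyword onto fetch's data= argument and always invokes the callback with a Storage carrying either body or error.

import logging

def on_response(result):
    # result is the Storage built by the wrapper: .body on success, .error on failure
    if result.error:
        logging.error("request failed: %s" % result.error)
    else:
        logging.info("got %d bytes" % len(result.body))

# `client` is assumed to be an instance of the class defining the fetch wrapper above
client.fetch('http://example.com/', on_response, body='q=web2py')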
Example #18
 def _fetch(cls, page_size=5, **data):
     return JSONDecoder().decode(
         fetch(url,
               data=dict({
                   "page-number": 1,
                   "page-size": int(page_size)
               }, **{
                   k: v
                   for k, v in data.iteritems()
                   if v and not k.startswith("_")
               })))
Example #19
def lite_wager_go(wager, LITEcash):
    url = LITEcash.url + LITEcash.go_wager % (wager.lite_wager_id,
                                              wager.lite_wager_key)
    print url
    try:
        resp = fetch(url)
        print resp
        r = json.loads(resp)
        error = r.get('error')
    except:
        error = 'fetch(%s) error!' % url

    return error
Example #20
def iugu_teste_py():
    from gluon.tools import fetch
    import urllib, base64
    url = "https://api.iugu.com/v1/invoices?"
    token = appconfig.get("iugu.token")
    base64string = base64.encodestring('%s:%s' % (token, "")).replace('\n', '')
    data = {}
    data['created_at_from'] = "2018-07-01"
    data['limit'] = 10
    data['start'] = 0
    url += urllib.urlencode(data)
    r = json.loads(fetch(url, headers={'Authorization': "Basic %s" % base64string}))
    return {'url': url, 'r': r}
Example #21
def taxonomy_version():
    view_dict = default_view_dict.copy()

    # load taxonomy-version history and basic stats
    ott = json.loads(fetch_local_ott_stats() or '[]')
    if len(ott) == 0:
        # report this error on the page
        view_dict['taxonomy_version'] = 'NO VERSIONS FOUND'
        view_dict['taxonomy_stats'] = ott
        return view_dict

    # Get OTT version from URL, or bounce to the latest version by default
    if len(request.args) == 0:
        # safer to sort by date-strings [yyyy-mm-dd] than version strings
        sorted_ott = sorted(ott, key=lambda v: v['date'], reverse=False)
        taxonomy_version = sorted_ott[-1].get('version')
        redirect(URL('opentree', 'about', 'taxonomy_version', 
            vars={}, 
            args=[taxonomy_version]))

    taxo_version = request.args[0]
    view_dict['taxonomy_version'] = taxo_version
    view_dict['taxonomy_stats'] = ott

    # fetch and render Markdown release notes as HTML
    from gluon.tools import fetch
    from gluon.contrib.markdown.markdown2 import markdown
    from urllib2 import HTTPError
    fetch_url = 'https://raw.githubusercontent.com/OpenTreeOfLife/reference-taxonomy/master/doc/{v}.md'.format(v=taxo_version)
    try:
        version_notes_response = fetch(fetch_url)
        # N.B. We assume here that any hyperlinks have the usual Markdown braces!
        version_notes_html = markdown(version_notes_response).encode('utf-8')
    except HTTPError:
        version_notes_html = None
    view_dict['taxonomy_version_notes'] = version_notes_html

    # List all synthesis releases that used this OTT version
    synth = json.loads(fetch_local_synthesis_stats() or '{}')
    related_releases = []
    for date in synth:
        synth_ott_version = synth[date]['OTT_version']
        if synth_ott_version:
            # If a draft was provided (eg, "ott2.9draft8"), truncate this
            # to specify the main version (in this case, "ott2.9")
            synth_ott_version = synth_ott_version.split('draft')[0]
        if synth_ott_version == taxo_version:
            related_releases.append(synth[date]['version'])
    view_dict['related_synth_releases'] = related_releases 

    return view_dict
Example #22
def import_file_onaccept(form):
    """
        When the import file is uploaded, do the import into the database
    """

    table = db.admin_import_file
    uploadfolder = table.file.uploadfolder
    filename = form.vars.file
    type = form.vars.type

    prefix, resourcename = type.split("_", 1)
    if type == "inv_inv_item":
        # Override the resourcename
        resourcename = "warehouse"
    # elif type == "hrm_person":
    #    # Override the prefix
    #    prefix = "pr"

    # This doesn't work as it doesn't pickup the resolvers from the controllers
    # resource = s3mgr.define_resource(prefix, resourcename)
    # template = os.path.join(request.folder, resource.XSLT_PATH, "s3csv",
    #                        "%s.xsl" % type)
    template = os.path.join(request.folder, "static", "formats", "s3csv", "%s.xsl" % type)
    filepath = os.path.join(uploadfolder, filename)
    # resource.import_xml(filepath, template=template)

    url = "%s/%s/%s.s3csv/create?filename=%s&transform=%s&ignore_errors=True" % (
        s3.base_url,
        prefix,
        resourcename,
        filepath,
        template,
    )

    import Cookie
    import urllib2
    from gluon.tools import fetch

    if db_string[0].find("sqlite") != -1:
        # Unlock database
        db.commit()

    # Keep Session
    cookie = Cookie.SimpleCookie()
    cookie[response.session_id_name] = response.session_id
    session._unlock(response)
    try:
        result = fetch(url, cookie=cookie)
        session.information = result.split("{", 1)[1].rstrip("}")
    except urllib2.URLError, exception:
        session.error = str(exception)
Example #23
def synthesis_release():
    view_dict = default_view_dict.copy()

    # Load each JSON document into a list or dict, so we can compile daily entries. 
    # NB: For simplicity and uniformity, filter these to use only simple dates
    # with no time component!
    # EXAMPLE u'2015-01-16T23Z' ==> u'2015-01-16'
    raw = json.loads(fetch_local_synthesis_stats() or '{}')
    # Pre-sort its raw date strings, so we can discard all the but latest info
    # for each date (e.g. we might toss the morning stats but keep the evening).
    sorted_dates = sorted(raw.keys(), reverse=False)
    synth = {}
    for d in sorted_dates:
        raw_data = raw[d]
        simple_date = _force_to_simple_date_string(d)
        synth[ simple_date ] = raw_data
        # this should overwrite data from earlier in the day

    if len(synth.keys()) == 0:
        # report this error on the page
        view_dict['release_version'] = 'NO RELEASES FOUND'
        view_dict['synthesis_stats'] = synth
        return view_dict

    # Get date or version from URL, or bounce to the latest release by default
    if len(request.args) == 0:
        release_date = sorted(synth.keys(), reverse=False)[-1]
        release_version = synth[release_date].get('version')
        redirect(URL('opentree', 'about', 'synthesis_release', 
            vars={}, 
            args=[release_version]))

    synth_release_version = request.args[0]
    view_dict['release_version'] = synth_release_version
    view_dict['synthesis_stats'] = synth

    # fetch and render Markdown release notes as HTML
    from gluon.tools import fetch
    from gluon.contrib.markdown.markdown2 import markdown
    from urllib2 import HTTPError
    fetch_url = 'https://raw.githubusercontent.com/OpenTreeOfLife/germinator/master/doc/ot-synthesis-{v}.md'.format(v=synth_release_version)
    try:
        version_notes_response = fetch(fetch_url)
        # N.B. We assume here that any hyperlinks have the usual Markdown braces!
        version_notes_html = markdown(version_notes_response).encode('utf-8')
    except HTTPError:
        version_notes_html = None
    view_dict['synthesis_release_notes'] = version_notes_html

    return view_dict
Example #24
def synthesis_release():
    view_dict = default_view_dict.copy()

    # Load each JSON document into a list or dict, so we can compile daily entries. 
    # NB: For simplicity and uniformity, filter these to use only simple dates
    # with no time component!
    # EXAMPLE u'2015-01-16T23Z' ==> u'2015-01-16'
    raw = json.loads(fetch_local_synthesis_stats() or '{}')
    # Pre-sort its raw date strings, so we can discard all the but latest info
    # for each date (e.g. we might toss the morning stats but keep the evening).
    sorted_dates = sorted(raw.keys(), reverse=False)
    synth = {}
    for d in sorted_dates:
        raw_data = raw[d]
        simple_date = _force_to_simple_date_string(d)
        synth[ simple_date ] = raw_data
        # this should overwrite data from earlier in the day

    if len(synth.keys()) == 0:
        # report this error on the page
        view_dict['release_version'] = 'NO RELEASES FOUND'
        view_dict['synthesis_stats'] = synth
        return view_dict

    # Get date or version from URL, or bounce to the latest release by default
    if len(request.args) == 0:
        release_date = sorted(synth.keys(), reverse=False)[-1]
        release_version = synth[release_date].get('version')
        redirect(URL('opentree', 'about', 'synthesis_release', 
            vars={}, 
            args=[release_version]))

    synth_release_version = request.args[0]
    view_dict['release_version'] = synth_release_version
    view_dict['synthesis_stats'] = synth

    # fetch and render Markdown release notes as HTML
    from gluon.tools import fetch
    from gluon.contrib.markdown.markdown2 import markdown
    from urllib2 import HTTPError
    fetch_url = 'https://raw.githubusercontent.com/OpenTreeOfLife/germinator/master/doc/ot-synthesis-{v}.md'.format(v=synth_release_version)
    try:
        version_notes_response = fetch(fetch_url)
        # N.B. We assume here that any hyperlinks have the usual Markdown braces!
        version_notes_html = markdown(version_notes_response).encode('utf-8')
    except HTTPError:
        version_notes_html = None
    view_dict['synthesis_release_notes'] = version_notes_html

    return view_dict
Example #25
    def get_invoices(self, page=0, filters=dict()):
        # type: (int, dict) -> list

        url = self.url + 'invoices?'
        payload = {'limit': (page + 1) * 1000, 'start': page * 1000}

        if 'customer_id' in filters:
            payload['customer_id'] = filters['customer_id']
            url += urllib.urlencode(payload)
            return [current.cache.ram(
                "invoices_ID_" + str(payload['customer_id']),
                lambda: json.loads(fetch(url, headers=self.headers)),
                144000  # 4h = 144000seg
            )]

        if 'created_at_from' in filters:
            if 'created_at_to' in filters:
                d = year_month_dict(from_date=filters['created_at_from'], to_date=filters['created_at_to'])
            else:
                d = year_month_dict(from_date=filters['created_at_from'])
        elif 'created_at_to' in filters:
            d = year_month_dict(to_date=filters['created_at_to'])
        else:
            url += urllib.urlencode(filters)
            return [json.loads(fetch(url, headers=self.headers))]

        invoices_monthly = []
        for key in d:
            payload['created_at_from'] = d[key]['first_day']
            payload['created_at_to'] = d[key]['last_day']
            url = self.url + 'invoices?' + urllib.urlencode(payload)
            invoices_monthly.append(current.cache.ram(
                "invoices_CA_" + key,
                lambda: json.loads(fetch(url, headers=self.headers)),
                144000  # 4h = 144000seg
            ))
        return invoices_monthly
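The method above memoizes fetch calls in web2py's RAM cache so repeated invoice lookups don't hit the remote API. A minimal sketch of that caching pattern on its own (function name, cache key, and expiry are illustrative), assuming gluon.current.cache is configured as in the snippet:

import json
from gluon import current
from gluon.tools import fetch

def cached_fetch_json(url, cache_key, expire_seconds=14400):
    # cache.ram(key, f, time_expire) only calls f() when the key is missing or stale
    return current.cache.ram(cache_key,
                             lambda: json.loads(fetch(url)),
                             expire_seconds)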
Example #26
    def _new_nexson_with_crossref_metadata(doi, ref_string, include_cc0=False):
        if doi:
            # use the supplied DOI to fetch study metadata
            search_term = doi
        elif ref_string:
            # use the supplied reference text to fetch study metadata
            search_term = ref_string

        # look for matching studies via CrossRef.org API
        doi_lookup_response = fetch(
            'http://search.crossref.org/dois?%s' % 
            urlencode({'q': search_term})
        )
        doi_lookup_response = unicode(doi_lookup_response, 'utf-8')   # make sure it's Unicode!
        matching_records = anyjson.loads(doi_lookup_response)

        # if we got a match, grab the first (probably only) record
        if len(matching_records) > 0:
            match = matching_records[0];

            # Convert HTML reference string to plain text
            raw_publication_reference = match.get('fullCitation', '')
            ref_element_tree = web2pyHTMLParser(raw_publication_reference).tree
            # root of this tree is the complete mini-DOM
            ref_root = ref_element_tree.elements()[0]
            # reduce this root to plain text (strip any tags)

            meta_publication_reference = ref_root.flatten().decode('utf-8')
            meta_publication_url = match.get('doi', u'')  # already in URL form
            meta_year = match.get('year', u'')
            
        else:
            # Add a bogus reference string to signal the lack of results
            if doi:
                meta_publication_reference = u'No matching publication found for this DOI!'
            else:
                meta_publication_reference = u'No matching publication found for this reference string'
            meta_publication_url = u''
            meta_year = u''

        # add any found values to a fresh NexSON template
        nexson = get_empty_nexson(BY_ID_HONEY_BADGERFISH, include_cc0=include_cc0)
        nexml_el = nexson['nexml']
        nexml_el[u'^ot:studyPublicationReference'] = meta_publication_reference
        if meta_publication_url:
            nexml_el[u'^ot:studyPublication'] = {'@href': meta_publication_url}
        if meta_year:
            nexml_el[u'^ot:studyYear'] = meta_year
        return nexson
Example #27
def taxonomy_version():
    view_dict = default_view_dict.copy()

    # load taxonomy-version history and basic stats
    ott = json.loads(fetch_local_ott_stats() or '[]')
    if len(ott) == 0:
        # report this error on the page
        view_dict['taxonomy_version'] = 'NO VERSIONS FOUND'
        view_dict['taxonomy_stats'] = ott
        return view_dict

    # Get OTT version from URL, or bounce to the latest version by default
    if len(request.args) == 0:
        taxonomy_version = sorted([v.get('version') for v in ott], reverse=False)[-1]
        redirect(URL('opentree', 'about', 'taxonomy_version', 
            vars={}, 
            args=[taxonomy_version]))

    taxo_version = request.args[0]
    view_dict['taxonomy_version'] = taxo_version
    view_dict['taxonomy_stats'] = ott

    # fetch and render Markdown release notes as HTML
    from gluon.tools import fetch
    from gluon.contrib.markdown.markdown2 import markdown
    from urllib2 import HTTPError
    fetch_url = 'https://raw.githubusercontent.com/OpenTreeOfLife/reference-taxonomy/master/doc/{v}.md'.format(v=taxo_version)
    try:
        version_notes_response = fetch(fetch_url)
        # N.B. We assume here that any hyperlinks have the usual Markdown braces!
        version_notes_html = markdown(version_notes_response).encode('utf-8')
    except HTTPError:
        version_notes_html = None
    view_dict['taxonomy_version_notes'] = version_notes_html

    # List all synthesis releases that used this OTT version
    synth = json.loads(fetch_local_synthesis_stats() or '{}')
    related_releases = []
    for date in synth:
        synth_ott_version = synth[date]['OTT_version']
        if synth_ott_version:
            # If a draft was provided (eg, "ott2.9draft8"), truncate this
            # to specify the main version (in this case, "ott2.9")
            synth_ott_version = synth_ott_version.split('draft')[0]
        if synth_ott_version == taxo_version:
            related_releases.append(synth[date]['version'])
    view_dict['related_synth_releases'] = related_releases 

    return view_dict
Example #28
def lite_wager_end(wager, winned):
    from gluon.tools import fetch

    args = ''
    for w in winned:
        cond = db.wager_conds[w]
        if not cond: continue
        args += '%s/' % cond.bill_id
    url = LITEcash.url + LITEcash.end_wager % (wager.lite_wager_id,
                                               wager.lite_wager_key, args)
    #print url
    resp = fetch(url)
    import gluon.contrib.simplejson as sj
    res = sj.loads(resp)
    return res
Example #29
    def get_user(self):
        request = self.request
        if request.vars.token:
            user = Storage()
            data = urllib.urlencode(dict(apiKey = self.api_key, token=request.vars.token))
            auth_info_json = fetch(self.auth_url+'?'+data)
            auth_info = json.loads(auth_info_json)

            if auth_info['stat'] == 'ok':
                self.profile = auth_info['profile']
                provider = re.sub('[^\w\-]','',self.profile['providerName'])
                user = self.mappings.get(provider,self.mappings.default)(self.profile)
                return user
            elif self.on_login_failure:
                redirect(self.on_login_failure)
        return None
Example #30
 def get_user(self):
     request = self.request
     user = None
     if request.vars.token:
         try:
             auth_url = self.auth_base_url + self.api_secret + "/" + request.vars.token
             json_data = fetch(auth_url, headers={'User-Agent': "LoginRadius - Python - SDK"})
             self.profile = json.loads(json_data)
             provider = self.profile['Provider']
             mapping = self.mappings.get(provider, self.mappings['default'])
             user = mapping(self.profile)
         except (JSONDecodeError, KeyError):
             pass
         if user is None and self.on_login_failure:
             redirect(self.on_login_failure)
     return user
Example #31
 def get_user(self):
     request = self.request
     user = None
     if request.vars.token:
         try:
             auth_url = self.auth_base_url + self.api_secret + "/" + request.vars.token
             json_data = fetch(auth_url, headers={'User-Agent': "LoginRadius - Python - SDK"})
             self.profile = json.loads(json_data)
             provider = self.profile['Provider']
             mapping = self.mappings.get(provider, self.mappings['default'])
             user = mapping(self.profile)
         except (JSONDecodeError, KeyError):
             pass
         if user is None and self.on_login_failure:
             redirect(self.on_login_failure)
     return user
Example #32
    def get_user(self):
        request = self.request
        if request.vars.token:
            user = Storage()
            data = urllib.urlencode(dict(apiKey=self.api_key, token=request.vars.token))
            auth_info_json = fetch(self.auth_url + "?" + data)
            auth_info = json.loads(auth_info_json)

            if auth_info["stat"] == "ok":
                self.profile = auth_info["profile"]
                provider = re.sub("[^\w\-]", "", self.profile["providerName"])
                user = self.mappings.get(provider, self.mappings.default)(self.profile)
                return user
            elif self.on_login_failure:
                redirect(self.on_login_failure)
        return None
Example #33
 def get_user(self):
     request = self.request
     if request.vars.token:
         user = Storage()
         data = urllib.urlencode(dict(apiKey = self.api_key, token=request.vars.token))
         auth_info_json = fetch(self.auth_url+'?'+data)
         auth_info = json.loads(auth_info_json)
         
         if auth_info['stat'] == 'ok':
             self.profile = auth_info['profile']
             provider = re.sub('[^\w\-]','',self.profile['providerName'])
             user = self.mappings.get(provider,self.mappings.default)(self.profile)
             return user
         elif self.on_login_failure:
             redirect(self.on_login_failure)
     return None
Example #34
def extract1(url,l1): #extracts the top_result urls from the first page
    ret=[]
    html=fetch(url) #fetches the html page of the given url and stores it in a string html
    html_splitted=html.split("<A HREF=\"citation.cfm") #splitting the fetched string
    ht=html_splitted[0]
    del html_splitted[0]
    k=html_splitted[0].split('>')
    if k[1][:-3]==l1[0]:#if the user inputs title of a paper
        k=html_splitted[0].find('\"')
        s="http://dl.acm.org/citation.cfm"+html_splitted[0][:k]
        ret.append(s.split('&preflayout')[0]+'&preflayout=flat')
    else: #if user inputs some keywords
        for i in range(0,l1[2]):
            k=html_splitted[i].find('\"')
            s="http://dl.acm.org/citation.cfm"+html_splitted[i][:k]
            ret.append(s.split('&preflayout')[0]+'&preflayout=flat')
    return ret
Example #35
def in_btc():
    man_id = session.man_id
    if not man_id:
        return 'session error 1'
    man_bal_id = request.args(0)
    if not man_bal_id:
        return 'empty man_bal_id'
    man_bal = db.man_bals[man_bal_id]
    if not man_bal:
        return 'empty man_bal'
    if man_bal.man_id != man_id:
        return 'session error 2'

    man = db.men[man_bal.man_id]
    if not man:
        return 'error man'

    cash = db.cash[man_bal.cash_id]
    if not cash:
        return 'error cash'

    if cash.system_id != myconf.take('cash.bitcoin_id', cast=int):
        return T('Under development')

    if not man_bal.dep_bill or len(man_bal.dep_bill) < 2:
        ## make a bill on LITE.cash

        url = 'http://lite.cash/api_bill/make.json/325?order=%s' % man.ref_key
        #print url
        #return url

        from gluon.tools import fetch
        resp = fetch(url)
        #print resp
        import gluon.contrib.simplejson as sj
        if not resp[:2].isdigit():
            # if this is not a number, it means there was an error
            res = sj.loads(resp)  # {'bill': bill_id }
            err = res.get('error')
            if err:
                return dict(err=err)

        ## bill_id, _, skey = resp.partition('.')
        man_bal.update_record(dep_bill=resp)

    redirect('http://lite.cash/bill/show/' + man_bal.dep_bill)
Example #36
def process_exploits(filename=None):
    """
    Process Canvas Exploits.xml file into the database
    """

    localdb = current.globalenv['db']

    if filename is None:
        expurl = 'http://exploitlist.immunityinc.com/home/serve/live'
        from gluon.tools import fetch
        import sys
        try:
            print("Downloading CANVAS Exploits XML file... Please wait...")
            xmldata = fetch(expurl)
            print("Download complete. %s bytes received" % (sys.getsizeof(xmldata)))
        except Exception, e:
            raise Exception("Error downloading CPE XML file: %s" % (e))
Example #37
 def get_user(self):
     request = self.request
     if request.vars.assertion:
         audience = self.audience
         issuer = self.issuer
         assertion = XML(request.vars.assertion, sanitize=True)
         verify_data = {'assertion': assertion, 'audience': audience}
         auth_info_json = fetch(self.verify_url, data=verify_data)
         j = json.loads(auth_info_json)
         epoch_time = int(time.time() * 1000)  # we need 13 digit epoch time
         if j["status"] == "okay" and j["audience"] == audience and j['issuer'] == issuer and j['expires'] >= epoch_time:
             return dict(email=j['email'])
         elif self.on_login_failure:
             redirect('http://google.com')
         else:
             redirect('http://google.com')
     return None
Example #38
def download_subtree():
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    node_name = request.args(2)
    import cStringIO
    import contenttype as c
    s = cStringIO.StringIO()

    try:
        # fetch the Newick tree as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # use the appropriate web service for this ID type
        fetch_url = method_dict['getDraftSubtree_url']
        if id_type == 'ottol-id':
            fetch_args = {'ott_id': Number(node_or_ottol_id)}
        else:
            fetch_args = {'node_id': node_or_ottol_id}
        fetch_args['format'] = 'newick'
        fetch_args['height_limit'] = -1
        # TODO: allow for dynamic height, based on max tips?

        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url

        # apparently this needs to be a POST, or it just describes the API
        tree_response = fetch(fetch_url, data=fetch_args)
        tree_json = simplejson.loads(tree_response)
        newick_text = str(tree_json.get('newick',
                                        'NEWICK_NOT_FOUND')).encode('utf-8')
        s.write(newick_text)

    except Exception, e:
        # throw 403 or 500 or just leave it
        if id_type == 'ottol-id':
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s):\n\n%s'
                % (node_or_ottol_id, node_name, e))
        else:
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s):\n\n%s'
                % (node_or_ottol_id, node_name, e))
Example #39
def _get_latest_synthesis_sha_for_study_id(study_id):
    # Fetch this SHA from treemachine. If not found in contributing studies, return None
    try:
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        source_list_response = fetch(fetch_url, data='')
        source_list = simplejson.loads(source_list_response)

        # split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
        contributing_study_info = {
        }  # store (unique) study IDs as keys, commit SHAs as values

        for source_desc in source_list:
            if source_desc == 'taxonomy':
                continue
            source_parts = source_desc.split('_')
            # add default prefix 'pg' to study ID, if not found
            if source_parts[0].isdigit():
                # prepend with default namespace 'pg'
                source_study_id = 'pg_%s' % source_parts[0]
            else:
                source_study_id = '_'.join(source_parts[0:2])
            if len(source_parts) == 4:
                commit_SHA_in_synthesis = source_parts[3]
            else:
                commit_SHA_in_synthesis = None
            # use a local name so the study_id argument is not clobbered
            contributing_study_info[source_study_id] = commit_SHA_in_synthesis

        return contributing_study_info.get(study_id, '')

    except Exception, e:
        # throw 403 or 500 or just leave it
        raise HTTP(
            500,
            T('Unable to retrieve latest synthesis SHA for study {u}'.format(
                u=study_id)))
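For clarity, a small worked illustration of the source-description parsing above; only the '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}' shape comes from the comment in the code, and the concrete values here are made up:

source_desc = 'pg_2827_tree6577_da5ce9e'        # illustrative value only
source_parts = source_desc.split('_')           # ['pg', '2827', 'tree6577', 'da5ce9e']
source_study_id = '_'.join(source_parts[0:2])   # 'pg_2827'
commit_SHA_in_synthesis = source_parts[3]       # 'da5ce9e'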
Example #40
def process_exploits(filename=None):
    """
    Process Canvas Exploits.xml file into the database
    """

    localdb = current.globalenv['db']

    if filename is None:
        expurl = 'http://exploitlist.immunityinc.com/home/serve/live'
        from gluon.tools import fetch
        import sys
        try:
            print("Downloading CANVAS Exploits XML file... Please wait...")
            xmldata = fetch(expurl)
            print("Download complete. %s bytes received" %
                  (sys.getsizeof(xmldata)))
        except Exception, e:
            raise Exception("Error downloading CPE XML file: %s" % (e))
Example #41
 def get_user(self):
     request = self.request
     if request.vars.token:
         user = Storage()
         data = urllib.urlencode(dict(token = request.vars.token))
         auth_info_json = fetch(self.auth_url+'?'+data)
         #print auth_info_json
         auth_info = json.loads(auth_info_json)
         if auth_info["identity"] != None:
             self.profile = auth_info
             provider = self.profile["provider"]
             user = self.mappings.get(provider, self.mappings.default)(self.profile)
             #user["password"] = ???
             #user["avatar"] = ???
             return user
         elif self.on_login_failure:
             redirect(self.on_login_failure)
     return None
Example #42
    def get_user(self):
        import string
        request = self.environment.request

        if request.vars.token:
            user = Storage()
            data = urllib.urlencode(
                dict(apiKey=self.api_key, token=request.vars.token))
            auth_info_json = fetch("?".join([self.auth_url, data]))
            auth_info = json.loads(auth_info_json)
            if auth_info['stat'] == 'ok':
                self.profile = auth_info['profile']
                provider = self.profile['providerName']
                provider = ''.join(c for c in provider
                                   if c in string.ascii_letters)
                for field in self.auth.settings.table_user.fields:
                    user[field] = self.get_mapping(provider, field)
                if self.on_mapped and user:
                    user = self.on_mapped(user, provider)
                if self.allow_local:
                    db = self.db
                    user_table = self.auth.settings.table_user
                    if 'username' in user_table.fields:
                        username = 'username'
                    else:
                        username = 'email'
                    existing = db(
                        user_table[username] == user[username]).select()
                    if len(existing):
                        dbuser = existing.first()
                        if dbuser[self.auth.settings.password_field] != None:
                            self.environment.session.flash = '%s already in use' % username.capitalize(
                            )
                            return None
                        if 'registration_key' in user_table.fields:
                            if dbuser['registration_key']:
                                self.environment.session.flash = '%s already in use' % username.capitalize(
                                )
                                return None
                return user
            else:
                return None
                #auth_info['err']['msg']
        return None
Example #43
def download_subtree():
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    max_depth = request.args(2)
    node_name = request.args(3)
    import cStringIO
    import contenttype as c
    s = cStringIO.StringIO()

    try:
        # fetch the Newick tree as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # use the appropriate web service for this ID type
        if id_type == 'ottol-id':
            fetch_url = method_dict['getDraftTreeForOttolID_url']
            fetch_args = {'ottId': node_or_ottol_id, 'maxDepth': max_depth}
        else:
            fetch_url = method_dict['getDraftTreeForNodeID_url']
            fetch_args = {'nodeID': node_or_ottol_id, 'maxDepth': max_depth}
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        # apparently this needs to be a POST, or it just describes the API
        tree_response = fetch(fetch_url, data=fetch_args)
        tree_json = simplejson.loads(tree_response)
        newick_text = str(tree_json['tree']).encode('utf-8')
        s.write(newick_text)

    except Exception, e:
        # throw 403 or 500 or just leave it
        if id_type == 'ottol-id':
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s) with max depth %s:\n\n%s'
                % (node_or_ottol_id, node_name, max_depth, e))
        else:
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s) with max depth %s:\n\n%s'
                % (node_or_ottol_id, node_name, max_depth, e))
Example #44
    def get_user(self):
        request = self.request
        # Janrain now sends the token via both a POST body and the query
        # string, so we should keep only one of these.
        token = request.post_vars.token or request.get_vars.token
        if token:
            user = Storage()
            data = urllib.urlencode(dict(apiKey=self.api_key, token=token))
            auth_info_json = fetch(self.auth_url + '?' + data)
            auth_info = json.loads(auth_info_json)

            if auth_info['stat'] == 'ok':
                self.profile = auth_info['profile']
                provider = re.sub('[^\w\-]', '', self.profile['providerName'])
                user = self.mappings.get(provider,
                                         self.mappings.default)(self.profile)
                return user
            elif self.on_login_failure:
                redirect(self.on_login_failure)
        return None
Example #45
def extract1(url, l1):  #extracts the top_result urls from the first page
    ret = []
    html = fetch(
        url
    )  #fetches the html page of the given url and stores it in a string html
    html_splitted = html.split(
        "<A HREF=\"citation.cfm")  #splitting the fetched string
    ht = html_splitted[0]
    del html_splitted[0]
    k = html_splitted[0].split('>')
    if k[1][:-3] == l1[0]:  #if the user inputs title of a paper
        k = html_splitted[0].find('\"')
        s = "http://dl.acm.org/citation.cfm" + html_splitted[0][:k]
        ret.append(s.split('&preflayout')[0] + '&preflayout=flat')
    else:  #if user inputs some keywords
        for i in range(0, l1[2]):
            k = html_splitted[i].find('\"')
            s = "http://dl.acm.org/citation.cfm" + html_splitted[i][:k]
            ret.append(s.split('&preflayout')[0] + '&preflayout=flat')
    return ret
Example #46
def _get_latest_synthesis_sha_for_study_id( study_id ):
    # Fetch this SHA from treemachine. If not found in contributing studies, return None
    try:
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        source_list_response = fetch(fetch_url, data='')
        source_list = simplejson.loads( source_list_response )

        # split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
        contributing_study_info = { }   # store (unique) study IDs as keys, commit SHAs as values

        for source_desc in source_list:
            if source_desc == 'taxonomy':
                continue
            source_parts = source_desc.split('_')
            # add the default prefix 'pg' to the source's study ID, if missing;
            # use a separate name so the study_id argument is not clobbered
            if source_parts[0].isdigit():
                # prepend with default namespace 'pg'
                source_study_id = 'pg_%s' % source_parts[0]
            else:
                source_study_id = '_'.join(source_parts[0:2])
            if len(source_parts) == 4:
                commit_SHA_in_synthesis = source_parts[3]
            else:
                commit_SHA_in_synthesis = None
            contributing_study_info[ source_study_id ] = commit_SHA_in_synthesis

        return contributing_study_info.get( study_id, '')

    except Exception, e:
        # throw 403 or 500 or just leave it
        raise HTTP(500, T('Unable to retrieve latest synthesis SHA for study {u}'.format(u=study_id)))
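The string surgery above is easier to follow with one concrete, made-up source description of the expected '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}' shape:

source_desc = 'pg_2359_5284_a2c48df9'          # hypothetical entry from the source list
source_parts = source_desc.split('_')          # ['pg', '2359', '5284', 'a2c48df9']
source_study_id = '_'.join(source_parts[0:2])  # 'pg_2359' (a bare '2359' would get the 'pg_' prefix)
commit_sha = source_parts[3] if len(source_parts) == 4 else None  # 'a2c48df9'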
Example #47
0
def from_poloniex(db, exchg):
    exchg_id = exchg.id
    ##PRINT_AS_FUNC and print(conn) or print conn
    for pair in db_common.get_exchg_pairs(db, exchg_id):
        if not pair.used: continue
        t1 = get_ticker(db, exchg_id, pair.curr1_id)
        t2 = get_ticker(db, exchg_id, pair.curr2_id)
        '''
        https://poloniex.com/public?command=returnTradeHistory&currencyPair=BTC_DOGE
    [{"globalTradeID":13711923,"tradeID":469269,"date":"2016-01-17 11:37:16","type":"sell",
    "rate":"0.00000039","amount":"273.78000000","total":"0.00010677"},        '''
        print(t1, t2, 'pair.ticker:', pair.ticker)
        if Test: continue

        try:
            #if True:
            #params = {'method': 'singlemarketdata', 'marketid': pair.ticker }
            #cryp_url = 'https://' + exchg.url + '/public?command=returnTradeHistory&currencyPair=' + pair.ticker
            cryp_url = 'https://' + exchg.url + '/public?command=returnOrderBook&depth=1&currencyPair=' + pair.ticker
            ## res = {"asks":[["0.00001852",59.39524844]],"bids":[["0.00001851",710.99297675]],"isFrozen":"0"}
            print(cryp_url)
            res = fetch(cryp_url)
            res = json.loads(res)
            if type(res) != dict:
                continue
            if not res.get('isFrozen'): continue  # no isFrozen field: not a valid order-book response
            # invert the top-of-book prices to get rates in the opposite direction
            sell = 1 / float(res['asks'][0][0])
            buy = 1 / float(res['bids'][0][0])
            #return dict(buy= buy, sell= sell)
            print(sell, buy)
            db_common.store_rates(db, pair, sell, buy)
        except Exception as e:
            #else:
            msg = "serv_rates %s :: %s" % (exchg.url, e)
            print(msg)
            continue
    db.commit()
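For the sample order book quoted in the docstring above, the inversion of the top-of-book prices works out as follows (a standalone sketch on that sample, not live data):

import json

sample = '{"asks":[["0.00001852",59.39524844]],"bids":[["0.00001851",710.99297675]],"isFrozen":"0"}'
book = json.loads(sample)
sell = 1 / float(book['asks'][0][0])  # ~53995.68, inverse of the best ask price
buy = 1 / float(book['bids'][0][0])   # ~54024.85, inverse of the best bid price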
Example #48
0
def download_subtree():
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    max_depth = request.args(2)
    node_name = request.args(3)
    import cStringIO
    import contenttype as c

    s = cStringIO.StringIO()

    try:
        # fetch the Newick tree as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        # use the appropriate web service for this ID type
        if id_type == "ottol-id":
            fetch_url = "http://opentree-dev.bio.ku.edu:7474/db/data/ext/GoLS/graphdb/getDraftTreeForOttolID"
            fetch_args = {"ottolID": node_or_ottol_id, "maxDepth": max_depth}
        else:
            fetch_url = "http://opentree-dev.bio.ku.edu:7474/db/data/ext/GoLS/graphdb/getDraftTreeForNodeID"
            fetch_args = {"nodeID": node_or_ottol_id, "maxDepth": max_depth}

        # give the error handler below something to report if the fetch fails
        newick_text = "NEWICK_NOT_FETCHED"
        # apparently this needs to be a POST, or it just describes the API
        tree_response = fetch(fetch_url, data=fetch_args)
        tree_json = simplejson.loads(tree_response)
        newick_text = tree_json["tree"].encode("utf-8")
        s.write(newick_text)

    except Exception, e:
        # throw 403 or 500 or just leave it
        if id_type == "ottol-id":
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s) with max depth %s:\n\n%s'
                % (node_or_ottol_id, node_name, max_depth, newick_text)
            )
        else:
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s) with max depth %s:\n\n%s'
                % (node_or_ottol_id, node_name, max_depth, newick_text)
            )
Example #49
0
def download_subtree():
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    node_name = request.args(2)
    import cStringIO
    import contenttype as c
    s = cStringIO.StringIO()

    try:
        # fetch the Newick tree as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # use the appropriate web service for this ID type
        fetch_url = method_dict['getDraftSubtree_url']
        newick_text = 'NEWICK_NOT_FETCHED'
        if id_type == 'ottol-id':
            fetch_args = {'ott_id': int(node_or_ottol_id)}
        else:
            fetch_args = {'node_id': node_or_ottol_id}
        fetch_args['format'] = 'newick'
        fetch_args['height_limit'] = -1  # TODO: allow for dynamic height, based on max tips?

        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url

        # apparently this needs to be a POST, or it just describes the API
        tree_response = fetch(fetch_url, data=fetch_args)
        tree_json = simplejson.loads( tree_response )
        newick_text = str(tree_json.get('newick', 'NEWICK_NOT_FOUND')).encode('utf-8')
        s.write( newick_text )

    except Exception, e:
        # throw 403 or 500 or just leave it
        if id_type == 'ottol-id':
            s.write( u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s):\n\n%s' % (node_or_ottol_id, node_name, newick_text) )
        else:
            s.write( u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s):\n\n%s' % (node_or_ottol_id, node_name, newick_text) )
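    # Hypothetical continuation (not in the original listing): the cStringIO buffer
    # and the otherwise unused `contenttype` import suggest the controller ends by
    # streaming the buffer back as a download; the filename pattern is an assumption.
    filename = 'subtree-%s.tre' % node_or_ottol_id
    response.headers['Content-Type'] = c.contenttype(filename)
    response.headers['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return s.getvalue()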
Example #50
0
def make_bill():

    cond_id = request.args(0)  #, _target='_blank'
    if not cond_id:
        jam()
        return T('Cond_id empty')

    wager_cond = db.wager_conds[cond_id]
    if not wager_cond:
        jam()
        return T('wager_Cond not found')
    bill_id = wager_cond.bill_id

    if not bill_id:
        from gluon.tools import fetch
        if not wager_cond.wager_id:
            mess = 'wager_id is None for wager_cond[%s]' % wager_cond.id
            return dict(err=mess + ' please contact with support')

        wager = db.wagers[wager_cond.wager_id]
        if not wager.lite_wager_id:
            mess = 'lite_wager_id is None for wager[%s]' % wager.id
            return dict(err=mess + ' please contact with support')

        url = LITEcash.url + LITEcash.make % (
            wager.lite_wager_id, wager.lite_wager_key, wager_cond.id,
            wager.def_bet or 0.1, make_mess(wager, wager_cond))
        print url
        resp = fetch(url)
        print resp
        import gluon.contrib.simplejson as sj
        res = sj.loads(resp)  # {'bill': bill_id }
        err = res.get('error')
        if err:
            return dict(err=err)

        bill_id = res['bill']
        #print 'bill_id :', bill_id
        wager_cond.update_record(bill_id=bill_id)

    redirect(LITEcash.url + LITEcash.show % bill_id)
Example #51
0
    def get_user(self):
        request = self.request
        # Janrain now sends the token via both a POST body and the query
        # string, so we should keep only one of these.
        token = request.post_vars.token or request.get_vars.token
        if token:
            user = Storage()
            data = urllib.urlencode(
                dict(apiKey=self.api_key, token=token))
            auth_info_json = fetch(self.auth_url + '?' + data)
            auth_info = json.loads(auth_info_json)

            if auth_info['stat'] == 'ok':
                self.profile = auth_info['profile']
                provider = re.sub('[^\w\-]', '', self.profile['providerName'])
                user = self.mappings.get(
                    provider, self.mappings.default)(self.profile)
                return user
            elif self.on_login_failure:
                redirect(self.on_login_failure)
        return None
Example #52
0
   def get_user(self):
       import string
       request = self.environment.request
 
       if request.vars.token:
           user = Storage()
           data = urllib.urlencode(dict(apiKey=self.api_key,
                                        token=request.vars.token))
           auth_info_json = fetch("?".join([self.auth_url,data]))
           auth_info = json.loads(auth_info_json)
           if auth_info['stat'] == 'ok':
               self.profile = auth_info['profile']
               provider = self.profile['providerName']
               provider = ''.join(c for c in provider if c in string.ascii_letters)
               for field in self.auth.settings.table_user.fields:
                   user[field] = self.get_mapping(provider,field)
               if self.on_mapped and user:
                   user = self.on_mapped(user,provider)
               if self.allow_local:
                   db = self.db
                   user_table = self.auth.settings.table_user
                   # local field name inferred from context (the source listing masked these strings)
                   if 'username' in user_table.fields:
                       username = 'username'
                   else:
                       username = 'email'
                   existing = db(user_table[username]==user[username]).select()
                   if len(existing):
                       dbuser = existing.first()
                       if dbuser[self.auth.settings.password_field] != None:
                           self.environment.session.flash = '%s already in use' % username.capitalize()
                           return None
                       if 'registration_key' in user_table.fields:
                           if dbuser['registration_key']:
                               self.environment.session.flash = '%s already in use' % username.capitalize()
                               return None
               return user
           else:
               return None
               #auth_info['err']['msg']                        
       return None
Example #53
0
def download_subtree():
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    max_depth = request.args(2)
    node_name = request.args(3)
    import cStringIO
    import contenttype as c
    s = cStringIO.StringIO()

    try:
        # fetch the Newick tree as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # use the appropriate web service for this ID type
        if id_type == 'ottol-id':
            fetch_url = method_dict['getDraftTreeForOttolID_url']
            fetch_args = {'ottId': node_or_ottol_id, 'maxDepth': max_depth}
        else:
            fetch_url = method_dict['getDraftTreeForNodeID_url']
            fetch_args = {'nodeID': node_or_ottol_id, 'maxDepth': max_depth}
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        # give the error handler below something to report if the fetch fails
        newick_text = 'NEWICK_NOT_FETCHED'
        # apparently this needs to be a POST, or it just describes the API
        tree_response = fetch(fetch_url, data=fetch_args)
        tree_json = simplejson.loads( tree_response )
        newick_text = str(tree_json['tree']).encode('utf-8')
        s.write( newick_text )

    except Exception, e:
        # throw 403 or 500 or just leave it
        if id_type == 'ottol-id':
            s.write( u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s) with max depth %s:\n\n%s' % (node_or_ottol_id, node_name, max_depth, newick_text) )
        else:
            s.write( u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s) with max depth %s:\n\n%s' % (node_or_ottol_id, node_name, max_depth, newick_text) )
Example #54
0
 def get_user(self):
     request = self.request
     user = None
     if request.vars.connection_token:
         auth_url = "https://%s.api.oneall.com/connections/%s.json" % (self.domain, request.vars.connection_token)
         auth_pw = "%s:%s" % (self.public_key, self.private_key)
         auth_pw = base64.b64encode(auth_pw)
         headers = dict(Authorization="Basic %s" % auth_pw)
         try:
             auth_info_json = fetch(auth_url, headers=headers)
             auth_info = json.loads(auth_info_json)
             data = auth_info["response"]["result"]["data"]
             if data["plugin"]["key"] == "social_login":
                 if data["plugin"]["data"]["status"] == "success":
                     userdata = data["user"]
                     self.profile = userdata["identity"]
                     source = self.profile["source"]["key"]
                     mapping = self.mappings.get(source, self.mappings["default"])
                     user = mapping(self.profile)
         except (JSONDecodeError, KeyError):
             pass
         if user is None and self.on_login_failure:
             redirect(self.on_login_failure)
     return user
 def talk_to_bing(self, query, sources, extra_args={}):
     logging.info('Query:%s'%query)
     logging.info('Sources:%s'%sources)
     logging.info('Other Args:%s'%extra_args)
     
     payload={}
     #payload['Appid'] = self.app_id
     payload['query'] = query
     payload['sources'] = sources
     payload.update(extra_args)
     query_string = urllib.urlencode(payload)
     final_url = self.end_point + query_string
     logging.info('final_url:%s'%final_url)
     response_json = fetch(final_url)  # avoid shadowing the json module name
     data = simplejson.loads(response_json)
     if 'Errors' in data['SearchResponse']:
         logging.info('Error')
         logging.info('data:%s'%data)
         data = data['SearchResponse']
         errors_list = [el['Message'] for el in data['Errors']]
         error_text = ','.join(errors_list)
         raise BingException(error_text)
     logging.info('data:%s'%data)
     return data
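A hypothetical use of the wrapper above, assuming bing is an instance of the enclosing class and that its end_point ends with '?' so the urlencoded query string can simply be appended; the shape of a successful response beyond 'SearchResponse' is also an assumption.

try:
    data = bing.talk_to_bing('web2py fetch', sources='web')
    print data['SearchResponse']
except BingException as e:
    print 'Bing error:', e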
Example #56
0
def fetch_current_synthetic_tree_ids():
    try:
        # fetch the latest IDs as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getDraftTreeID_url']

        fetch_args = {'startingTaxonName': "cellular organisms"}

        # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API
        ids_response = fetch(fetch_url, data=fetch_args)

        ids_json = simplejson.loads( ids_response )
        draftTreeName = ids_json['draftTreeName'].encode('utf-8')
        lifeNodeID = ids_json['lifeNodeID'].encode('utf-8')
        # IF we get a separate starting node ID, use it; else we'll start at 'life'
        startingNodeID = ids_json.get('startingNodeID', lifeNodeID).encode('utf-8')
        return (draftTreeName, lifeNodeID, startingNodeID)

    except Exception, e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message, 'NO_STARTING_NODE_ID')
Example #57
0
def _get_latest_synthesis_details_for_study_id( study_id ):
    # Fetch the last synthesis SHA *and* any tree IDs (from this study) from
    # treemachine. If the study is not found in contributing studies, return
    # None for both.
    try:
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url
        # as usual, this needs to be a POST; include_source_list asks for per-source details
        source_list_response = fetch(fetch_url, data={'include_source_list':True})
        source_dict = simplejson.loads( source_list_response )['source_id_map']

        # fetch the full source list, then look for this study and its trees
        commit_SHA_in_synthesis = None
        current_study_trees_included = [ ]
        #print(source_dict)
        # ignore source descriptions (e.g. "ot_764@tree1"); just read the details
        for source_details in source_dict.values():
            if source_details.get('study_id', None) == study_id:
                # this is the study we're interested in!
                current_study_trees_included.append( source_details['tree_id'] )
                if commit_SHA_in_synthesis is None:
                    commit_SHA_in_synthesis = source_details['git_sha']
            # keep checking, as each tree will have its own entry
        return commit_SHA_in_synthesis, current_study_trees_included

    except Exception, e:
        # throw 403 or 500 or just leave it
        raise HTTP(500, T('Unable to retrieve latest synthesis details for study {u}'.format(u=study_id)))
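A hypothetical caller for the helper above, unpacking the (SHA, tree-ID list) pair it returns; the study id is illustrative only.

sha, tree_ids = _get_latest_synthesis_details_for_study_id('pg_2359')
if sha is None:
    print 'study does not contribute to the current synthetic tree'
else:
    print 'synthesis used commit %s and trees %s' % (sha, ', '.join(tree_ids))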