Example #1
    def _make_request(self, path, args=None, post_args=None):
        """Fetches the given path in the Graph API.

        We translate args to a valid query string. If post_args is
        given, we send a POST request to the given path with the given
        arguments.

        """
        args = args or {}

        if self.access_token:
            if post_args is not None:
                post_args["access_token"] = self.access_token
            else:
                args["access_token"] = self.access_token
        post_data = None if post_args is None else urllib.urlencode(post_args)
        try:
            url = self._make_request_url(path, args)
            if post_data:
                file = urlfetch.fetch(url=url,
                                      method='POST',
                                      payload=post_data,
                                      deadline=self.timeout)
            else:
                file = urlfetch.fetch(url=url,
                                      deadline=self.timeout)
            contents = file.content

        except urllib2.HTTPError as e:
            response = _parse_json(e.read())
            raise GraphAPIError(response)
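
App Engine's urlfetch.fetch returns a result object rather than raising urllib2.HTTPError, so the except clause above is unlikely to ever fire. A minimal sketch of status-based error handling instead, reusing _parse_json and GraphAPIError from the example (an assumption, not the original author's code):

        result = urlfetch.fetch(url=url, deadline=self.timeout)
        # urlfetch signals HTTP errors through status_code, not exceptions.
        if result.status_code != 200:
            raise GraphAPIError(_parse_json(result.content))
        contents = result.content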
Example #2
def track_call(api_action, api_details, x_tba_app_id):
    analytics_id = Sitevar.get_by_id("google_analytics.id")
    if analytics_id is None:
        logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.")
    else:
        GOOGLE_ANALYTICS_ID = analytics_id.contents['GOOGLE_ANALYTICS_ID']
        params = urllib.urlencode({
            'v': 1,
            'tid': GOOGLE_ANALYTICS_ID,
            'cid': uuid.uuid3(uuid.NAMESPACE_X500, str(x_tba_app_id)),
            't': 'event',
            'ec': 'api',
            'ea': api_action,
            'el': api_details,
            'cd1': x_tba_app_id,  # custom dimension 1
            'ni': 1,
            'sc': 'end',  # forces tracking session to end
        })

        # Sets up the call
        analytics_url = 'http://www.google-analytics.com/collect?%s' % params
        urlfetch.fetch(
            url=analytics_url,
            method=urlfetch.GET,
            deadline=10,
        )
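
The Measurement Protocol also accepts the same parameters as a POST body, which keeps them out of the URL; a sketch of that variant, under the assumption that the collect endpoint treats a POST body the same way (params is the urlencoded string built above):

        analytics_url = 'http://www.google-analytics.com/collect'
        urlfetch.fetch(
            url=analytics_url,
            method=urlfetch.POST,
            payload=params,  # same urlencoded hit, sent in the body
            deadline=10,
        )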
Example #3
 def fetch_topic():
     response = urlfetch.fetch('http://api.ihackernews.com/page', deadline=10)
     topics = json.loads(response.content)
     topic_id = str(topics['items'][0]['id'])
     
     logging.error(topic_id)
     response = urlfetch.fetch('http://api.ihackernews.com/post/' + topic_id, deadline=10)
     topic = json.loads(response.content)
     topic['graph'] = {}
     topic['all_comments'] = []
     
     def generate_graph(comments):
         """Generates an id mapping between comments and their children.
         This graph is used for javascript layout.
             {
                 "comment_id": ["child_id", "child_id2"],
                 "comment_id2": ...
             }
         """
         for comment in comments:
             topic['all_comments'].append(comment)
             parent = topic['graph'].setdefault(comment['parentId'], [])
             parent.append(comment['id'])
             generate_graph(comment['children'])
     
     generate_graph(topic['comments'])
     return topic
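
generate_graph recurses once per level of nesting, so an unusually deep comment thread could hit Python's recursion limit. An equivalent iterative sketch with an explicit stack (same topic dict as above; ordering of all_comments differs, the id mapping does not):

def generate_graph_iterative(comments):
    """Builds the same parent-to-children id mapping without recursion."""
    stack = list(comments)
    while stack:
        comment = stack.pop()
        topic['all_comments'].append(comment)
        topic['graph'].setdefault(comment['parentId'], []).append(comment['id'])
        stack.extend(comment['children'])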
Example #4
    def request(self, path, args=None, post_args=None):
        """Fetches the given path in the Graph API.

        We translate args to a valid query string. If post_args is given,
        we send a POST request to the given path with the given arguments.
        """
        if not args: args = {}
        if self.access_token:
            if post_args is not None:
                post_args["access_token"] = self.access_token
            else:
                args["access_token"] = self.access_token
        post_data = None if post_args is None else urllib.urlencode(post_args)
        if not post_data:
            file = urlfetch.fetch("https://graph.facebook.com/" + path + "?" +
                                  urllib.urlencode(args), payload=post_data)
        else:
            file = urlfetch.fetch("https://graph.facebook.com/" + path + "?" +
                                  urllib.urlencode(args), payload=post_data, method="POST")
        response = _parse_json(file.content)

        if response.get("error"):
            raise GraphAPIError(response["error"]["type"],
                                response["error"]["message"])
        return response
Example #5
    def netflix_request(self, path, callback, access_token=None,
                           post_args=None, **args):
        args['output'] = 'json'
        overridepost = False
        if args.get('override') == 'POST':
            args.pop('override')
            overridepost = True
            post_args = args
            args = {}
        # Add the OAuth resource request signature if we have credentials
        url = 'http://api.netflix.com' + path
        if access_token:
            #args['output'] = 'json'
            all_args = {}
            all_args.update(args)
            all_args.update(post_args or {})
            method = 'POST' if post_args is not None else 'GET'
            oauth = self._oauth_request_parameters(
                url, access_token, all_args, method=method)
            args.update(oauth)

        if args:
            url += '?' + urllib.urlencode(args)
        try:
            if post_args is not None:
                response = urlfetch.fetch(url, method='POST',
                    payload=urllib.urlencode(post_args), deadline=10)
            else:
                response = urlfetch.fetch(url, deadline=10)
        except urlfetch.DownloadError as e:
            logging.exception(e)
            response = None
Example #6
    def get(self, videoid):
        logging.info("Downloading video '%s'", videoid)

        resp = urlfetch.fetch('http://www.youtube.com/get_video_info?&video_id=' + videoid)
        if hasattr(urlparse, 'parse_qs'):
            parse_qs = urlparse.parse_qs
        else:
            parse_qs = cgi.parse_qs
        infomap = parse_qs(resp.content.decode('utf-8'))
        # token = infomap['token'][0]
        # url = "https://www.youtube.com/get_video?" + urllib.urlencode({'video_id': videoid, 't': token, 'fmt': '18', 'asv': 2})
        url_stream = parse_qs(infomap['url_encoded_fmt_stream_map'][0])
        url = url_stream['url'][0]
        # logging.info(url_stream['url'])
        logging.info("Downloading '%s'", url)
        resp = urlfetch.fetch(url)

        for key, value in resp.headers.iteritems():
            if key not in IGNORE_HEADERS:
                self.response.headers[key] = value
        content = resp.content
        if content:
            logging.debug("Len: %dB", len(content))
            self.response.write(content)
        self.response.set_status(resp.status_code)
Example #7
    def track_notification(self, notification_type_enum, num_keys):
        """
        For more information about GAnalytics Protocol Parameters, visit
        https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
        """
        analytics_id = Sitevar.get_by_id("google_analytics.id")
        if analytics_id is None:
            logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.")
        else:
            GOOGLE_ANALYTICS_ID = analytics_id.contents['GOOGLE_ANALYTICS_ID']
            params = urllib.urlencode({
                'v': 1,
                'tid': GOOGLE_ANALYTICS_ID,
                'cid': uuid.uuid3(uuid.NAMESPACE_X500, str('tba-notification-tracking')),
                't': 'event',
                'ec': 'notification',
                'ea': NotificationType.type_names[notification_type_enum],
                'ev': num_keys,
                'ni': 1,
                'sc': 'end',  # forces tracking session to end
            })

            analytics_url = 'http://www.google-analytics.com/collect?%s' % params
            urlfetch.fetch(
                url=analytics_url,
                method=urlfetch.GET,
                deadline=10,
            )
Example #8
 def delete_move(self, user_id):
     aggregate = "Nothing"
     maximum = 0
     crowd_size = 0
     direction_list = {"Forward": 0, "Right": 0, "Left": 0, "Stop": 0}
     for crowdee in Crowdee.all().filter("room =", self.room.key().name()):
          if crowdee.direction not in (None, "", "None", "Nothing"):
             direction_list[crowdee.direction] += 1
             crowd_size += 1
     for d in direction_list.keys():
         if direction_list[d] > maximum:
             maximum = direction_list[d]
             aggregate = d
     self.room.direction = aggregate
     self.room.put()
      # If the room is the VR room, post the aggregate to the VR server.
     if self.room.key().name() == "vr":
         if crowd_size != 0:
             speed = maximum / crowd_size
         else:
             speed = 0
         url = "http://ccvcl.org/~khoo/posttome.php"
         form_fields = {"direction": aggregate, "speed": speed, "crowd_size":crowd_size}
         form_data = urllib.urlencode(form_fields)
         urlfetch.fetch(url=url,
                 payload=form_data,
                 method=urlfetch.POST)
     roomUpdate = {
                        'user_id': user_id,
                        'delete': True
                    }
      self.send_update(json.dumps(roomUpdate))
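
The loop above is a hand-rolled majority vote; collections.Counter expresses the same tally more directly. A sketch of the aggregation step only (the Crowdee query and room update stay as in the example):

from collections import Counter

def aggregate_direction(directions):
    # Count valid votes and return (winner, winning count, crowd size).
    votes = Counter(d for d in directions
                    if d not in (None, "", "None", "Nothing"))
    if not votes:
        return "Nothing", 0, 0
    direction, maximum = votes.most_common(1)[0]
    return direction, maximum, sum(votes.values())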
Example #9
 def get(self):
     self.response.headers['Content-Type'] = 'text/plain'
     code = self.request.get('code')
     if code:
         url = ('https://graph.facebook.com/oauth/access_token?'
                + 'client_id=' + APP_ID
                + '&redirect_uri=' + CANVAS_PAGE
                + '&client_secret=' + APP_SECRET
                + '&code=' + code)
         result = urlfetch.fetch(url)
         if result.status_code == 200:
             data = cgi.parse_qs(result.content)
             access_token = ''.join(data['access_token'])
             url = ('https://graph.facebook.com/me?'
                    + 'access_token=' + access_token)
             result = urlfetch.fetch(url)
             user = json.loads(result.content)
             self.response.out.write('Hello ' + user['first_name'] + ' '
                                     + user['last_name'])
         else:
             self.response.out.write("Authentication error")
     else:
         self.redirect('https://www.facebook.com/dialog/oauth?'
                       + 'client_id=' + APP_ID
                       + '&redirect_uri=' + CANVAS_PAGE)
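
Concatenating query strings by hand breaks as soon as a value needs escaping. A sketch of the same access-token request built with urllib.urlencode, using APP_ID, APP_SECRET, and CANVAS_PAGE from the example:

          url = ('https://graph.facebook.com/oauth/access_token?'
                 + urllib.urlencode({
                     'client_id': APP_ID,
                     'redirect_uri': CANVAS_PAGE,
                     'client_secret': APP_SECRET,
                     'code': code,  # urlencode escapes each value
                 }))
          result = urlfetch.fetch(url)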
Example #10
def track_call(api_action, api_label, x_tba_app_id):
    """
    For more information about GAnalytics Protocol Parameters, visit
    https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
    """
    analytics_id = Sitevar.get_by_id("google_analytics.id")
    if analytics_id is None:
        logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.")
    else:
        GOOGLE_ANALYTICS_ID = analytics_id.contents['GOOGLE_ANALYTICS_ID']
        params = urllib.urlencode({
            'v': 1,
            'tid': GOOGLE_ANALYTICS_ID,
            'cid': uuid.uuid3(uuid.NAMESPACE_X500, str(x_tba_app_id)),
            't': 'event',
            'ec': 'api-v02',
            'ea': api_action,
            'el': api_label,
            'cd1': x_tba_app_id,  # custom dimension 1
            'ni': 1,
            'sc': 'end',  # forces tracking session to end
        })

        analytics_url = 'http://www.google-analytics.com/collect?%s' % params
        urlfetch.fetch(
            url=analytics_url,
            method=urlfetch.GET,
            deadline=10,
        )
Example #11
def get_jid(APPRAT, APPNUM, APSPAC, KOC, METHAF, WETTED, METHOD, AIRFLG, YLOCEN, GRNFLG, GRSIZE, ORCFLG, INCORP, 
            SOL, METHAP, HYDHAP, FOTHAP):
    WETTED=json.dumps(WETTED)
    METHOD=json.dumps(METHOD)
    AIRFLG=json.dumps(AIRFLG)
    GRNFLG=json.dumps(GRNFLG)
    GRSIZE=json.dumps(GRSIZE)
    ORCFLG=json.dumps(ORCFLG)
#    url = 'https://api.picloud.com/r/3303/geneec_fortran'
    url='https://api.picloud.com/r/3303/geneec_fortran_s1' 
    data = urllib.urlencode({"APPRAT":APPRAT, "APPNUM":APPNUM, "APSPAC":APSPAC, "KOC":KOC, "METHAF":METHAF, "WETTED":WETTED,
                             "METHOD":METHOD, "AIRFLG":AIRFLG, "YLOCEN":YLOCEN, "GRNFLG":GRNFLG, "GRSIZE":GRSIZE,
                             "ORCFLG":ORCFLG, "INCORP":INCORP, "SOL":SOL, "METHAP":METHAP, "HYDHAP":HYDHAP, "FOTHAP":FOTHAP})
    
    response = urlfetch.fetch(url=url, payload=data, method=urlfetch.POST, headers=http_headers)    
    jid = json.loads(response.content)['jid']
    output_st = ''
    
    while output_st != "done":
        response_st = urlfetch.fetch(url='https://api.picloud.com/job/?jids=%s&field=status' %jid, headers=http_headers)
        output_st = json.loads(response_st.content)['info']['%s' %jid]['status']

    url_val = 'https://api.picloud.com/job/result/?jid='+str(jid)
    response_val = urlfetch.fetch(url=url_val, method=urlfetch.GET, headers=http_headers)
    output_val = json.loads(response_val.content)['result']
    return jid, output_st, output_val
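
The status loop above polls PiCloud in a tight loop with no delay. A sketch of the same wait with a pause between polls and a retry cap (the two-second delay and 60-poll cap are arbitrary choices; http_headers is the module-level dict the example already assumes):

import time

def wait_for_job(jid, max_polls=60, delay=2):
    # Poll the job status, sleeping between requests instead of spinning.
    status = ''
    for _ in xrange(max_polls):
        response_st = urlfetch.fetch(
            url='https://api.picloud.com/job/?jids=%s&field=status' % jid,
            headers=http_headers)
        status = json.loads(response_st.content)['info']['%s' % jid]['status']
        if status == 'done':
            break
        time.sleep(delay)
    return status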
Example #12
File: misc.py Project: 2010gis/v2ex
 def get(self, platform, soldier):
     ua = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13'
     referer = 'http://bfbcs.com/' + platform
     cache_tag = 'bfbcs::' + platform + '/' + soldier
     raw = memcache.get(cache_tag)
     url = 'http://bfbcs.com/stats_' + platform + '/' + soldier
     if raw is None:
         response = urlfetch.fetch(url, headers={'User-Agent' : ua, 'Referer' : referer })
         raw = response.content
         memcache.set(cache_tag, raw, 600)
     pcode = re.findall('([a-z0-9]{32})', raw)
      if len(pcode) == 1:
          pcode = pcode[0]
          self.response.out.write('<strong>PCODE</strong> ' + pcode + '<br />')
         payload = 'request=addplayerqueue&pcode=' + pcode
         self.response.out.write('<strong>PAYLOAD</strong> ' + payload + ' (' + str(len(payload))+ ' bytes)<br />')
          headers = {
              'User-Agent': ua,
              'Referer': url,
              'X-Requested-With': 'XMLHttpRequest',
              'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
              'Content-Length': str(len(payload)),
              'Accept': 'application/json, text/javascript, */*',
              'Accept-Language': 'en-us,en;q=0.5',
              'Accept-Encoding': 'gzip,deflate',
              'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
              'Keep-Alive': '115',
              'Host': 'bfbcs.com',
              'Pragma': 'no-cache',
              'Cache-Control': 'no-cache',
              'Cookie': '__utma=7878317.1843709575.1297205447.1298572822.1298577848.12; __utmz=7878317.1297205447.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); sessid=enqd028n30d2tr4lv4ned04qi0; __utmb=7878317.21.10.1298577848; __utmc=7878317',
          }
         response = urlfetch.fetch(url, payload=payload, headers=headers, method='POST')
         if response.status_code == 500:
             response = urlfetch.fetch(url, payload=payload, headers=headers, method='POST')
             if response.status_code == 500:
                 self.response.out.write('<strong>FAILED</strong>')
             else:
                 self.response.out.write('<strong>RESULT</strong> OK ' + response.content)
         else:
             self.response.out.write('<strong>RESULT</strong> OK ' + response.content)
Example #13
File: main.py Project: zuoang/g2fts
    def get(self):
        try:
            mycfg=memcache.get("keyword")
            if not mycfg:
                return
            lastpost=""
            try:
                result = urlfetch.fetch(url="https://plus.google.com/"+self.request.get("id")+"/posts") 
            except Exception as e:
                logging.error("Error fetching last post of %s, exception: %s" % (self.request.get("id"), str(e)))
                return
            cnt = result.content.split("%s\">" % (mycfg.encode("utf-8")))[:2]
            if len(cnt) >= 2:
                cnt = cnt[1].split("</div>")[:1]
                if len(cnt) >= 1:
                    lastpost = cnt[0].decode('utf-8')[:200]  # keep only the first 200 characters

            conf=self.request.get("id")
            #logging.info("lastpost of %s (%s) memcache:(%s)" % (conf,lastpost,memcache.get(conf)))
            
            cnt = lastpost
            oldcnt = memcache.get(conf)
            if oldcnt is None or cnt != oldcnt:
                logging.info("need sync %s (%s) and (%s) do not match" % (conf, cnt, oldcnt))
                res = urlfetch.fetch(url="http://gplus2ft.appspot.com/sync?id=" + conf)
                if int(res.status_code) == 200:
                    memcache.set(conf,cnt)
                else:
                    logging.error("Server error of %s:%d,will check next time!" % (conf,int(res.status_code)))
Example #14
  def testFetchWithEtag(self):
    # Verify that we send "If-none-match", and simulate getting a 304.
    self.mox.StubOutWithMock(urlfetch, 'fetch')
    headers = {'If-none-match': ETAG}
    urlfetch.fetch(SOURCE_URL, headers=headers, deadline=30).AndReturn(
        utils.Struct(status_code=304, headers={}, content='Not modified'))

    self.mox.ReplayAll()
    # Pretend there is existing metadata for 1234 bytes of content.
    old_metadata = {
        'fetch_status': 200,
        'fetch_length': 1234,
        'fetch_etag': ETAG,
        'length': 1234,
        'md5_hash': 'foo'
    }
    # Updated metadata should be the same except fetch_status and fetch_length.
    self.assertEquals({
        'fetch_status': 304,
        'fetch_length': len('Not modified'),
        'fetch_etag': ETAG,
        'length': 1234,
        'md5_hash': 'foo'
    }, metadata_fetch.FetchAndUpdateMetadata(old_metadata, SOURCE_ADDRESS))

    self.mox.VerifyAll()
Example #15
def FetchAndUpdateMetadata(metadata, address):
    """Fetches a layer and produces an updated metadata dictionary for the layer.

  Args:
    metadata: The current metadata dictionary associated with the URL, or None.
    address: The source address, a string in the form "<type>:<url>".

  Returns:
    The new metadata dictionary (without 'fetch_time'; the caller must set it).
  """
    if ":" not in address:
        return {"fetch_impossible": True}
    layer_type, url = address.split(":", 1)
    headers = {}
    if metadata and "fetch_etag" in metadata:
        headers["If-none-match"] = metadata["fetch_etag"]
    elif metadata and "fetch_last_modified" in metadata:
        headers["If-modified-since"] = metadata["fetch_last_modified"]
    try:
        if layer_type == maproot.LayerType.WMS:
            response = urlfetch.fetch(
                "%s?service=WMS&version=1.1.1&request=GetCapabilities" % url,
                headers=headers,
                deadline=MAX_FETCH_SECONDS,
            )
        else:
            response = urlfetch.fetch(url, headers=headers, deadline=MAX_FETCH_SECONDS)
    except urlfetch.Error as e:
        logging.warn("%r from urlfetch for source: %s", e, address)
        if isinstance(e, urlfetch.InvalidURLError):
            return {"fetch_impossible": True}
        if isinstance(e, urlfetch.ResponseTooLargeError):  # over 32 megabytes
            return {"fetch_error_occurred": True, "fetch_length": 32e6}
        return {"fetch_error_occurred": True}
Example #16
def announceActiveServers():

	zoning = zoningConfig(PROJECT_ID)

	logging.debug('Announcing our instances')

	# Make an HTTP POST request to the announce URLs with the active IPs for each zonegroup.
	payload = {}
	for key, zonegroup in zoning.iteritems():
		instance = memcache.get("active-server-" + key)
		if instance is not None and 'ip' in instance:
			payload[key] = instance['ip']
			payload[key + "_data"] = ""
			logging.debug('Active server for %s is %s.' % (zonegroup, instance['ip']))
			instanceLoad = memcache.get('load-' + instance['name'])
			if instanceLoad is not None and 'data' in instanceLoad:
				payload[key + "_data"] = instanceLoad['data']

	for url in config(PROJECT_ID).announceUrls:
		logging.debug(' - Announcing to: %s' % url)
		urlfetch.fetch(
			url = url,
			method = urlfetch.POST,
			headers = { 'Content-Type': 'application/x-www-form-urlencoded' },
			payload = urllib.urlencode(payload)
		)
Example #17
  def testFetchSecondTime(self):
    # Simulate a successful fetch of a document that was previously fetched.
    self.mox.StubOutWithMock(urlfetch, 'fetch')
    headers = {'If-none-match': ETAG}
    urlfetch.fetch(SOURCE_URL, headers=headers, deadline=30).AndReturn(
        utils.Struct(status_code=200, headers=RESPONSE_HEADERS_2,
                     content=SIMPLE_KML_2))

    self.mox.ReplayAll()
    self.assertEquals({
        'fetch_status': 200,
        'fetch_length': len(SIMPLE_KML_2),
        'fetch_last_modified': LAST_MODIFIED_STRING_2,
        'fetch_etag': ETAG_2,
        'update_time': LAST_MODIFIED_TIMESTAMP_2,
        'length': len(SIMPLE_KML_2),
        'md5_hash': hashlib.md5(SIMPLE_KML_2).hexdigest()
    }, metadata_fetch.FetchAndUpdateMetadata({
        'fetch_status': 200,
        'fetch_length': len(SIMPLE_KML),
        'fetch_last_modified': LAST_MODIFIED_STRING,
        'fetch_etag': ETAG,
        'update_time': LAST_MODIFIED_TIMESTAMP,
        'length': len(SIMPLE_KML),
        'md5_hash': hashlib.md5(SIMPLE_KML).hexdigest()
    }, SOURCE_ADDRESS))

    self.mox.VerifyAll()
Example #18
def ping(request):
    server = "http://phloor.weavrs.com/ping/"
    if settings.APPENGINE_DEV:
        logging.info("pinging local dev environment")
        server = "http://localhost:8002/ping/"

    instance_name = os.environ.get("APPLICATION_ID", "localhost")

    try:
        from deploy_data import data
    except ImportError:
        data = dict(revision="", shipped="", version="")

    logging.info("ship data is %s" % repr(data))

    payload = dict(
        name=instance_name,
        type="prosthetic-runner",
        url="http://%s" % settings.LOCAL_SERVER,
        deployed_at=data["shipped"],
        revision=data["revision"],
        version=data["version"],
    )
    try:
        urlfetch.fetch(server, deadline=1000, method="POST", payload=urlencode(payload))
    except Exception as e:
        logging.warn("can't ping phloor: %s" % e)
Example #19
    def login(self):
            
      base_url = "https://www.lacrossealerts.com"
      form_fields = {
        "username": "******",
        "password": "******"
      }
      form_data = urllib.urlencode(form_fields)
      
      result = urlfetch.fetch(base_url + '/login',
                              payload=form_data,
                              method=urlfetch.POST,
                              headers={'Content-Type': 'application/x-www-form-urlencoded'},
                              follow_redirects=False)

      logged_in = False
      last_redirect = ''
      
      if 'set-cookie' in result.headers:
        for cookie_parts in result.header_msg.getheaders('set-cookie') :
            self.session_cookie = cookie_parts.split(';')[0]
      
      while result.status_code == 302:
              logging.info('redirect: ' + result.headers['location'])
              last_redirect = result.headers['location']
              result = urlfetch.fetch(base_url + result.headers['location'],
                                      method=urlfetch.POST,
                                      headers={'Cookie' : self.session_cookie},
                                      follow_redirects=False)

      if last_redirect == '/devices':
         logged_in = True
         logging.info('Logged In')
Example #20
    def handle_oauth_callback(access_token, provider):
        '''
        This function takes the access_token and the provider and returns the dictionary of the user.

        :param access_token:
        :param provider:
        :return: a triple: the user data, the access_token, and the error message (if any)
        '''

        if provider == 'facebook':
            url = "https://graph.facebook.com/me?{0}"
            target_url = url.format(urlencode({'access_token': access_token}))
            fetch = urlfetch.fetch(target_url)
            if not (200 <= fetch.status_code < 300):
                return None, None, json.loads(fetch.content)['error']['message']
            return json.loads(fetch.content), access_token, None
        elif provider == 'google':
            url = 'https://www.googleapis.com/oauth2/v3/userinfo?{0}'
            target_url = url.format(urlencode({'access_token': access_token}))
            fetch = urlfetch.fetch(target_url)
            if not (200 <= fetch.status_code < 300):
                return None, None, json.loads(fetch.content)['error_description']
            user_data = json.loads(fetch.content)

            if 'id' not in user_data and 'sub' in user_data:
                user_data['id'] = user_data['sub']
            return user_data, access_token, None
        else:
            return None, None, 'Invalid provider'
Example #21
def restore(username):
  conf = Config()
  if conf.is_testing:
    # Don't do this if we're testing.
    return

  resp = urlfetch.fetch("http://%s/restore/%s" % \
      (conf.DOMAIN_HOST, username),
      method="POST", deadline=10,
      payload=urllib.urlencode({"secret": keymaster.get("api")}),
      follow_redirects=False)

  if resp.status_code != 200:
    # The domain app will handle retrying for us, so we don't block the queue.
    logging.error("User restoration failed with status %d." % \
                  (resp.status_code))

  # Alert the events app that the user's status has changed.
  query = {"username": username, "status": "active"}
  response = urlfetch.fetch("http://%s/api/v1/status_change" % \
                            (conf.EVENTS_HOST), method="POST",
                            payload=urllib.urlencode(query),
                            follow_redirects=False)

  if response.status_code != 200:
    logging.warning("Notifying events app failed.")
def check(url, check_type=u'status', content=u'', title=u''):
    # make sure we're not getting cached content
    fetch_headers = {'Cache-Control':'no-cache,max-age=0', 'Pragma':'no-cache'}
    if check_type == u'status':
        try:
            response = urlfetch.fetch(url, headers=fetch_headers, deadline=60, validate_certificate=False)
            if response.status_code == 200:
                return u'online'
        except:
            pass
    elif check_type == u'title':
        try:
            parsedtitle = parsetitle(url)
            if title in parsedtitle:
                return u'online'
        except:
            pass
    elif check_type == u'content':
        try:
            response = urlfetch.fetch(url, headers=fetch_headers, deadline=60, validate_certificate=False).content
            if content in response:
                return u'online'
        except:
            pass
    return u'offline'
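
A short usage sketch of the three check modes (the URL and expected strings are placeholders):

print check(u'http://example.com/health')                           # u'online' on HTTP 200
print check(u'http://example.com/', u'title', title=u'Example')     # title substring match
print check(u'http://example.com/', u'content', content=u'ok')      # body substring match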
Example #23
def get_jid(wat_hl,wat_t,ben_hl,ben_t,unf_hl,unf_t,aqu_hl,aqu_t,hyd_hl,mw,vp,sol,koc,hea_h,hea_r_t,
           noa,dd_out,mm_out,ma_out,sr_out,weather,wea_l,nof,date_f1,nod_out,fl_out,wl_out,ml_out,to_out,
           zero_height_ref,days_zero_full,days_zero_removal,max_frac_cov,mas_tras_cof,leak,ref_d,ben_d,
           ben_por,dry_bkd,foc_wat,foc_ben,ss,wat_c_doc,chl,dfac,q10,area_app):

    url='https://api.picloud.com/r/3303/pfam_s1'
    input_list=[wat_hl,wat_t,ben_hl,ben_t,unf_hl,unf_t,aqu_hl,aqu_t,hyd_hl,mw,vp,sol,koc,hea_h,hea_r_t,
           noa,dd_out,mm_out,ma_out,sr_out,weather,wea_l,nof,date_f1,nod_out,fl_out,wl_out,ml_out,to_out,
           zero_height_ref,days_zero_full,days_zero_removal,max_frac_cov,mas_tras_cof,leak,ref_d,ben_d,
           ben_por,dry_bkd,foc_wat,foc_ben,ss,wat_c_doc,chl,dfac,q10,area_app]
    input_list=json.dumps(input_list)

    data = urllib.urlencode({"input_list":input_list})

    response = urlfetch.fetch(url=url, payload=data, method=urlfetch.POST, headers=http_headers) 
    jid = json.loads(response.content)['jid']
    output_st = ''
        
    while output_st != "done":
        response_st = urlfetch.fetch(url='https://api.picloud.com/job/?jids=%s&field=status' %jid, headers=http_headers)
        output_st = json.loads(response_st.content)['info']['%s' %jid]['status']

    url_val = 'https://api.picloud.com/job/result/?jid='+str(jid)
    response_val = urlfetch.fetch(url=url_val, method=urlfetch.GET, headers=http_headers)
    output_val = json.loads(response_val.content)['result']
    return jid, output_st, output_val
Example #24
def send(token, url, method='GET', params=None):
    result = urlfetch.fetch(url=url,
        payload=urllib.urlencode(params or {}),
        method=urlfetch.POST,
        headers={'token': gen_token(token)})
Example #25
 def test_createSession(self):
     test_url = '/conference/{wcksafe}/session'
     # Ensure default profile is created
     url = '/profile'
     res = urlfetch.fetch(self.urlbase + url, method='GET')
     self.assertEqual(res.status_code, 200)
     # Create conference and get websafe key
     conf = Conference(
         name='Test Conference',
         organizerUserId=json.loads(res.content)['mainEmail']
     )
     wck = conf.put()
     sleep(0.1)
     wcksafe = wck.urlsafe()
     test_url = test_url.format(wcksafe=wcksafe)
     # Ensure no sessions exist yet
     self.assertEqual(0, len(Session.query().fetch(5)))
     # Test endpoint
     params = {
         'name': 'TEST Session',
         'date': '2015-8-10',
         'startTime': '9:10',
         'conferenceKey': wcksafe
     }
     response = urlfetch.fetch(self.urlbase + test_url,
                     payload=json.dumps(params),
                     method=urlfetch.POST,
                     headers={'Content-Type': 'application/json'})
     self.assertEqual(response.status_code, 200)
Example #26
    def getBestPrice(self, status):
        logging.info("Processando: %s", status)
        tweet = "Desculpe, não consegui encontrar o produto"
        if status.find("@ondevende") == 0:
            status = status.replace("@ondevende", "").strip()
        else:
            return ""
        
        keyword = urllib.quote(status)
        buscapeUrl = "http://sandbox.buscape.com/service/findOfferList/%s/?keyword=%s&sort=price&format=json" % (keys.buscape_id, keyword)

        try:
            result = urlfetch.fetch(url=buscapeUrl)
            buscapeData = simplejson.loads(result.content.decode("utf-8"))
            
            price = buscapeData["offer"][0]["offer"]["price"]["value"].replace(".",",")
            store = buscapeData["offer"][0]["offer"]["seller"]["sellername"]
            link = buscapeData["offer"][0]["offer"]["links"][0]["link"]["url"]
           
            bitlyUrl = "http://api.bit.ly/v3/shorten?login=%s&apiKey=%s&longUrl=%s&format=txt" % (keys.bitly_login, keys.bitly_key, urllib.quote(link.strip()))
            shortenedLink = urlfetch.fetch(url=bitlyUrl).content
            
            tweet = "em %s por R$%s - %s" % (store, price, shortenedLink)
        except:
            logging.error("Erro ao processar a URL %s" % buscapeUrl)
        
        return tweet
Example #27
	def get( self, URL, ARGS = None ):
		
		if self.mode == 'googelappengine':
		
			return urlfetch.fetch( URL, None, 'GET', False )
			
		elif self.mode == 'urllib2':
		
			req = urllib2.build_opener( urllib2.HTTPCookieProcessor( self.cookieJar ) )
			
			res = req.open( URL )
			
			headers = str(res.info()).split( "\r\n" )
			
			headerDict = {}
			
			#If i parse out the header names
			#namePattern = re.compile('^\w+?(.\w+):')
			headerPattern = re.compile(':\s')
			
			for header in headers:
				
				if len(header) > 0:
					s = headerPattern.split( header, 1 )
					
					headerDict[s[0].lower()] = s[1]

			#Create Response object
			#ret = FRHTTPResponse( res.read(), headerDict )
			#return ret
			return FRHTTPResponse( res.read(), headerDict )
Example #28
def http_request(url, user_agent=USER_AGENT, retry=4, opener=None):
    """Retrieve the contents referenced by the URL using urllib2.

    Retries up to four times (default) on exceptions.
    """
    #request = urllib2.Request(url, headers={'User-Agent':user_agent})
    data = urlencode( {'User-Agent':user_agent} )

    if not opener:
        opener = urllib2.build_opener()

    # Remember last error
    e = None

    # Repeat request on time-out errors
    tries = retry
    while tries:
        try:
            url_response = urlfetch.fetch( url, data, urlfetch.POST ).content
            #logging.info(url_response)
            return url_response
            #return opener.open(request)

        except urllib2.HTTPError as e:
            # reraise unexpected protocol errors as PyDeliciousException
            raise PyDeliciousException("%s" % e)

        except urllib2.URLError as e:
            # xxx: Ugly check for time-out errors
            #if len(e)>0 and 'timed out' in arg[0]:
            print >> sys.stderr, "%s, %s tries left." % (e, tries)
            Waiter()
            tries = tries - 1
Example #29
def translate(phrase, in_lang):
    if in_lang == "en":
        out_lang = "ja"
    else:
        out_lang = "en"

    if True:
        url = (
            "http://api.microsofttranslator.com/V2/Ajax.svc/GetTranslations?appId=F2926FC35C3732CEC3E9C92913745F9C28912821&from="
            + in_lang
            + "&to="
            + out_lang
            + "&maxTranslations=1"
        )
        url += "&text=" + quote(phrase.encode("utf-8"))

        response = urlfetch.fetch(url=url)

        content = re.sub(u"\xEF\xBB\xBF", "", response.content)
        data = json.loads(content)
        translated_text = data["Translations"][0]["TranslatedText"]
        time.sleep(0.1)
    else:
        url = "https://www.googleapis.com/language/translate/v2?"
        url += "&source=" + in_lang
        url += "&target=" + out_lang
        url += "&q=" + quote(phrase.encode("utf-8"))
        url += "&key=" + "AIzaSyAI3PoUAJ_uP0o33EDgUfSEUMALepQAaNA"

        content = urlfetch.fetch(url=url).content
        data = json.loads(content)

        translated_text = data["data"]["translations"][0]["translatedText"]

    return translated_text
Example #30
def scrapeDirection(self):
    route = direction = stop = stopnumber = None
    for key in self.request.params.keys():
        if (key == 'route'):
            route = self.request.get(key)
        elif (key == 'direction'):
            direction = self.request.get(key)
        elif (key == 'stop'):
            stop = self.request.get(key)
        elif (key == 'stopnumber'):
            stopnumber = self.request.get(key)
    if stopnumber:
        result = urlfetch.fetch(stopsURL + "stopnumber=" + stopnumber)
    else:
        result = urlfetch.fetch(stopsURL + "route=" + route + "&direction=" + direction + "&stop=" + stop)
    departures = []
    if (result.status_code == 200):
        soup = BeautifulSoup(result.content)
        departDiv = soup.html.body.find(id='ctl00_mainContent_NexTripControl1_NexTripResults1_departures')
        if not departDiv:
            return json.dumps([])
        rows = departDiv.findAll(attrs={'class':re.compile(r'\bdata\b')})
        for row in rows:
            route = row.find('span','col1').find('a')
            routeName = row.find('span', 'col2').find('a')
            time = row.find(attrs={'class':re.compile(r'\bcol3\b')})
            actualTime = ('red' not in time['class'].split(' '))
            departures.append({
                'number': route.string,
                'name': routeName.string,
                'time': time.string,
                'actual': actualTime
            })
    return json.dumps(departures)
Example #31
    def post(self):
        # We will need the oauth token for posting a gist later so start the
        # query now.
        token_future = models.OAuthToken.query(
            models.OAuthToken.purpose == TOKEN_PURPOSE).get_async()

        owner = self.request.get('owner')
        repo = self.request.get('repo')
        number = self.request.get('number')
        list_files_response = urlfetch.fetch(
            LIST_FILES % {
                'owner': owner,
                'repo': repo,
                'number': number
            })
        if list_files_response.status_code != 200:
            raise GithubApiError('Failed to list files in pull request')

        content = json.loads(list_files_response.content)
        files = [(x['filename'], x['raw_url']) for x in content]

        rpcs = []
        for f in files:
            rpc = urlfetch.create_rpc()
            urlfetch.make_fetch_call(rpc, f[1])
            rpcs.append((rpc, f[0]))

        for rpc in rpcs:
            rpc[0].wait()

        # All file content RPCs finished.
        # TODO: Start the formatting RPCs as soon as individual content RPCs return.
        file_contents = [(rpc[1], rpc[0].get_result().content) for rpc in rpcs]
        rpcs = []

        for content in file_contents:
            rpc = urlfetch.create_rpc()
            rpc.callback = self.CreateCallback(rpc, content)
            urlfetch.make_fetch_call(
                rpc,
                'http://clang.clementine-player.org/format',
                method=urlfetch.POST,
                payload=content[1])
            rpcs.append(rpc)

        for rpc in rpcs:
            rpc.wait()

        # Now we need the oauth token.
        token_result = token_future.get_result()
        if token_result is None:
            raise GithubApiError('No oauth token available')
        else:
            self.oauth_token = token_result.token

        # Post the diff as a gist
        gist_url = self.CreateGist(
            'Patch for %s/%s issue %s' % (owner, repo, number), self.output)
        self.PostComment(
            owner, repo, number,
            'Successfully ran clang format on pull request:\n%s' % gist_url)
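
The TODO above notes that each format RPC could start as soon as its content RPC returns, instead of waiting for all downloads first. A sketch of that overlap using the callback hook on App Engine RPC objects (start_format_rpc is a hypothetical helper that would wrap the format POST shown above):

        def make_content_callback(rpc, filename, format_rpcs):
            def callback():
                # Fires inside rpc.wait() once this download completes.
                content = rpc.get_result().content
                format_rpcs.append(start_format_rpc(filename, content))  # hypothetical
            return callback

        format_rpcs = []
        content_rpcs = []
        for filename, raw_url in files:
            rpc = urlfetch.create_rpc()
            rpc.callback = make_content_callback(rpc, filename, format_rpcs)
            urlfetch.make_fetch_call(rpc, raw_url)
            content_rpcs.append(rpc)
        for rpc in content_rpcs:
            rpc.wait()
        for rpc in format_rpcs:
            rpc.wait()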
Example #32
    def urlopen(self, method, url, body=None, headers=None,
                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
                **response_kw):

        retries = self._get_retries(retries, redirect)

        try:
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=(
                    redirect and
                    retries.redirect != 0 and
                    retries.total),
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)

        except urlfetch.InvalidURLError as e:
            if 'too large' in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.", e)
            raise ProtocolError(e)

        except urlfetch.DownloadError as e:
            if 'Too many redirects' in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)

        except urlfetch.ResponseTooLargeError as e:
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports"
                "responses up to 32mb in size.", e)

        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)

        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e)

        http_response = self._urlfetch_response_to_http_response(
            response, **response_kw)

        # Check for redirect response
        if (http_response.get_redirect_location() and
                retries.raise_on_redirect and redirect):
            raise MaxRetryError(self, url, "too many redirects")

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=http_response.status):
            retries = retries.increment(
                method, url, response=http_response, _pool=self)
            log.info("Forced retry: %s", url)
            retries.sleep()
            return self.urlopen(
                method, url,
                body=body, headers=headers,
                retries=retries, redirect=redirect,
                timeout=timeout, **response_kw)

        return http_response
Example #33
def log_warning(msg, *args):
    do_log(msg, args, logging.info, "<p><b>" + msg + "</b></p>")


def log_error(msg, *args):
    do_log(msg, args, logging.error, "<h2>" + msg + "</h2>")


print "Content-Type: text/html\n"

headers = {'Cache-Control': 'max-age=300'}
if etag:
    print "<p>Using etag: " + etag + "</p>"
    headers['If-None-Match'] = etag
else:
    print "<p>Not using an etag</p>"

result = urlfetch.fetch(FAQ_URL, headers=headers)

if result.status_code == 304:
    print "<p>Page is unchanged (HTTP status 304)</p>"
elif result.status_code != 200:
    log_error("bad HTTP response %d when fetching url %s", result.status_code,
              FAQ_URL)
    sys.exit()

print "<p>Result headers:</p><pre>" + str(result.headers) + "</pre>"

print "<p>Result:</p><pre>" + result.content + "</pre>"
Example #34
def gae_post_ex(environ, start_response):
    headers, kwargs = decode_request(environ['HTTP_COOKIE'])

    method = kwargs['method']
    url = kwargs['url']

    #logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], method, url, 'HTTP/1.1')

    if __password__ and __password__ != kwargs.get('password', ''):
        start_response('403 Forbidden', [('Content-Type', 'text/html')])
        return [
            gae_error_html(errno='403',
                           error='Wrong password.',
                           description='GoAgent proxy.ini password is wrong!')
        ]

    fetchmethod = getattr(urlfetch, method, '')
    if not fetchmethod:
        start_response('501 Unsupported', [('Content-Type', 'text/html')])
        return [
            gae_error_html(errno='501',
                           error=('Invalid Method: ' + str(method)),
                           description='Unsupported Method')
        ]

    deadline = Deadline
    headers = dict(headers)
    headers['Connection'] = 'close'
    payload = environ['wsgi.input'].read(
    ) if 'Content-Length' in headers else None

    accept_encoding = headers.get('Accept-Encoding', '')

    errors = []
    for i in xrange(int(kwargs.get('fetchmax', FetchMax))):
        try:
            response = urlfetch.fetch(url,
                                      payload,
                                      fetchmethod,
                                      headers,
                                      allow_truncated=False,
                                      follow_redirects=False,
                                      deadline=deadline,
                                      validate_certificate=False)
            break
        except apiproxy_errors.OverQuotaError as e:
            time.sleep(4)
        except urlfetch.DeadlineExceededError as e:
            errors.append('DeadlineExceededError %s(deadline=%s)' %
                          (e, deadline))
            logging.error('DeadlineExceededError(deadline=%s, url=%r)',
                          deadline, url)
            time.sleep(1)
            deadline = Deadline * 2
        except urlfetch.DownloadError as e:
            errors.append('DownloadError %s(deadline=%s)' % (e, deadline))
            logging.error('DownloadError(deadline=%s, url=%r)', deadline, url)
            time.sleep(1)
            deadline = Deadline * 2
        except urlfetch.ResponseTooLargeError as e:
            response = e.response
            logging.error(
                'ResponseTooLargeError(deadline=%s, url=%r) response(%s)',
                deadline, url, response and response.headers)
            if response and response.headers.get('content-length'):
                response.status_code = 206
                response.headers['accept-ranges'] = 'bytes'
                response.headers['content-range'] = 'bytes 0-%d/%s' % (len(
                    response.content) - 1, response.headers['content-length'])
                response.headers['content-length'] = len(response.content)
                break
            else:
                m = re.search(
                    r'=\s*(\d+)-',
                    headers.get('Range') or headers.get('range') or '')
                if m is None:
                    headers['Range'] = 'bytes=0-%d' % FetchMaxSize
                else:
                    headers.pop('Range', '')
                    headers.pop('range', '')
                    start = int(m.group(1))
                    headers['Range'] = 'bytes=%s-%d' % (start,
                                                        start + FetchMaxSize)
            deadline = Deadline * 2
        except Exception as e:
            errors.append(str(e))
            if i == 0 and method == 'GET':
                deadline = Deadline * 2
    else:
        start_response('500 Internal Server Error',
                       [('Content-Type', 'text/html')])
        return [
            gae_error_html(errno='502',
                           error=('Python Urlfetch Error: ' + str(method)),
                           description=str(errors))
        ]

    #logging.debug('url=%r response.status_code=%r response.headers=%r response.content[:1024]=%r', url, response.status_code, dict(response.headers), response.content[:1024])

    data = response.content
    if 'content-encoding' not in response.headers and len(
            response.content) < DeflateMaxSize and response.headers.get(
                'content-type', '').startswith(
                    ('text/', 'application/json', 'application/javascript')):
        if 'deflate' in accept_encoding:
            response.headers['Content-Encoding'] = 'deflate'
            data = zlib.compress(data)[2:-4]
        elif 'gzip' in accept_encoding:
            response.headers['Content-Encoding'] = 'gzip'
            compressobj = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                           zlib.DEFLATED, -zlib.MAX_WBITS,
                                           zlib.DEF_MEM_LEVEL, 0)
            dataio = cStringIO.StringIO()
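            # 10-byte gzip header: magic 1f 8b, deflate method, no flags,
            # zero mtime, XFL=2 (max compression), OS=255 (unknown).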
            dataio.write('\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff')
            dataio.write(compressobj.compress(data))
            dataio.write(compressobj.flush())
            dataio.write(
                struct.pack('<LL',
                            zlib.crc32(data) & 0xFFFFFFFFL,
                            len(data) & 0xFFFFFFFFL))
            data = dataio.getvalue()
    response.headers['Content-Length'] = str(len(data))
    start_response(
        '200 OK',
        [('Content-Type', 'image/gif'),
         ('Set-Cookie',
          encode_request(response.headers, status=str(response.status_code)))])
    return [data]
Example #35
def gae_post(environ, start_response):
    data = zlib.decompress(environ['wsgi.input'].read(
        int(environ['CONTENT_LENGTH'])))
    request = dict((k, binascii.a2b_hex(v))
                   for k, _, v in (x.partition('=') for x in data.split('&')))
    #logging.debug('post() get fetch request %s', request)

    method = request['method']
    url = request['url']
    payload = request['payload']

    if __password__ and __password__ != request.get('password', ''):
        return send_notify(start_response, method, url, 403, 'Wrong password.')

    fetchmethod = getattr(urlfetch, method, '')
    if not fetchmethod:
        return send_notify(start_response, method, url, 501, 'Invalid Method')

    deadline = Deadline

    headers = dict(
        (k.title(), v.lstrip())
        for k, _, v in (line.partition(':')
                        for line in request['headers'].splitlines()))
    headers['Connection'] = 'close'

    errors = []
    for i in xrange(FetchMax if 'fetchmax' not in
                    request else int(request['fetchmax'])):
        try:
            response = urlfetch.fetch(url, payload, fetchmethod, headers,
                                      allow_truncated=False,
                                      follow_redirects=False,
                                      deadline=deadline,
                                      validate_certificate=False)
            break
        except apiproxy_errors.OverQuotaError as e:
            time.sleep(4)
        except urlfetch.DeadlineExceededError as e:
            errors.append('DeadlineExceededError %s(deadline=%s)' %
                          (e, deadline))
            logging.error('DeadlineExceededError(deadline=%s, url=%r)',
                          deadline, url)
            time.sleep(1)
        except urlfetch.DownloadError as e:
            errors.append('DownloadError %s(deadline=%s)' % (e, deadline))
            logging.error('DownloadError(deadline=%s, url=%r)', deadline, url)
            time.sleep(1)
        except urlfetch.InvalidURLError as e:
            return send_notify(start_response, method, url, 501,
                               'Invalid URL: %s' % e)
        except urlfetch.ResponseTooLargeError as e:
            response = e.response
            logging.error('ResponseTooLargeError(deadline=%s, url=%r) response(%s)',
                          deadline, url, response and response.headers)
            if response and response.headers.get('content-length'):
                response.status_code = 206
                response.headers['accept-ranges'] = 'bytes'
                response.headers['content-range'] = 'bytes 0-%d/%s' % (len(
                    response.content) - 1, response.headers['content-length'])
                response.headers['content-length'] = len(response.content)
                break
            else:
                headers['Range'] = 'bytes=0-%d' % FetchMaxSize
        except Exception as e:
            errors.append('Exception %s(deadline=%s)' % (e, deadline))
    else:
        return send_notify(start_response, method, url, 500,
                           'Python Server: Urlfetch error: %s' % errors)

    headers = response.headers
    if 'set-cookie' in headers:
        scs = headers['set-cookie'].split(', ')
        cookies = []
        i = -1
        for sc in scs:
            if re.match(r'[^ =]+ ', sc):
                try:
                    cookies[i] = '%s, %s' % (cookies[i], sc)
                except IndexError:
                    pass
            else:
                cookies.append(sc)
                i += 1
        headers['set-cookie'] = '\r\nSet-Cookie: '.join(cookies)
    if 'content-length' not in headers:
        headers['content-length'] = str(len(response.content))
    headers['connection'] = 'close'
    return send_response(start_response, response.status_code, headers,
                         response.content)
Example #36
def application(environ, start_response):
    cookie = environ.get('HTTP_COOKIE', '')
    options = environ.get('HTTP_X_GOA_OPTIONS', '')
    if environ['REQUEST_METHOD'] == 'GET' and not cookie:
        if '204' in environ['QUERY_STRING']:
            start_response('204 No Content', [])
            yield ''
        else:
            timestamp = long(os.environ['CURRENT_VERSION_ID'].split('.')[1])/2**28
            ctime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(timestamp+8*3600))
            html = u'GoAgent Python Server %s \u5df2\u7ecf\u5728\u5de5\u4f5c\u4e86\uff0c\u90e8\u7f72\u65f6\u95f4 %s\n' % (__version__, ctime)
            start_response('200 OK', [('Content-Type', 'text/plain; charset=utf-8')])
            yield html.encode('utf8')
        raise StopIteration

    inflate = lambda x: zlib.decompress(x, -zlib.MAX_WBITS)
    deflate = lambda x: zlib.compress(x)[2:-4]
    rc4crypt = lambda s, k: RC4Cipher(k).encrypt(s) if k else s

    wsgi_input = environ['wsgi.input']
    input_data = wsgi_input.read()

    try:
        if cookie:
            if 'rc4' not in options:
                metadata = inflate(base64.b64decode(cookie))
                payload = input_data or ''
            else:
                metadata = inflate(rc4crypt(base64.b64decode(cookie), __password__))
                payload = rc4crypt(input_data, __password__) if input_data else ''
        else:
            if 'rc4' in options:
                input_data = rc4crypt(input_data, __password__)
            metadata_length, = struct.unpack('!h', input_data[:2])
            metadata = inflate(input_data[2:2+metadata_length])
            payload = input_data[2+metadata_length:]
        headers = dict(x.split(':', 1) for x in metadata.splitlines() if x)
        method = headers.pop('G-Method')
        url = headers.pop('G-Url')
    except (zlib.error, KeyError, ValueError):
        import traceback
        start_response('500 Internal Server Error', [('Content-Type', 'text/html')])
        yield message_html('500 Internal Server Error', 'Bad Request (metadata) - Possible Wrong Password', '<pre>%s</pre>' % traceback.format_exc())
        raise StopIteration

    kwargs = {}
    any(kwargs.__setitem__(x[2:].lower(), headers.pop(x)) for x in headers.keys() if x.startswith('G-'))

    if 'Content-Encoding' in headers:
        if headers['Content-Encoding'] == 'deflate':
            payload = inflate(payload)
            headers['Content-Length'] = str(len(payload))
            del headers['Content-Encoding']

    logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], method, url, 'HTTP/1.1')
    #logging.info('request headers=%s', headers)

    if __password__ and __password__ != kwargs.get('password', ''):
        start_response('403 Forbidden', [('Content-Type', 'text/html')])
        yield message_html('403 Wrong password', 'Wrong password(%r)' % kwargs.get('password', ''), 'GoAgent proxy.ini password is wrong!')
        raise StopIteration

    netloc = urlparse.urlparse(url).netloc

    if __hostsdeny__ and netloc.endswith(__hostsdeny__):
        start_response('403 Forbidden', [('Content-Type', 'text/html')])
        yield message_html('403 Hosts Deny', 'Hosts Deny(%r)' % netloc, detail='url=%r' % url)
        raise StopIteration

    if len(url) > MAX_URL_LENGTH:
        start_response('400 Bad Request', [('Content-Type', 'text/html')])
        yield message_html('400 Bad Request', 'length of URL too long(greater than %r)' % MAX_URL_LENGTH, detail='url=%r' % url)
        raise StopIteration

    if netloc.startswith(('127.0.0.', '::1', 'localhost')):
        start_response('400 Bad Request', [('Content-Type', 'text/html')])
        html = ''.join('<a href="https://%s/">%s</a><br/>' % (x, x) for x in ('google.com', 'mail.google.com'))
        yield message_html('GoAgent %s is Running' % __version__, 'Now you can visit some websites', html)
        raise StopIteration

    fetchmethod = getattr(urlfetch, method, None)
    if not fetchmethod:
        start_response('405 Method Not Allowed', [('Content-Type', 'text/html')])
        yield message_html('405 Method Not Allowed', 'Method Not Allowed: %r' % method, detail='Method Not Allowed URL=%r' % url)
        raise StopIteration

    deadline = URLFETCH_TIMEOUT
    validate_certificate = bool(int(kwargs.get('validate', 0)))
    # https://www.freebsdchina.org/forum/viewtopic.php?t=54269
    accept_encoding = headers.get('Accept-Encoding', '') or headers.get('Bccept-Encoding', '')
    errors = []
    for i in xrange(int(kwargs.get('fetchmax', URLFETCH_MAX))):
        try:
            response = urlfetch.fetch(url, payload, fetchmethod, headers, allow_truncated=False, follow_redirects=False, deadline=deadline, validate_certificate=validate_certificate)
            break
        except apiproxy_errors.OverQuotaError as e:
            time.sleep(5)
        except urlfetch.DeadlineExceededError as e:
            errors.append('%r, deadline=%s' % (e, deadline))
            logging.error('DeadlineExceededError(deadline=%s, url=%r)', deadline, url)
            time.sleep(1)
            deadline = URLFETCH_TIMEOUT * 2
        except urlfetch.DownloadError as e:
            errors.append('%r, deadline=%s' % (e, deadline))
            logging.error('DownloadError(deadline=%s, url=%r)', deadline, url)
            time.sleep(1)
            deadline = URLFETCH_TIMEOUT * 2
        except urlfetch.ResponseTooLargeError as e:
            errors.append('%r, deadline=%s' % (e, deadline))
            response = e.response
            logging.error('ResponseTooLargeError(deadline=%s, url=%r) response(%r)', deadline, url, response)
            m = re.search(r'=\s*(\d+)-', headers.get('Range') or headers.get('range') or '')
            if m is None:
                headers['Range'] = 'bytes=0-%d' % int(kwargs.get('fetchmaxsize', URLFETCH_MAXSIZE))
            else:
                headers.pop('Range', '')
                headers.pop('range', '')
                start = int(m.group(1))
                headers['Range'] = 'bytes=%s-%d' % (start, start+int(kwargs.get('fetchmaxsize', URLFETCH_MAXSIZE)))
            deadline = URLFETCH_TIMEOUT * 2
        except urlfetch.SSLCertificateError as e:
            errors.append('%r, should validate=0 ?' % e)
            logging.error('%r, deadline=%s', e, deadline)
        except Exception as e:
            errors.append(str(e))
            if i == 0 and method == 'GET':
                deadline = URLFETCH_TIMEOUT * 2
    else:
        start_response('502 Urlfetch Error', [('Content-Type', 'text/html')])
        error_string = '<br />\n'.join(errors)
        if not error_string:
            logurl = 'https://appengine.google.com/logs?&app_id=%s' % os.environ['APPLICATION_ID']
            error_string = 'Internal Server Error. <p/>try <a href="javascript:window.location.reload(true);">refresh</a> or go to <a href="%s" target="_blank">appengine.google.com</a> for details' % logurl
        yield message_html('502 Urlfetch Error', 'Python Urlfetch Error: %r' % method, error_string)
        raise StopIteration

    #logging.debug('url=%r response.status_code=%r response.headers=%r response.content[:1024]=%r', url, response.status_code, dict(response.headers), response.content[:1024])

    data = response.content
    response_headers = response.headers
    content_type = response_headers.get('content-type', '')
    if 'content-encoding' not in response_headers and 512 < len(response.content) < URLFETCH_DEFLATE_MAXSIZE and content_type.startswith(('text/', 'application/json', 'application/javascript')):
        if 'gzip' in accept_encoding:
            response_headers['Content-Encoding'] = 'gzip'
            compressobj = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
            dataio = io.BytesIO()
            # hand-rolled gzip container: 10-byte header (magic, deflate, no
            # flags, zero mtime), raw-deflate body, then CRC32/length trailer
            dataio.write('\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff')
            dataio.write(compressobj.compress(data))
            dataio.write(compressobj.flush())
            dataio.write(struct.pack('<LL', zlib.crc32(data) & 0xFFFFFFFFL, len(data) & 0xFFFFFFFFL))
            data = dataio.getvalue()
        elif 'deflate' in accept_encoding:
            response_headers['Content-Encoding'] = 'deflate'
            data = deflate(data)
    response_headers['Content-Length'] = str(len(data))
    response_headers_data = deflate('\n'.join('%s:%s' % (k.title(), v) for k, v in response_headers.items() if not k.startswith('x-google-')))
    if 'rc4' not in options or content_type.startswith(('audio/', 'image/', 'video/')):
        start_response('200 OK', [('Content-Type', __content_type__)])
        yield struct.pack('!hh', int(response.status_code), len(response_headers_data))+response_headers_data
        yield data
    else:
        start_response('200 OK', [('Content-Type', __content_type__), ('X-GOA-Options', 'rc4')])
        yield struct.pack('!hh', int(response.status_code), len(response_headers_data))
        yield rc4crypt(response_headers_data, __password__)
        yield rc4crypt(data, __password__)
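
The two yield branches above define the wire format a GoAgent client has to undo: a struct-packed '!hh' prefix carrying the upstream status code and the length of the deflate-compressed header block, then the header block and the body (both RC4-encrypted when the 'rc4' option is active). A minimal client-side sketch for the plain variant, assuming the server's deflate() emits a raw deflate stream (parse_packed_response is an illustrative name, not part of the source):

import struct
import zlib

def parse_packed_response(blob):
    # 2-byte status code + 2-byte header-block length, big-endian
    status, headers_length = struct.unpack('!hh', blob[:4])
    # raw deflate stream (no zlib header), hence -zlib.MAX_WBITS
    raw_headers = zlib.decompress(blob[4:4 + headers_length], -zlib.MAX_WBITS)
    headers = dict(line.split(':', 1) for line in raw_headers.splitlines())
    body = blob[4 + headers_length:]
    return status, headers, body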
Example #37
    def process_request(self):
        environ = self.environ
        if environ['REQUEST_METHOD'] == 'GET':
            redirect_url = 'https://%s/2' % environ['HTTP_HOST']
            self.start_response('302 Redirect', [('Location', redirect_url)])
            return [redirect_url]

        data = zlib.decompress(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
        request = dict((k, v.decode('hex')) for k, _, v in (x.partition('=') for x in data.split('&')))

        method = request['method']
        url = request['url']
        payload = request['payload']

        if __password__ and __password__ != request.get('password', ''):
            return self.send_notify(method, url, 403, 'Wrong password.')

        if __hostsdeny__ and urlparse.urlparse(url).netloc.endswith(__hostsdeny__):
            return self.send_notify(method, url, 403, 'Hosts Deny: url=%r' % url)

        fetchmethod = getattr(urlfetch, method, '')
        if not fetchmethod:
            return self.send_notify(method, url, 501, 'Invalid Method')

        deadline = URLFETCH_TIMEOUT

        headers = dict((k.title(), v.lstrip()) for k, _, v in (line.partition(':') for line in request['headers'].splitlines()))
        headers['Connection'] = 'close'

        errors = []
        for _ in xrange(URLFETCH_MAX if 'fetchmax' not in request else int(request['fetchmax'])):
            try:
                response = urlfetch.fetch(url, payload, fetchmethod, headers,
                                          allow_truncated=False, follow_redirects=False,
                                          deadline=deadline, validate_certificate=False)
                break
            except apiproxy_errors.OverQuotaError as e:
                time.sleep(4)
            except urlfetch.DeadlineExceededError as e:
                errors.append('DeadlineExceededError %s(deadline=%s)' % (e, deadline))
                logging.error('DeadlineExceededError(deadline=%s, url=%r)', deadline, url)
                time.sleep(1)
            except urlfetch.DownloadError as e:
                errors.append('DownloadError %s(deadline=%s)' % (e, deadline))
                logging.error('DownloadError(deadline=%s, url=%r)', deadline, url)
                time.sleep(1)
            except urlfetch.InvalidURLError as e:
                return self.send_notify(method, url, 501, 'Invalid URL: %s' % e)
            except urlfetch.ResponseTooLargeError as e:
                response = e.response
                logging.error('ResponseTooLargeError(deadline=%s, url=%r) response(%r)', deadline, url, response)
                m = re.search(r'=\s*(\d+)-', headers.get('Range') or headers.get('range') or '')
                if m is None:
                    headers['Range'] = 'bytes=0-%d' % URLFETCH_MAXSIZE
                else:
                    headers.pop('Range', '')
                    headers.pop('range', '')
                    start = int(m.group(1))
                    headers['Range'] = 'bytes=%s-%d' % (start, start+URLFETCH_MAXSIZE)
                deadline = URLFETCH_TIMEOUT * 2
            except Exception as e:
                errors.append('Exception %s(deadline=%s)' % (e, deadline))
        else:
            return self.send_notify(method, url, 500, 'Python Server: Urlfetch error: %s' % errors)

        headers = response.headers
        if 'content-length' not in headers:
            headers['content-length'] = str(len(response.content))
        headers['connection'] = 'close'
        return self.send_response(response.status_code, headers, response.content)
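
The decoder at the top of process_request implies the client-side encoding: each field is hex-encoded, joined as key=value pairs with '&', and the whole string is zlib-compressed. A minimal sketch of a matching encoder (pack_request is an illustrative name, not part of the source):

import zlib

def pack_request(method, url, headers, payload, password=''):
    fields = {'method': method, 'url': url, 'headers': headers,
              'payload': payload, 'password': password}
    # values are hex-encoded so '&' and '=' never collide with field data
    data = '&'.join('%s=%s' % (k, v.encode('hex')) for k, v in fields.items())
    return zlib.compress(data)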
Example #38
    def post(self):
        if not settings.SECRET_FASTSPRING or enki.libutil.is_debug() or settings.ENKI_EMULATE_STORE:
            self.check_CSRF()

            product = xstr(self.request.get('product'))
            quantity = xint(self.request.get('quantity'))
            purchase_price = xstr(self.request.get('purchase_price'))
            purchaser_email = xstr(self.request.get('purchaser_email'))
            order_type = xstr(self.request.get('order_type'))
            licence_keys = 'not generated'
            user_id = ''
            order_id = webapp2_extras.security.generate_random_string(
                length=10, pool=webapp2_extras.security.DIGITS)

            url = webapp2.uri_for('generatelicencefastspring', _full=True)
            form_fields = {
                'secret': 'pretendsecret',
                'quantity': str(quantity)
            }
            form_data = enki.libutil.urlencode(form_fields)
            result = urlfetch.fetch(url=url,
                                    payload=form_data,
                                    method=urlfetch.POST)
            if result.status_code == 200:
                licence_keys = result.content.replace('-', '')

            referrer = xstr(self.request.get('referrer'))
            token = EnkiModelTokenVerify.get_by_token_type(
                referrer, 'purchasebyuser')
            if token:
                user_id = token.user_id
            licence_key_display = []
            for item in licence_keys.split():
                item_dash = EnkiModelProductKey.insert_dashes_5_10(item)
                licence_key_display.append(item_dash)
            self.add_infomessage(
                MSG.INFORMATION(),
                '<h3>FastSpring Store Emulator - Step 1</h3>' +
                '<h4>Emulated purchase details</h4>' + '<ul>' +
                '<li>&lt;EnkiModelProductKey&gt; #{FastSpring variable} = <em>&lt;emulated value&gt;</em></li>'
                + '<li>product_name #{orderItem.productName} = <em>' +
                product + '</em></li>' + '<li>order_id #{order.id} = <em>' +
                order_id + '</em></li>' +
                '<li>quantity #{orderItem.quantity} = <em>' + xstr(quantity) +
                '</em></li>' +
                '<li>purchase_price #{orderItem.totalUSD} = <em>' +
                purchase_price + '</em></li>' +
                '<li>purchaser_email #{order.customer.email} = <em>' +
                purchaser_email + '</em></li>' +
                '<li>licence_key #{orderItem.fulfillment.licence.licences.list} = <br><em><b>'
                + '<br>'.join(licence_key_display) + '</b></em></li>' +
                '<li>shop_name = <em>Emulator</em></li>' +
                '<li>order_type = <em>' + order_type + '</em></li>' + '</ul>'
                '<h4>Internal data - generated if the purchaser was logged in when they bought the product</h4>'
                + '<ul>' +
                '<li>EnkiModelTokenVerify.user_id = purchaser user_id = <em>' +
                (xstr(user_id) if user_id else 'None') + '</em></li>' +
                '<li>EnkiModelTokenVerify.type purchasebyuser referrer #{order.referrer} = <em>'
                + (xstr(referrer) if referrer else 'None') + '</em></li>' +
                '</ul>')

            url = webapp2.uri_for('ordercompletefastspring', _full=True)
            form_fields = {
                'licence_key': licence_keys,
                'purchase_price': purchase_price,
                'order_id': order_id,
                'order_type': order_type,
                'product_name': product,
                'purchaser_email': purchaser_email,
                'shop_name': 'Emulator',
                'quantity': quantity,
                'referrer': referrer,
                'is_test': 'true'
            }

            form_data = enki.libutil.urlencode(form_fields)
            result = urlfetch.fetch(url=url,
                                    payload=form_data,
                                    method=urlfetch.POST)
            if result.status_code == 200:
                message_view_library = ''
                if self.is_logged_in():
                    message_view_library = '<p><a href="/profile" class="alert-link">View and activate licence keys</a>.</p>'
                self.add_infomessage(
                    MSG.INFORMATION(),
                    '<h3>FastSpring Store Emulator - Step 2</h3><p>Purchase records created<p>'
                    + message_view_library)
            else:
                self.add_infomessage(
                    MSG.WARNING(),
                    '<h3>FastSpring Store Emulator - Step 2 FAILED: Purchase records not created</h3>'
                )

            self.redirect_to_relevant_page()
Example #39
	def post(self):
		# request basics
		ip = self.request.remote_addr

		# response, type, cross posting
		params = {}
		self.response.headers['Content-Type'] = "application/json"
		self.response.headers['Access-Control-Allow-Origin'] = '*'

		# check if this IP has any other bids open
		instancebid = InstanceBid.get_incomplete_by_ip(ip)


		# check we have an instancebid already
		if instancebid:
			# validate wisp
			if instancebid.wisp == None:
				instancebid.key.delete()
				return error_response(self, "Deleting bid because no wisp was associated.", 403, params)

			# load the payment address
			if instancebid.instance:
				instancebid.address = instancebid.instance.get().address
				instancebid.ask = instancebid.instance.get().ask
			else:
				# we should have an instance assosciated, so bail on this one
				instancebid.key.delete()
				return error_response(self, "Deleting bid because no instance was associated.", 403, params)

			params['response'] = "error"
			params['message'] = "The calling IP address already has an instance reservation in progress."
			params['instancebid'] = instancebid
			self.response.set_status(403)
			return self.render_template('api/bid.json', **params)	

		# load POSTed JSON
		try:
			request = json.loads(self.request.body)
		except Exception as ex:
			return error_response(self, "Failure in parsing request JSON.", 403, params)

		# load optional values or defaults
		# ipv4 (allow default)
		if 'requires_ipv4' in request:
			requires_ipv4 = request['requires_ipv4']
		else:
			requires_ipv4 = 0
		
		# ipv6 (allow default)
		if 'requires_ipv6' in request:
			requires_ipv6 = request['requires_ipv6']
		else:
			requires_ipv6 = 0

		# providers (allow default)
		if 'providers' in request:
			providers = request['providers']
		else:
			providers = [{u'id': 1, u'name': u'All Providers'}]		

		# flavors (required)
		if 'flavor' in request:
			flavor_name = request['flavor']
			flavor = Flavor.get_by_name(flavor_name)

			# check if flavor was found
			if not flavor:
				return error_response(self, "Flavor not found.", 403, params)

		else:
			return error_response(self, "Flavor name is required.", 403, params)

		# cloud (optional)
		if 'cloud_id' in request:
			cloud_id = request['cloud_id']
			cloud = Cloud.get_by_id(long(cloud_id))

			# check if cloud was found
			if not cloud:
				return error_response(self, "Cloud ID not found.", 403, params)
		else:
			cloud = None

		# disallow both a wisp and a callback_url
		if 'wisp_id' in request and 'callback_url' in request:
			return error_response(self, "A wisp and a callback URL may not be used together.", 403, params)

		# require either a wisp or a callback_url
		if 'wisp_id' not in request and 'callback_url' not in request:
			return error_response(self, "A valid wisp or a callback URL is required.", 403, params)

		# load the wisp, if there is one
		if 'wisp_id' in request:
			wisp_id = request['wisp_id']
			wisp = Wisp.get_by_id(long(wisp_id))
		else:
			wisp = None
		
		# load the callback URL, if there is one
		if 'callback_url' in request:
			callback_url = request['callback_url']
		elif wisp:
			callback_url = wisp.callback_url
		else:
			callback_url = ""

		# test we have a callback_url or a valid image in the wisp
		if callback_url:
			try:
				result = urlfetch.fetch(callback_url, deadline=5)
				if result.status_code > 399:
					return error_response(self, "The callback URL is unreachable.", 403, params)
				# test result's image URL
			except Exception as ex:
				return error_response(self, "The callback URL is unreachable.", 403, params)
		elif wisp:
			if wisp.image is None and wisp.dynamic_image_url is None and wisp.project is None:
				return error_response(self, "A valid wisp or a callback URL is required.", 403, params)

		# grab a new bid hash to use for the new bid
		token = generate_token(size=16)
		name = "smr-%s" % generate_token(size=8)

		# create a new bid
		instancebid = InstanceBid()
		instancebid.token = token
		instancebid.name = name
		instancebid.need_ipv4_address = bool(requires_ipv4)
		instancebid.need_ipv6_address = bool(requires_ipv6)
		instancebid.flavor = flavor.key
		instancebid.remote_ip = ip
		instancebid.appliances = providers # providers is already JSON
		instancebid.status = 0
		instancebid.callback_url = callback_url

		# expires in 5 minutes
		epoch_time = int(time.time())
		instancebid.expires = datetime.fromtimestamp(epoch_time+300)

		# add wisp, if present
		if wisp:
			instancebid.wisp = wisp.key
		
		# add cloud, if present
		if cloud:
			instancebid.cloud = cloud.key

		# update
		instancebid.put()

		# sleep for dev
		if config.debug:
			time.sleep(2)

		# reserve the instance
		InstanceBid.reserve_instance_by_token(instancebid.token)

		# get the address, if you got an instance
		if instancebid.instance:
			address = instancebid.instance.get().address
			ask = instancebid.instance.get().ask
		else:
			# no instance was reserved
			instancebid.key.delete()
			return error_response(self, "No valid instances were returned.", 403, params)
			
		# hack address and ask into instancebid object for template (not stored)
		instancebid.address = address
		instancebid.ask = ask

		# build out the response
		params['response'] = "success"
		params['message'] = "A new instance bid has been created."	
		params['instancebid'] = instancebid

		# return response and include cross site POST headers
		self.response.set_status(201)

		return self.render_template('api/bid.json', **params)
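
Putting the handler's contract together: the endpoint accepts a JSON body with a required flavor, either a wisp_id or a callback_url (not both), and optional requires_ipv4/requires_ipv6/providers/cloud_id fields, answering 201 on success and 403 on validation failures. A hedged client sketch (the endpoint path and host are illustrative):

import json
import urllib2

bid = {'flavor': 'small', 'requires_ipv4': 1,
       'callback_url': 'http://example.com/callback.json'}
req = urllib2.Request('http://pool.example.com/api/bid', json.dumps(bid),
                      {'Content-Type': 'application/json'})
print urllib2.urlopen(req).read()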
Example #40
    def open(self, url):
        self.realurl = url

        class resp:  # when an exception occurs, response is not a valid object; use this stand-in
            status_code = 555
            content = None
            headers = {}

        parts = []

        i = 0
        self.start = 0
        response = resp()
        HasTooLargeError = False
        RedirectCnt = 0

        # first check whether the server supports resumable downloads; a small file may already have downloaded completely
        while i < self.maxFetchCount:
            try:
                response = urlfetch.fetch(url,
                                          payload=None,
                                          method=urlfetch.GET,
                                          headers=self._getHeaders(url, True),
                                          allow_truncated=False,
                                          follow_redirects=False,
                                          deadline=self.timeout,
                                          validate_certificate=False)
                urlnew = response.headers.get('Location')
                if urlnew:
                    url = urlnew if urlnew.startswith("http") else \
                        urlparse.urljoin(url, urlnew)
                    i = 0
                    RedirectCnt += 1
                    if RedirectCnt > 2:
                        break
                else:
                    disp = response.headers.get('Content-Disposition')
                    if disp:
                        s = re.search(r'(?i)filename\s*=\s*(.*)', disp)
                        if s:
                            self.filename = s.group(1).replace('\"', '')
                            if '/' in self.filename:
                                self.filename = self.filename.split('/')[-1]
                    break
            except urlfetch.ResponseTooLargeError as e:
                HasTooLargeError = True
                break
            #except Exception as e:
            #    i += 1

        self.realurl = url
        content_range = response.headers.get('Content-Range')
        if response.status_code not in (200, 206):
            return response
        elif not content_range:
            if HasTooLargeError:
                default_log.warn(
                    'server does not support resuming downloads at breakpoints.'
                )
                response.content = ''

            return response

        # get the total file length
        self.filelen = 0
        try:
            self.filelen = int(content_range.split('/')[-1].strip())
        except:
            pass

        if self.filelen == 0:
            default_log.warn(
                'server does not support resuming downloads at breakpoints.')
            response.content = ''
            return response
        elif self.filelen > 31457280:  # 30MB
            default_log.warn('file is too large.')
            response.status_code = 413
            return response

        # save the first part (1k)
        parts.append(response.content)
        self.start = len(response.content)

        # now do the real download
        RedirectCnt = 0
        while i < self.maxFetchCount:
            try:
                response = urlfetch.fetch(url,
                                          payload=None,
                                          method=urlfetch.GET,
                                          headers=self._getHeaders(url),
                                          allow_truncated=False,
                                          follow_redirects=False,
                                          deadline=self.timeout,
                                          validate_certificate=False)
            except OverQuotaError as e:
                default_log.warn('overquota(url:%r)' % url)
                time.sleep(5)
                i += 1
            except urlfetch.DeadlineExceededError as e:
                default_log.warn('timeout(deadline:%s, url:%r)' %
                                 (self.timeout, url))
                time.sleep(1)
                i += 1
            except urlfetch.DownloadError as e:
                default_log.warn('DownloadError(url:%r)' % url)
                time.sleep(1)
                i += 1
            except urlfetch.ResponseTooLargeError as e:
                default_log.warn(
                    'server does not support resuming downloads at breakpoints.'
                )
                del parts[:]  # list.clear() does not exist in Python 2
                break
            except urlfetch.SSLCertificateError as e:
                # some sites do not support HTTPS access; for those, try switching to http
                if url.startswith(r'https://'):
                    url = url.replace(r'https://', r'http://')
                    i = 0
                    default_log.warn(
                        'server does not support HTTPS, switching to http.')
                else:
                    break
            except Exception as e:
                break
            else:
                urlnew = response.headers.get('Location')
                if urlnew:
                    url = urlnew if urlnew.startswith("http") else \
                        urlparse.urljoin(url, urlnew)
                    i = 0
                    RedirectCnt += 1
                    if RedirectCnt > 2:
                        break
                elif len(response.content):
                    self.SaveCookies(
                        response.header_msg.getheaders('Set-Cookie'))
                    parts.append(response.content)
                    self.start += len(response.content)
                    if self.start >= self.filelen:
                        break
                    else:
                        i = 0  # 继续下载下一块
                else:
                    break

        self.realurl = url
        if parts:
            response.content = ''.join(parts)
        return response
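
The loop above leans on self._getHeaders() to drive HTTP Range resumption, but that helper is not shown in the snippet. A sketch of what it presumably does, given the probe-then-resume behaviour visible in open() (the chunk size and User-Agent are assumptions):

def _getHeaders(self, url, first=False):
    headers = {'User-Agent': 'Mozilla/5.0'}
    if first:
        # probe request: only ask for the first kilobyte, so small files
        # finish immediately and Content-Range reveals the total length
        headers['Range'] = 'bytes=0-1023'
    else:
        # resume from self.start in fixed-size chunks
        end = self.start + 1024 * 1024 - 1
        headers['Range'] = 'bytes=%d-%d' % (self.start, end)
    return headers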
Example #41
 def get(self):
   url = 'https://%s/changes/?q=project:%s+' % (GERRIT_HOST, GERRIT_PROJECT)
   url += self.request.query_string
   result = urlfetch.fetch(url)
   self.response.status_int = result.status_code
   self.response.write(result.content[4:])  # 4: -> Strip Gerrit XSSI chars.
Example #42
        if capability == "all" or capability == "datastore_write":
            try:
                entry = StatusText(key_name="bazbookey")
                entry.content = "bazbooval"
                if entry.put():
                    health['datastore_write'] = RUNNING
                else:
                    health['datastore_write'] = FAILED
                    logging.error("Datastore write FAILED no exception given")
            except Exception, e:
                health['datastore_write'] = FAILED
                logging.error("Datastore write FAILED %s" % (str(e)))

        if capability == "all" or capability == "images":
            try:
                image = urlfetch.fetch(
                    "http://localhost/images/status_running.gif").content
                images.horizontal_flip(image)
                images.vertical_flip(image)
                images.rotate(image, 90)
                images.rotate(image, 270)
                health['images'] = RUNNING
            except Exception, e:
                health['images'] = FAILED
                logging.error("images API FAILED %s" % (str(e)))

        if capability == "all" or capability == "memcache":
            try:
                if memcache.set("boo", "baz", 10):
                    health['memcache'] = RUNNING
                else:
                    health['memcache'] = FAILED
Example #43
def post():
    request = gae_decode_data(
        zlib.decompress(
            sys.stdin.read(int(os.environ.get('CONTENT_LENGTH', -1)))))
    #logging.debug('post() get fetch request %s', request)

    method = request['method']
    url = request['url']
    payload = request['payload']

    if __password__ and __password__ != request.get('password', ''):
        return print_notify(method, url, 403, 'Wrong password.')

    fetchmethod = getattr(urlfetch, method, '')
    if not fetchmethod:
        return print_notify(method, url, 501, 'Invalid Method')

    if 'http' != url[:4]:
        return print_notify(method, url, 501, 'Unsupported Scheme')

    deadline = Deadline[1 if payload else 0]

    headers = dict(
        (k, v.lstrip())
        for k, _, v in (line.partition(':')
                        for line in request['headers'].splitlines()))
    headers['connection'] = 'close'

    fetchrange = 'bytes=0-%d' % (FetchMaxSize - 1)
    if 'range' in headers:
        try:
            start, end = re.search(r'(\d+)?-(\d+)?',
                                   headers['range']).group(1, 2)
            if start or end:
                if not start and int(end) > FetchMaxSize:
                    end = '1023'
                elif not end or int(end) - int(start) + 1 > FetchMaxSize:
                    end = str(FetchMaxSize - 1 + int(start))
                fetchrange = 'bytes=%s-%s' % (start, end)
        except:
            pass

    errors = []
    for i in xrange(int(request.get('fetchmax', FetchMax))):
        try:
            response = urlfetch.fetch(url,
                                      payload,
                                      fetchmethod,
                                      headers,
                                      follow_redirects=False,
                                      deadline=deadline,
                                      validate_certificate=False)
            #if method=='GET' and len(response.content)>0x1000000:
            #    raise urlfetch.ResponseTooLargeError(None)
            break
        except apiproxy_errors.OverQuotaError, e:
            time.sleep(4)
        except DeadlineExceededError, e:
            errors.append(str(e))
            logging.error('DeadlineExceededError(deadline=%s, url=%r)',
                          deadline, url)
            time.sleep(1)
            deadline = Deadline[1]
Example #44
	def post(self, instance_name):
		# paramters, assume failure, response type
		params = {}
		params['response'] = "error"
		self.response.headers['Content-Type'] = "application/json"

		# request basics
		ip = self.request.remote_addr

		try:
			body = json.loads(self.request.body)
			instance_schema = schemas['InstanceSchema'](**body['instance'])
			appliance_schema = schemas['ApplianceSchema'](**body['appliance'])

			# try to authenticate appliance
			if not Appliance.authenticate(appliance_schema.apitoken.as_dict()):
				logging.error("%s is using an invalid token(%s) or appliance deactivated."
					% (ip, appliance_schema.apitoken.as_dict()))
				return error_response(self, "Token is not valid.", 401, params)

			# fetch appliance and instance
			appliance = Appliance.get_by_token(appliance_schema.apitoken.as_dict())

			instance = Instance.get_by_name_appliance(
				instance_schema.name.as_dict(), 
				appliance.key
			)

			# if instance doesn't already exist, create it
			if not instance:
				wisp = Wisp.get_user_default(appliance.owner)
				if not wisp:
					wisp = Wisp.get_system_default()
				instance = Instance(wisp=wisp.key)

			# wrap instance into api shim in order to translate values from structure
			# of api to structure of model. I hope at some point in the future the two
			# models are similar enough so we can entirely drop this shim
			instance_shim = InstanceApiShim(instance)

			# update instance with values from post
			ApiSchemaHelper.fill_object_from_schema(
				instance_schema, instance_shim)

			# associate instance with its appliance
			instance_shim.appliance = appliance

		except Exception as e:
			return error_response(self, 'Error in creating or updating instance from '
				'post data, with message {0}'.format(str(e)), 500, {})


		# update local instance
		instance.put()

		# update appliance ip address hint
		if instance.state > 3 and instance.ipv4_address:
			appliance.ipv4enabled = True
		if instance.state > 3 and instance.ipv6_address:
			appliance.ipv6enabled = True
		appliance.put()

		# sleep for dev
		if config.debug:
			time.sleep(1)

		# send update information to channel
		if instance.token:

			output = {
				"name": instance.name,
				"token": instance.token,
				"state": instance.state,
			}
			channel.send_message(instance.token, json.dumps(output))		

		# pop a reload just in case user is on their cloud page
		if instance.owner:
			user_info = User.get_by_id(long(instance.owner.id()))
			channel.send_message(user_info.key.urlsafe(), "reload")

		# convert bid to instance
		# check if there is an instance bid reservation on this instance
		instancebid = InstanceBid.get_by_instance(instance.key)
		if instancebid:
			# check for a bid callback_url (entered in the callback field on the launcher)
			if instancebid.callback_url:
				# put the callback into the instance
				instance.callback_url = instancebid.callback_url
			
			elif instancebid.wisp:
				# otherwise, get the wisp's callback URL	
				callback_url = instancebid.wisp.get().callback_url
			
				# if the wisp has an empty callback URL, populate the instance with the wisp's bid details
				if not callback_url:
					instance.wisp = instancebid.wisp
					instance.owner = instancebid.wisp.get().owner
					instance.cloud = instancebid.cloud
				else:
					# we have a custom callback in the wisp itself, so move to instance
					instance.callback_url = callback_url

			# update the instance
			instance.put()

			# delete the instance reservation
			instancebid.key.delete()

		# proxy custom callback
	
		"""
		BEGIN CODE CALLOUT
		"""
		# August 24, 2014
		#
		# The following code retrieves content from a remote server and proxies it to 
		# an appliance for starting an instance. It is at this point an anonymous user's
		# callback pattern is most vulnerable to being monitored and manipulated.
		# 
		# The author of this software issues a warrant canary tweet to @stackape which 
		# should appear on the 1st of each month indicating the absence of a gag order
		# having been issued to StackMonkey, Utter.io, or Kord Campbell.
		#
		# Please monitor the code block between BEGIN CODE CALLOUT and END CODE CALLOUT 
		# if the canary tweet does not appear on schedule. Please monitor any callback
		# file's retrieval patterns if the canary tweet does not appear on schedule.
		# Please report any findings to the community sections of the project/site.
		#
		# Remember, the code is Open Source. Use that fact to the community's advantage.
		# 
		# The warrant canary will likely be your only indication a change has possibly
		# taken place to the system.  These changes may or may not enable a government 
		# agency to intercept and monitor instances using a custom URL callback.
		#
		# Kord Campbell
		#
		if instance.callback_url:
			try:
				result = urlfetch.fetch(instance.callback_url, deadline=5)
			except Exception as ex:
				logging.error("Error fetching callback URL content.")
				instance.console_output = "Error fetching callback url (%s) content: %s" % (instance.callback_url, ex)
				instance.put()

				# user may be sitting on an instance reservation here, so reload the page
				# this will force the handler to redirect the user to the instance page
				channel.send_message(instance.token, "reload")
				return error_response(self, "Error fetching callback URL content.", 401, params)

			# return content retrieved from the callback URL. If the JSON returned by
			# this method includes a callback_url in the data, the appliance will follow
			# that URL and will not call this API again during the life of the instance.
			self.response.headers['Content-Type'] = 'application/json'
			self.response.write(json.dumps(json.loads(result.content), sort_keys=True, indent=2))
			
			# return from here	
			return

		"""
		END CODE CALLOUT
		"""

		# at this point we have one of two scenarios:
		# 1. an external instance start (registered user with appliance, sans instancebid)
		# 2. registered user using a normal wisp WITHOUT a callback_url

		# grab the instance's wisp
		if instance.wisp:
			# if instance is using a wisp
			wisp = Wisp.get_by_id(instance.wisp.id())
		else:
			# no wisp on instance
			wisp = Wisp.get_user_default(instance.owner)

		# deliver default system wisp if none (external instance start)
		if not wisp:
			wisp = Wisp.get_system_default()

		# load wisp image
		if not wisp.use_dynamic_image:
			image = wisp.image.get()
		else:
			image = wisp.get_dynamic_image()

		# pop the ssh_key into an array
		if wisp.ssh_key:
			ssh_keys = []
			for line in iter(wisp.ssh_key.splitlines()):
				ssh_keys.append(line)
		else:
			ssh_keys = [""]

		# pop the post creation script into an array
		if wisp.post_creation:
			post_creation = []
			for line in iter(wisp.post_creation.splitlines()):
				post_creation.append(line)
		else:
			post_creation = [""]

		# some of replay's magic - need docs on this
		start_params = schemas['InstanceStartParametersSchema']()
		data = {
			'image': image,
			'callback_url': wisp.callback_url if wisp.callback_url else "",
			'ssh_keys': ssh_keys,
			'post_create': post_creation}
		ApiSchemaHelper.fill_schema_from_object(start_params, data)

		self.response.set_status(200)
		self.response.headers['Content-Type'] = 'application/json'

		# write dictionary as json string
		self.response.out.write(json.dumps(
				# retrieve dict from schema
				start_params.as_dict()))
Example #45
    def delete(self):
        if lindenip.inrange(os.environ['REMOTE_ADDR']
                            ) != 'Production':  #only allow access from sl
            self.error(403)
        elif self.request.headers['X-SecondLife-Shard'] != 'Production':
            logging.warning("Attempt while on beta grid %s" %
                            (self.request.headers['X-SecondLife-Shard']))
            self.response.set_status(305)
        else:
            av = self.request.headers[
                'X-SecondLife-Owner-Key']  #get owner av key
            avname = self.request.headers[
                'X-SecondLife-Owner-Name']  #get owner av name
            subbie = self.request.path.split("/")[
                -1]  #get key of sub from path
            if avname != "(Loading...)":
                relations.update_av(av, avname)  #resolve key 2 name for owner
            logging.info("Remove sub request from %s (%s) for sub (%s)" %
                         (av, avname, subbie))
            answer = 0
            # first check if owner
            record = AvTokenValue.gql(
                "WHERE av = :1 AND token = :2", subbie,
                g_owner).get()  # request owner record of subbie
            if record is not None:
                #found a record for that subbie
                if av in record.value:
                    # and the av is still the owner, so remove it
                    ownerlist = record.value.split(",")
                    owner_index = ownerlist.index(av)
                    del ownerlist[owner_index:owner_index + 2]
                    if ownerlist == []:
                        # list is empty, so just delete the record
                        record.delete()
                        logging.info(
                            "Remove sub request from %s for %s: Primary owner record deleted"
                            % (avname, subbie))
                    else:
                        # build the new owner list and save it
                        s = ""
                        for x in ownerlist:
                            s += x + ","
                        logging.info(s.rstrip(','))
                        # update the records value
                        record.value = s.rstrip(',')
                        # and save it
                        record.put()
                        logging.info(
                            "Remove sub request from %s for %s: Primary record updated: %s"
                            % (avname, subbie, record.value))

                    # update the relation db
                    relations.delete(av, "owns", subbie)
                    # and prepare the answer for sl
                    answer += 1

            #now we do the same for the secowner
            record = AvTokenValue.gql(
                "WHERE av = :1 AND token = :2", subbie,
                g_secowner).get()  # request owner record of subbie
            if record is not None:
                #found a record for that subbie
                if av in record.value:
                    # and the av is still the owner, so remove it
                    ownerlist = record.value.split(",")
                    owner_index = ownerlist.index(av)
                    del ownerlist[owner_index:owner_index + 2]
                    if ownerlist == []:
                        # list is empty, so just delete the record
                        record.delete()
                        logging.info(
                            "Remove sub request from %s for %s: Secondary owner record deleted"
                            % (avname, subbie))
                    else:
                        #build the new secowner list and save it
                        s = ""
                        for x in ownerlist:
                            s += x + ","
                        logging.info(s.rstrip(','))
                        # update the records value
                        record.value = s.rstrip(',')
                        # and save it
                        record.put()
                        logging.info(
                            "Remove sub request from %s for %s: Secondary owner record updated: %s"
                            % (avname, subbie, record.value))

                    # update the relation db
                    relations.delete(av, "secowns", subbie)
                    # and prepare the answer for sl
                    answer += 2

            # updating relation again due to bug 716: the relations were not always properly updated, so we make sure it happens now
            if answer in (0, 2):
                if (relations.delete(av, "owns", subbie) == 1):
                    logging.info(
                        "Remove sub request from %s for %s: Not in subbies db, but primary owner relation removed"
                        % (avname, subbie))
                    answer += 1
            if answer in (0, 1):
                if (relations.delete(av, "secowns", subbie) == 1):
                    logging.info(
                        "Remove sub request from %s for %s: Not in subbies db, but secondary owner relation removed"
                        % (avname, subbie))
                    answer += 2

            # in case the answer is 0, something is wrong and the DBs from cmds and data drifted apart. We send a delete request to cmds, which hopefully fixes it
            if answer == 0:
                logging.info(
                    'Relation not found, sending safety request to cmds')
                result = urlfetch.fetch(cmdurl + '/relation/?safety/%s/%s' %
                                        (subbie, av),
                                        method="DELETE",
                                        headers={'sharedpass': sharedpass})
                if result.status_code == 202:
                    logging.info('Answer from cmds received: %s' %
                                 result.content)
                    answer = int(result.content)
                else:
                    logging.info(
                        'Problem with answer from cmds, status %d\n%s' %
                        (result.status_code, result.content))

            #answer to sl so we know what happened
            self.response.headers['Content-Type'] = 'text/plain'
            self.response.out.write("%d" % answer)
            self.response.set_status(200)
Example #46
    def get(self):
        logging.info('Starting Main handler')

        url = "https://crs.iformbuilder.com/exzact/dataJSON.php?PAGE_ID=8669630&TABLE_NAME=_data11323_mapping_services_sante_mentales_survey&USERNAME=IBERSLINK&PASSWORD=PASSword@123"

        result = urlfetch.fetch(url=url, deadline=3600)

        if result.status_code == 200:
            data = json.loads(result.content)
            count = 0
            error_count = 0
            for obj in data:
                if count < 15000:
                    record = Record.Record()
                    full_obj = obj["record"]
                    for k in full_obj:
                        try:
                            if not full_obj[k] == None:
                                if isinstance(full_obj[k], (int, long)):
                                    full_obj[k] = str(full_obj[k])
                                setattr(record, k.lower(),
                                        full_obj[k].encode("utf-8").strip())

                                if k.encode("utf-8").strip().lower() == "gps_":
                                    logging.info("length")
                                    logging.info(
                                        len(full_obj[k].encode(
                                            "utf-8").strip().lower()))
                                    if len(
                                            str(full_obj[k].encode("utf-8").
                                                strip().lower())) != 0:
                                        k = full_obj[k].encode("utf-8").strip()
                                        try:
                                            latitude_index = k.index(
                                                "Latitude:")
                                            latitude_index = latitude_index + 9
                                            latitude = k[
                                                latitude_index:latitude_index +
                                                8]
                                            #logging.info(latitude)

                                            setattr(
                                                record, "latitude",
                                                latitude.encode(
                                                    "utf-8").strip())

                                            longitude_index = k.index(
                                                "Longitude:")
                                            longitude_index = longitude_index + 10
                                            longitude = k[longitude_index:
                                                          longitude_index + 8]
                                            #logging.info(longitude)
                                            setattr(
                                                record, "longitude",
                                                longitude.encode(
                                                    "utf-8").strip())
                                        except:
                                            latitude = k[:9]
                                            setattr(
                                                record, "latitude",
                                                latitude.encode(
                                                    "utf-8").strip())

                                            longitude = k[11:20]
                                            setattr(
                                                record, "longitude",
                                                longitude.encode(
                                                    "utf-8").strip())
                                    else:
                                        setattr(record, "latitude", "empty")
                                        setattr(record, "longitude", "empty")

                        except Exception as e:
                            error_count += 1
                            logging.info("error_count")
                            logging.info(error_count)
                            logging.info(k)
                            #logging.info(full_obj[k])
                            logging.info(e)

                    record.put()
                    count += 1
            logging.info("Finished")
            return True
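
The fixed-width slicing around "Latitude:"/"Longitude:" above is fragile. A regex-based sketch of the same extraction, assuming values like "Latitude: 48.858844 Longitude: 2.294351" (parse_gps is an illustrative helper, not part of the source):

import re

def parse_gps(value):
    # pull signed decimal numbers out of the GPS field in one pass
    m = re.search(r'Latitude:\s*(-?[\d.]+).*?Longitude:\s*(-?[\d.]+)', value)
    if m:
        return m.group(1), m.group(2)
    return None, None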
Example #47
    def get(self):
        question = self.request.get("question")
        if question != '':
            form = {"q": question}
            form_data = urllib.urlencode(form)
            resp = urlfetch.fetch(host + "ask",
                                  payload=form_data,
                                  method="POST",
                                  follow_redirects=False)
            if resp.status_code == 200:
                logging.debug("OK")
                logging.debug(resp.content)
            else:
                logging.debug("FAILED ON QUESTION")
                pass
            country = 'US'
            indicator = 'TM.TAX.MRCH.SM.AR.ZS'
            pass
        else:
            country = self.request.get("country")
            indicator = self.request.get("indicator")
            pass
        start = self.request.get("start")
        if start == '':
            start = '2000'
        end = self.request.get("end")
        if end == '':
            end = '2012'

        if indicator == '':
            indicator = 'TM.TAX.MRCH.SM.AR.ZS'
            pass
        data = {}

        country_code = country.lower()
        indicator = indicator.upper()
        url = "http://api.worldbank.org/countries/" + country_code + "/indicators/" + indicator + "?" + \
        "date=" + start + ":" + end + "&" + "format=" + "json"

        resp = urlfetch.fetch(url, method="GET", follow_redirects=True)
        if resp.status_code == 200:
            logging.debug(resp.status_code)
            try:
                data = json.loads(resp.content)
            except:
                logging.info(resp.content)
                pass
        else:
            logging.debug(resp.status_code)
            logging.debug(resp.content)
            pass
        rows = {}
        old_viz_data = []
        viz_data = []
        countries = {}
        try:
            for row in data[1]:
                key = row['country']['id']
                countries[key] = row['country']['value']
                try:
                    rows[row['date']][key] = row['value']
                except:
                    rows[row['date']] = {}
                    rows[row['date']][key] = row['value']
                pass

            for yr in rows.keys():
                viz_row = {"date": date(int(yr), 1, 1)}
                for k in rows[yr].keys():
                    try:
                        viz_row[k] = float(rows[yr][k])
                    except:
                        viz_row[k] = None
                        pass
                    pass
                viz_data.append(viz_row)
        except:
            pass

            chart_data = {}
            chart_data['cols'] = [{
                'id': 'date',
                'label': 'Date',
                'type': 'number'
            }, {
                'id': 'value',
                'label': 'Value',
                'type': 'number'
            }]
            chart_data['rows'] = rows
            pass

        indicator_value = data[1][0]['indicator']['value']

        viz_desc = {"date": ("date", "Date")}
        order = ["date"]
        for k in countries.keys():
            viz_desc[k] = ("number", countries[k])
            order.append(k)

        data_table = DataTable(viz_desc)
        data_table.LoadData(viz_data)
        template_values = {
            'question': SafeString(question),
            'start': start,
            'end': end,
            'country': country,
            'indicator': indicator,
            'data': data_table.ToJSon(columns_order=order, order_by="date"),
            'title': SafeString(indicator_value)
        }

        if self.request.path == '/':
            path = os.path.join(os.path.dirname(__file__),
                                'templates/index.html')
        else:
            path = os.path.join(os.path.dirname(__file__),
                                'templates' + self.request.path)
            pass

        self.response.out.write(template.render(path, template_values))
        return
Example #48
    def create(cls,
               blob_info=None,
               data=None,
               filename=None,
               url=None,
               mime_type=None,
               **kwargs):
        """
        Create an ``Image``. Use this class method rather than creating an image with the constructor. You must provide one
        of the following parameters ``blob_info``, ``data``, or ``url`` to specify the image data to use.

        :param blob_info: The `Blobstore`_ data to use as the image data. If this parameter is not ``None``, all
            other parameters will be ignored as they are not needed.
        :param data: The image data that should be put in the `Blobstore`_ and used as the image data.
        :param filename: The filename of the image data. If not provided, the filename will be guessed from the URL
            or, if there is no URL, it will be set to the stringified `Key`_ of the image entity.
        :param url: The URL to fetch the image data from and then place in the `Blobstore`_ to be used as the image data.
        :param mime_type: The `mime type`_ to use for the `Blobstore`_ image data.
            If ``None``, it will attempt to guess the mime type from the url fetch response headers or the filename.
        :param parent:  Inherited from `Model`_. The `Model`_ instance or `Key`_ instance for the entity that is the new
            image's parent.
        :param key_name: Inherited from `Model`_. The name for the new entity. The name becomes part of the primary key.
        :param key: Inherited from `Model`_. The explicit `Key`_ instance for the new entity.
            Cannot be used with ``key_name`` or ``parent``. If ``None``, falls back on the behavior for ``key_name`` and
            ``parent``.
        :param kwargs: Initial values for the instance's properties, as keyword arguments.  Useful if subclassing.
        :return: An instance of the ``Image`` class.
        """
        if filename is not None:
            filename = filename.encode('ascii', 'ignore')
        if url is not None:
            url = url.encode('ascii', 'ignore')
        if blob_info is not None:
            kwargs['blob_info'] = blob_info
            return cls.create_new_entity(**kwargs)
        if data is None:
            if url is not None:
                response = urlfetch.fetch(url)
                data = response.content
                mime_type = mime_type or response.headers.get(
                    'Content-Type', None)
                if filename is None:
                    path = urlparse.urlsplit(url)[2]
                    filename = path[path.rfind('/') + 1:]
        if data is None:
            raise db.Error("No image data")
        image = cls.create_new_entity(source_url=url, **kwargs)
        filename = filename or str(image.key())
        mime_type = mime_type or mimetypes.guess_type(
            filename)[0] or 'application/octet-stream'
        if mime_type not in config.VALID_MIME_TYPES:
            message = "The image mime type (%s) isn't valid" % mime_type
            logging.warning(message)
            image.delete()
            raise images.BadImageError(message)
        blob_file_name = files.blobstore.create(
            mime_type=mime_type, _blobinfo_uploaded_filename=filename)
        with files.open(blob_file_name, 'a') as f:
            f.write(data)
        files.finalize(blob_file_name)
        image.blob_info = files.blobstore.get_blob_key(blob_file_name)
        image.put()
        image = cls.get(str(image.key()))
        if image is not None and image.blob_info is None:
            logging.error("Failed to create image: %s" % filename)
            image.delete()
            image = None
        return image
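
Given the docstring above, a typical call pulls a remote file straight into the Blobstore (the URL is illustrative):

# hypothetical usage: mirror a remote image into the Blobstore
image = Image.create(url='http://example.com/logo.png')
if image is None:
    logging.error('image creation failed')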
Example #49
def download(url):
    response = urlfetch.fetch(url, follow_redirects=True)
    return response
Example #50
 def _oauth2_request(self, url, token, token_param='access_token'):
     """Makes an HTTP request with an OAuth 2.0 access token using the
     App Engine URLfetch API.
     """
     target_url = url.format(urlencode({token_param: token}))
     return urlfetch.fetch(target_url).content
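
The url argument is expected to carry a str.format placeholder that receives the urlencoded token, e.g. (the endpoint is illustrative):

# '{0}' is filled with 'access_token=<token>' by the helper above
profile = self._oauth2_request(
    'https://www.googleapis.com/oauth2/v1/userinfo?{0}', token)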
Example #51
def application(environ, start_response):
    if environ[
            'REQUEST_METHOD'] == 'GET' and 'HTTP_X_URLFETCH_PS1' not in environ:
        # used by xxnet itself
        timestamp = long(
            os.environ['CURRENT_VERSION_ID'].split('.')[1]) / 2**28
        ctime = time.strftime('%Y-%m-%d %H:%M:%S',
                              time.gmtime(timestamp + 8 * 3600))
        start_response('200 OK', [('Content-Type', 'text/plain')])
        yield 'GoAgent Python Server %s works, deployed at %s\n' % (
            __version__, ctime)
        if len(__password__) > 2:
            yield 'Password: %s%s%s' % (__password__[0], '*' *
                                        (len(__password__) - 2),
                                        __password__[-1])
        raise StopIteration

    start_response('200 OK', [('Content-Type', 'image/gif'),
                              ('X-Server', 'GPS ' + __version__)])

    if environ['REQUEST_METHOD'] == 'HEAD':
        raise StopIteration
        # for HEAD requests the response headers above already complete the reply

    options = environ.get('HTTP_X_URLFETCH_OPTIONS', '')
    # not sure how to obtain this directly,
    # but normally this block is unused
    if 'rc4' in options and not __password__:
        # the client wants encryption, but GAE has no password set

        # note: without client-side source changes, 'rc4' is never set here
        yield format_response(
            400, {'Content-Type': 'text/html; charset=utf-8'},
            message_html(
                '400 Bad Request',
                'Bad Request (options) - please set __password__ in gae.py',
                'please set __password__ and upload gae.py again'))
        raise StopIteration

    try:
        if 'HTTP_X_URLFETCH_PS1' in environ:
            # part one: the packed request head
            payload = inflate(base64.b64decode(environ['HTTP_X_URLFETCH_PS1']))
            body = inflate(
                base64.b64decode(
                    # part two, i.e. the original body
                    environ['HTTP_X_URLFETCH_PS2'])
            ) if 'HTTP_X_URLFETCH_PS2' in environ else ''
        else:
            # POST: read the packed data from the request body
            wsgi_input = environ['wsgi.input']
            input_data = wsgi_input.read(
                int(environ.get('CONTENT_LENGTH', '0')))
            if 'rc4' in options:
                input_data = RC4Cipher(__password__).encrypt(input_data)
            payload_length, = struct.unpack('!h', input_data[:2])  # read the length
            payload = inflate(input_data[2:2 + payload_length])  # extract the packed head
            body = input_data[2 + payload_length:]  # extract the body

        raw_response_line, payload = payload.split('\r\n', 1)
        method, url = raw_response_line.split()[:2]
        # http content (this is the body):
        #{
        # pack_req_head_len: 2 bytes, used for POST

        # pack_req_head : deflate{
        # (this is the payload)
        # original request line,
        # original request headers,
        # X-URLFETCH-kwargs HEADS, {
        # password,
        # maxsize, defined in config AUTO RANGE MAX SIZE
        # timeout, request timeout for GAE urlfetch.
        #}
        #}
        # body
        #}

        headers = {}
        # parse the original request headers
        for line in payload.splitlines():
            key, value = line.split(':', 1)
            headers[key.title()] = value.strip()
    except (zlib.error, KeyError, ValueError):
        import traceback
        yield format_response(
            500, {'Content-Type': 'text/html; charset=utf-8'},
            message_html('500 Internal Server Error',
                         'Bad Request (payload) - Possible Wrong Password',
                         '<pre>%s</pre>' % traceback.format_exc()))
        raise StopIteration

    # extract the x-urlfetch-* kwargs meant for GAE
    kwargs = {}
    any(
        kwargs.__setitem__(x[len('x-urlfetch-'):].lower(), headers.pop(x))
        for x in headers.keys() if x.lower().startswith('x-urlfetch-'))

    if 'Content-Encoding' in headers and body:
        # fix bug for LinkedIn android client
        if headers['Content-Encoding'] == 'deflate':
            try:
                body2 = inflate(body)
                headers['Content-Length'] = str(len(body2))
                del headers['Content-Encoding']
                body = body2
            except BaseException:
                pass

    logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], method, url,
                 'HTTP/1.1')

    # apply the control parameters
    if __password__ and __password__ != kwargs.get('password', ''):
        yield format_response(
            403, {'Content-Type': 'text/html; charset=utf-8'},
            message_html('403 Wrong password',
                         'Wrong password(%r)' % kwargs.get('password', ''),
                         'GoAgent proxy.ini password is wrong!'))
        raise StopIteration

    netloc = urlparse.urlparse(url).netloc

    if __hostsdeny__ and netloc.endswith(__hostsdeny__):
        yield format_response(
            403, {'Content-Type': 'text/html; charset=utf-8'},
            message_html(
                '403 Hosts Deny',
                'Hosts Deny(%r)' % netloc,
                detail=
                'Public appids have limited resources, so bandwidth-heavy usage such as video streaming and file downloads is blocked; please use your own appid. <a href=" https://github.com/XX-net/XX-Net/wiki/Register-Google-appid" target="_blank">Help</a> '
            ))
        raise StopIteration

    if len(url) > MAX_URL_LENGTH:
        yield format_response(
            400, {'Content-Type': 'text/html; charset=utf-8'},
            message_html('400 Bad Request',
                         'length of URL too long(greater than %r)' %
                         MAX_URL_LENGTH,
                         detail='url=%r' % url))
        raise StopIteration

    if netloc.startswith(('127.0.0.', '::1', 'localhost')):
        # loopback target: used as a liveness test
        yield format_response(
            400, {'Content-Type': 'text/html; charset=utf-8'},
            message_html(
                'GoAgent %s is Running' % __version__,
                'Now you can visit some websites',
                ''.join('<a href="https://%s/">%s</a><br/>' % (x, x)
                        for x in ('google.com', 'mail.google.com'))))
        raise StopIteration

    fetchmethod = getattr(urlfetch, method, None)
    if not fetchmethod:
        yield format_response(
            405, {'Content-Type': 'text/html; charset=utf-8'},
            message_html('405 Method Not Allowed',
                         'Method Not Allowed: %r' % method,
                         detail='Method Not Allowed URL=%r' % url))
        raise StopIteration

    timeout = int(kwargs.get('timeout', URLFETCH_TIMEOUT))
    validate_certificate = bool(int(kwargs.get('validate', 0)))
    maxsize = int(kwargs.get('maxsize', 0))
    # https://www.freebsdchina.org/forum/viewtopic.php?t=54269
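    # 'Bccept-Encoding' below is deliberate: the client renames the
    # Accept-Encoding header so that GAE's urlfetch will not rewrite it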
    accept_encoding = headers.get('Accept-Encoding', '') or headers.get(
        'Bccept-Encoding', '')
    errors = []
    allow_truncated = False
    for i in xrange(int(kwargs.get('fetchmax', URLFETCH_MAX))):
        try:
            response = urlfetch.fetch(
                url,
                body,
                fetchmethod,
                headers,
                allow_truncated=allow_truncated,
                follow_redirects=False,
                deadline=timeout,
                validate_certificate=validate_certificate)
            # got the real upstream response
            break
        except apiproxy_errors.OverQuotaError as e:
            time.sleep(5)
        except urlfetch.DeadlineExceededError as e:
            errors.append('%r, timeout=%s' % (e, timeout))
            logging.error('DeadlineExceededError(timeout=%s, url=%r)', timeout,
                          url)
            time.sleep(1)

            # the retry must allow truncated responses
            allow_truncated = True
            m = re.search(r'=\s*(\d+)-',
                          headers.get('Range') or headers.get('range') or '')
            if m is None:
                headers['Range'] = 'bytes=0-%d' % (maxsize or URLFETCH_MAXSIZE)
            else:
                headers.pop('Range', '')
                headers.pop('range', '')
                start = int(m.group(1))
                headers['Range'] = 'bytes=%s-%d' % (
                    start, start + (maxsize or URLFETCH_MAXSIZE))

            timeout *= 2
        except urlfetch.DownloadError as e:
            errors.append('%r, timeout=%s' % (e, timeout))
            logging.error('DownloadError(timeout=%s, url=%r)', timeout, url)
            time.sleep(1)
            timeout *= 2
        except urlfetch.ResponseTooLargeError as e:
            errors.append('%r, timeout=%s' % (e, timeout))
            response = e.response
            logging.error(
                'ResponseTooLargeError(timeout=%s, url=%r) response(%r)',
                timeout, url, response)

            allow_truncated = True
            m = re.search(r'=\s*(\d+)-',
                          headers.get('Range') or headers.get('range') or '')
            if m is None:
                headers['Range'] = 'bytes=0-%d' % (maxsize or URLFETCH_MAXSIZE)
            else:
                headers.pop('Range', '')
                headers.pop('range', '')
                start = int(m.group(1))
                headers['Range'] = 'bytes=%s-%d' % (
                    start, start + (maxsize or URLFETCH_MAXSIZE))
            timeout *= 2
        except urlfetch.SSLCertificateError as e:
            errors.append('%r, should validate=0 ?' % e)
            logging.error('%r, timeout=%s', e, timeout)
        except Exception as e:
            errors.append(str(e))
            stack_str = "stack:%s" % traceback.format_exc()
            errors.append(stack_str)
            if i == 0 and method == 'GET':
                timeout *= 2
    else:
        error_string = '<br />\n'.join(errors)
        if not error_string:
            logurl = 'https://appengine.google.com/logs?&app_id=%s' % os.environ[
                'APPLICATION_ID']
            error_string = 'Internal Server Error. <p/>try <a href="javascript:window.location.reload(true);">refresh</a> or goto <a href="%s" target="_blank">appengine.google.com</a> for details' % logurl
        yield format_response(
            502, {'Content-Type': 'text/html; charset=utf-8'},
            message_html('502 Urlfetch Error',
                         'Python Urlfetch Error: %r' % method, error_string))
        raise StopIteration

    #logging.debug('url=%r response.status_code=%r response.headers=%r response.content[:1024]=%r', url, response.status_code, dict(response.headers), response.content[:1024])

    # everything above implements the details of the fetch

    status_code = int(response.status_code)
    data = response.content
    response_headers = response.headers
    response_headers['X-Head-Content-Length'] = response_headers.get(
        'Content-Length', '')
    # for k in response_headers:
    #    v = response_headers[k]
    #    logging.debug("Head:%s: %s", k, v)
    content_type = response_headers.get('content-type', '')
    content_encoding = response_headers.get('content-encoding', '')
    # range-splitting detail: truncate an oversized 200 into a 206 partial
    if status_code == 200 and maxsize and len(
            data) > maxsize and response_headers.get(
                'accept-ranges', '').lower() == 'bytes' and int(
                    response_headers.get('content-length', 0)):
        logging.debug("data len:%d max:%d", len(data), maxsize)
        status_code = 206
        response_headers['Content-Range'] = 'bytes 0-%d/%d' % (maxsize - 1,
                                                               len(data))
        data = data[:maxsize]
    if 'gzip' in accept_encoding:
        if (data and status_code == 200 and content_encoding == ''
                and is_text_content_type(content_type) and is_deflate(data)):
            # ignore wrong "Content-Type"
            guessed_type = guess_type(url)[0]
            if guessed_type is None or is_text_content_type(guessed_type):
                if 'deflate' in accept_encoding:
                    response_headers[
                        'Content-Encoding'] = content_encoding = 'deflate'
                else:
                    data = inflate(data)
    else:
        if content_encoding in ('gzip', 'deflate', 'br'):
            del response_headers['Content-Encoding']
            content_encoding = ''
    if status_code == 200 and content_encoding == '' and 512 < len(
            data) < URLFETCH_DEFLATE_MAXSIZE and is_text_content_type(
                content_type):
        if 'gzip' in accept_encoding:
            response_headers['Content-Encoding'] = 'gzip'
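            # hand-build a gzip stream: 10-byte header, raw-deflate body,
            # then CRC32 and length trailer (RFC 1952)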
            compressobj = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                           zlib.DEFLATED, -zlib.MAX_WBITS,
                                           zlib.DEF_MEM_LEVEL, 0)
            dataio = io.BytesIO()
            dataio.write('\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff')
            dataio.write(compressobj.compress(data))
            dataio.write(compressobj.flush())
            dataio.write(
                struct.pack('<LL',
                            zlib.crc32(data) & 0xFFFFFFFF,
                            len(data) & 0xFFFFFFFF))
            data = dataio.getvalue()
        elif 'deflate' in accept_encoding:
            response_headers['Content-Encoding'] = 'deflate'
            data = deflate(data)
    response_headers['Content-Length'] = str(len(data))
    if 'rc4' not in options:
        yield format_response(status_code, response_headers, '')
        yield data
    else:
        cipher = RC4Cipher(__password__)
        yield cipher.encrypt(format_response(status_code, response_headers,
                                             ''))
        yield cipher.encrypt(data)
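
# A client-side sketch (not part of the handler above) of the packed request
# it expects in POST mode: a 2-byte big-endian head length, the deflated
# request line plus headers (with the X-URLFETCH-* control headers mixed in),
# then the raw body. It assumes deflate means a raw zlib stream (wbits=-15)
# as elsewhere in GoAgent; the helper name is hypothetical.
import struct
import zlib

def pack_urlfetch_request(method, url, headers, body='',
                          password='', maxsize=0, timeout=20):
    lines = ['%s %s HTTP/1.1' % (method, url)]
    lines += ['%s: %s' % (k, v) for k, v in headers.items()]
    lines += ['X-URLFETCH-password: %s' % password,
              'X-URLFETCH-maxsize: %d' % maxsize,
              'X-URLFETCH-timeout: %d' % timeout]
    head = zlib.compress('\r\n'.join(lines))[2:-4]  # strip to raw deflate
    return struct.pack('!h', len(head)) + head + body
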
    def get(self, event_key):
        if tba_config.CONFIG["env"] == "prod":  # disable in prod for now
            logging.error("Tried to restore {} from CSV in prod! No can do.".format(event_key))
            return

        event = Event.get_by_id(event_key)

        # alliances
        result = urlfetch.fetch(self.ALLIANCES_URL.format(event.year, event_key, event_key))
        if result.status_code != 200:
            logging.warning('Unable to retrieve url: ' + (self.ALLIANCES_URL.format(event.year, event_key, event_key)))
        else:
            data = result.content.replace('frc', '')
            alliance_selections = CSVAllianceSelectionsParser.parse(data)

            event_details = EventDetails(
                id=event_key,
                alliance_selections=alliance_selections
            )
            EventDetailsManipulator.createOrUpdate(event_details)

        # awards
        result = urlfetch.fetch(self.AWARDS_URL.format(event.year, event_key, event_key))
        if result.status_code != 200:
            logging.warning('Unable to retrieve url: ' + (self.AWARDS_URL.format(event.year, event_key, event_key)))
        else:
            # convert into expected input format
            data = StringIO.StringIO()
            writer = csv.writer(data, delimiter=',')
            for row in csv.reader(StringIO.StringIO(result.content), delimiter=','):
                writer.writerow([event.year, event.event_short, row[1], row[2].replace('frc', ''), row[3]])

            awards = []
            for award in CSVAwardsParser.parse(data.getvalue()):
                awards.append(Award(
                    id=Award.render_key_name(event.key_name, award['award_type_enum']),
                    name_str=award['name_str'],
                    award_type_enum=award['award_type_enum'],
                    year=event.year,
                    event=event.key,
                    event_type_enum=event.event_type_enum,
                    team_list=[ndb.Key(Team, 'frc{}'.format(team_number)) for team_number in award['team_number_list']],
                    recipient_json_list=award['recipient_json_list']
                ))
            AwardManipulator.createOrUpdate(awards)

        # matches
        result = urlfetch.fetch(self.MATCHES_URL.format(event.year, event_key, event_key))
        if result.status_code != 200:
            logging.warning('Unable to retrieve url: ' + (self.MATCHES_URL.format(event.year, event_key, event_key)))
        else:
            data = result.content.replace('frc', '').replace('{}_'.format(event_key), '')
            match_dicts, _ = OffseasonMatchesParser.parse(data)
            matches = [
                Match(
                    id=Match.renderKeyName(
                        event.key.id(),
                        match.get("comp_level", None),
                        match.get("set_number", 0),
                        match.get("match_number", 0)),
                    event=event.key,
                    year=event.year,
                    set_number=match.get("set_number", 0),
                    match_number=match.get("match_number", 0),
                    comp_level=match.get("comp_level", None),
                    team_key_names=match.get("team_key_names", None),
                    alliances_json=match.get("alliances_json", None)
                )
            for match in match_dicts]
            MatchManipulator.createOrUpdate(matches)

        # rankings
        result = urlfetch.fetch(self.RANKINGS_URL.format(event.year, event_key, event_key))
        if result.status_code != 200:
            logging.warning('Unable to retrieve url: ' + (self.RANKINGS_URL.format(event.year, event_key, event_key)))
        else:
            # convert into expected input format
            rankings = list(csv.reader(StringIO.StringIO(result.content), delimiter=','))

            event_details = EventDetails(
                id=event_key,
                rankings=rankings
            )
            EventDetailsManipulator.createOrUpdate(event_details)

        self.response.out.write("Done restoring {}!".format(event_key))
Example #53
0
def getReviews(language, appId):
    url = "https://market.android.com/getreviews"

    url = "%s?id=%s&reviewType=1&rating=0&reviewSortOrder=0&pageNum=0&hl=en" % (
        url, appId)

    params = {"xhr": 1}

    req = urlfetch.fetch(
        url=url,
        headers={
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.63 Safari/535.7'
        },
        payload=urllib.urlencode(params),
        method=urlfetch.POST).content

    regex = re.compile('.*?("htmlContent".*)', re.DOTALL)
    req = unicode(req, 'utf-8').encode('utf-8')
    m = regex.match(req)

    json_text = '{' + m.group(1)
    data = json.loads(json_text)

    html = data['htmlContent'].replace('&nbsp;', ' ').replace('&amp;', '&')

    tree = lxml.html.fromstring(html)

    reviews = []

    for el in tree.findall('div'):
        if el.find('span/strong') is not None:
            user = el.find('span/strong').text
            date = el.find('span[2]').text.strip()
        else:
            user = None
            date = el.find('span[1]').text.strip()

        if el.find('p') is not None:
            review = el.find('p').text
        else:
            review = None

        date = date.replace("on ", '')
        date = datetime.strptime(date, "%B %d, %Y")

        if len(el.xpath("text()")) > 0:
            version = el.xpath("text()")[0].strip()
        else:
            version = ""

        if re.search(u"with", version):
            # Phone with version - Samsung Galaxy S with version 1.2.03
            m = re.search("\((.*) with version (.*)\)", version)
            version = m.group(2)
            device = m.group(1)
        else:
            m = re.search("Version ([\d\.]*)", version)

            if m is None:
                # Phone without version - SEMC Xperia X10
                m = re.search("\((.*)\)", version)
                version = None
                if m:
                    device = m.group(1)
                else:
                    device = None
            else:
                # Version without phone
                version = m.group(1)
                device = None

        title = el.find('div/h4').text

        if device is None:
            m1 = re.search("galaxy nexus", review or "", re.I)
            m2 = re.search("galaxy nexus", title or "", re.I)

            if m1 is not None or m2 is not None:
                device = "Galaxy Nexus"

        review = {
            "user": user,
            "date": date,
            "version": version,
            "device": device,
            "title": title,
            "rank": int(el.xpath(
                "count(div/div/div[contains(@class,'SPRITE_star_on_dark')])")),
            "review": review,
        }

        reviews.append(review)

    return reviews
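
# Hypothetical usage: scrape the first page of reviews for an app id.
# Note that the language argument is accepted but the request hardcodes hl=en.
# for r in getReviews('en', 'com.example.app'):
#     print r['date'].date(), r['rank'], r['title']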
Example #54
0
    def get(self):
        try:
            # Authenticate with wordnik
            username = "******"
            password = "******"
            url = "http://api.wordnik.com:80/v4/account.json/authenticate/{}?password={}&api_key={}".format(
                username, password, _WORDNIK_API_KEY)
            result = urlfetch.fetch(url)
            if result.status_code != 200:
                raise endpoints.UnauthorizedException(
                    "Bad status code {} when trying to authenticate with wordnik: {}"
                    .format(result.status_code, result.content))
            authToken = json.decode(result.content)["token"]

            # Check memcache
            listCacheName = "wordnik-lists"
            dateCacheName = "wordnik-lists-last-processed"
            lists = memcache.get(listCacheName)
            messages = []
            if not lists:
                if lists is None:
                    memcache.add(listCacheName, {})
                # Get word lists from wordnik
                url = "http://api.wordnik.com:80/v4/account.json/wordLists?api_key={}&auth_token={}".format(
                    _WORDNIK_API_KEY, authToken)
                result = urlfetch.fetch(url)
                if result.status_code != 200:
                    raise endpoints.UnauthorizedException(
                        "Bad status code {} when trying to get wordlists from wordnik: {}"
                        .format(result.status_code, result.content))
                # Get and add all wordlists in parallel
                items = json.decode(result.content)
                lists = {}
                last_processed = memcache.get(dateCacheName)
                for item in items:
                    last_activity = parse(item["lastActivityAt"])
                    if not last_processed or last_activity > last_processed:
                        lists[item["permalink"]] = item["name"]
                memcache.replace("wordnik-lists", lists)
                if last_processed is None:
                    memcache.add(dateCacheName,
                                 datetime.datetime.now(tzlocal()))
                else:
                    memcache.replace(dateCacheName,
                                     datetime.datetime.now(tzlocal()))
                messages.append("retrieved lists {} to process".format(lists))
            else:
                messages.append(
                    "found existing lists {} to process".format(lists))
            while lists:
                permalink, name = lists.popitem()
                try:
                    WordList.AddWordnikList(authToken, name,
                                            permalink).get_result()
                    messages.append(
                        "Added wordlist {} successfully".format(name))
                except Exception as e:
                    messages.append("Failed to add wordlist {} : {}".format(
                        name, str(e)))
                memcache.replace(listCacheName, lists)
            self.response.write("<br/>".join(messages))
        except Exception as e:
            # messages may not exist yet if the failure happened early
            self.response.write(str(e))
Example #55
0
 def get_token_info(credentials):
     """Get the token information from Google for the given credentials."""
     url = (TOKEN_INFO_ENDPOINT % credentials.access_token)
     return urlfetch.fetch(url)
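
# Hypothetical usage, assuming TOKEN_INFO_ENDPOINT is Google's tokeninfo URL
# template: inspect the expiry of an access token.
# result = get_token_info(credentials)
# if result.status_code == 200:
#     info = json.loads(result.content)
#     logging.info('token expires in %s s', info.get('expires_in'))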
Example #56
0
 def get(self):
     address = self.request.get('address')
     if not address:
         return 500
     fetched = urlfetch.fetch('https://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false&region=JP&language=ja&key=AIzaSyA17W3s7i1mvELoe5TcvJFVc798wNMDM7Q'.format(urllib.quote(address.encode('utf-8'))))
     return self.render_json(json.loads(fetched.content))
Example #57
0
class MainHandler(webapp.RequestHandler):
    Software = "GAppProxy/2.0.0"
    # hop to hop header should not be forwarded
    H2H_Headers = [
        "connection", "keep-alive", "proxy-authenticate",
        "proxy-authorization", "te", "trailers", "transfer-encoding", "upgrade"
    ]
    Forbid_Headers = ["if-range"]
    #Forbid_Headers = ["if-range", "accept-encoding"]
    Fetch_Max = 3
    spammer_list = ["52.69.164.198", "54.92.72.114"]

    def sendErrorPage(self, status, description):
        self.response.headers["Content-Type"] = "application/octet-stream"
        # http over http
        # header
        self.response.out.write("HTTP/1.1 %d %s\r\n" % (status, description))
        self.response.out.write("Server: %s\r\n" % self.Software)
        self.response.out.write("Content-Type: text/html\r\n")
        self.response.out.write("\r\n")
        # body
        content = "<h1>Fetch Server Error</h1><p>Error Code: %d<p>Message: %s" % (
            status, description)
        self.response.out.write(zlib.compress(content))

    def post(self):
        visitor_ip = self.request.remote_addr
        if visitor_ip not in self.allowed_ips:
            self.sendErrorPage(
                590, ("Invalid local proxy, Method not allowed. IP: %s") %
                (visitor_ip))
            return
        try:
            # get post data
            orig_method = self.request.get("method").encode("utf-8")
            orig_path = base64.b64decode(
                self.request.get("encoded_path").encode("utf-8"))
            orig_headers = base64.b64decode(
                self.request.get("headers").encode("utf-8"))
            orig_post_data = base64.b64decode(
                self.request.get("postdata").encode("utf-8"))

            # check method
            if orig_method != "GET" and orig_method != "HEAD" and orig_method != "POST":
                # forbid
                self.sendErrorPage(590,
                                   "Invalid local proxy, Method not allowed.")
                return
            if orig_method == "GET":
                method = urlfetch.GET
            elif orig_method == "HEAD":
                method = urlfetch.HEAD
            elif orig_method == "POST":
                method = urlfetch.POST

            # check path
            (scm, netloc, path, params, query,
             _) = urlparse.urlparse(orig_path)
            if (scm.lower() != "http"
                    and scm.lower() != "https") or not netloc:
                self.sendErrorPage(590,
                                   "Invalid local proxy, Unsupported Scheme.")
                return
            # create new path
            new_path = urlparse.urlunparse(
                (scm, netloc, path, params, query, ""))

            # make new headers
            new_headers = {}
            content_length = 0
            si = StringIO.StringIO(orig_headers)
            while True:
                line = si.readline()
                line = line.strip()
                if line == "":
                    break
                # parse line
                (name, _, value) = line.partition(":")
                name = name.strip()
                value = value.strip()
                nl = name.lower()
                if nl in self.H2H_Headers or nl in self.Forbid_Headers:
                    # don't forward
                    continue
                new_headers[name] = value
                if name.lower() == "content-length":
                    content_length = int(value)
            # predefined header
            new_headers["Connection"] = "close"

            # check post data
            if content_length != 0:
                if content_length != len(orig_post_data):
                    logging.warning(
                        "Invalid local proxy, Wrong length of post data, %d!=%d."
                        % (content_length, len(orig_post_data)))
                    #self.sendErrorPage(590, "Invalid local proxy, Wrong length of post data, %d!=%d." % (content_length, len(orig_post_data)))
                    #return
            else:
                orig_post_data = ""
            if orig_post_data != "" and orig_method != "POST":
                self.sendErrorPage(
                    590, "Invalid local proxy, Inconsistent method and data.")
                return
        except Exception, e:
            self.sendErrorPage(591, "Fetch server error, %s." % str(e))
            return

        # fetch, try * times
        range_request = False
        for i in range(self.Fetch_Max):
            try:
                # the last time, try with Range
                if (i == self.Fetch_Max - 1 and method == urlfetch.GET
                        and "Range" not in new_headers):
                    range_request = True
                    new_headers["Range"] = "bytes=0-65535"
                # fetch
                resp = urlfetch.fetch(new_path, orig_post_data, method,
                                      new_headers, False, False)
                # ok, got
                if range_request:
                    range_supported = False
                    for h in resp.headers:
                        if h.lower() == "accept-ranges":
                            if resp.headers[h].strip().lower() == "bytes":
                                range_supported = True
                                break
                        elif h.lower() == "content-range":
                            range_supported = True
                            break
                    if range_supported:
                        self.sendErrorPage(
                            592,
                            "Fetch server error, Retry with range header.")
                    else:
                        self.sendErrorPage(
                            591,
                            "Fetch server error, Sorry, file size up to Google's limit and the target server doesn't accept Range request."
                        )
                    return
                break
            except Exception, e:
                logging.warning("urlfetch.fetch(%s) error: %s." %
                                (range_request and "Range" or "", str(e)))
Example #58
0
    def create_and_notify(self,
                          request,
                          channel_name=(str, ),
                          channel_id=(str, ),
                          title=(str, ),
                          content=(str, )):

        broadcasted_msg = PushMessage(channel_id=channel_id,
                                      channel_name=channel_name,
                                      title=title,
                                      content=content)
        datamsg = f3.messages.serialize(MessageMsg, broadcasted_msg)
        broadcasted_msg.put()

        #get subscribers of that channel
        subs = Subscriber.query(Subscriber.channels == channel_id)
        #make email recipients string
        recipients = ""
        if subs is not None:
            for sub in subs:
                #only email subscribers with email notifications enabled
                if sub.email_enabled and sub.email_verified:
                    #re.match(r"[^@]+@[^@]+\.[^@]+", sub.object_id):
                    recipients += sub.email + ","

        #send the emails
        if recipients:
            mail.send_mail(
                sender="*****@*****.**",
                to=recipients,
                subject="Message from " + channel_name,
                body=content)

        #send the sms
        sms_recipients = []
        client = TwilioRestClient(twilio_acc, twilio_tkn)

        if subs is not None:
            for sub in subs:
                #only sms subscribers with sms notifications enabled
                if sub.sms_enabled and sub.sms_verified:
                    message = client.messages.create(to=sub.phone_number,
                                                     from_="+12057915054",
                                                     body=channel_name + "-" +
                                                     content)

        #send parse message
        url = "https://api.parse.com/1/push"
        payload_content = {
            'channels': [channel_id],
            'data': {
                'alert': channel_name + ": " + title + "-" + content
            }
        }
        result = urlfetch.fetch(url=url,
                                payload=json.dumps(payload_content),
                                method=urlfetch.POST,
                                headers={
                                    'Content-Type': 'application/json',
                                    'X-Parse-Application-Id': parse_appid,
                                    'X-Parse-REST-API-Key': parse_apikey
                                })

        return datamsg
class EventHelper(object):
    """
    Helper class for Events.
    """
    @classmethod
    def groupByWeek(self, events):
        """
        Events should already be ordered by start_date
        """
        to_return = collections.OrderedDict()  # key: week_label, value: list of events

        current_week = 1
        week_start = None
        weekless_events = []
        offseason_events = []
        preseason_events = []
        for event in events:
            if event.official and event.event_type_enum in {EventType.CMP_DIVISION, EventType.CMP_FINALS}:
                if CHAMPIONSHIP_EVENTS_LABEL in to_return:
                    to_return[CHAMPIONSHIP_EVENTS_LABEL].append(event)
                else:
                    to_return[CHAMPIONSHIP_EVENTS_LABEL] = [event]
            elif event.official and event.event_type_enum in {EventType.REGIONAL, EventType.DISTRICT, EventType.DISTRICT_CMP}:
                if (event.start_date is None or
                   (event.start_date.month == 12 and event.start_date.day == 31)):
                    weekless_events.append(event)
                else:
                    if week_start is None:
                        diff_from_thurs = (event.start_date.weekday() - 3) % 7  # 3 is Thursday
                        week_start = event.start_date - datetime.timedelta(days=diff_from_thurs)
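                        # e.g. a Saturday start (weekday() == 5) gives
                        # diff_from_thurs == 2, anchoring week_start on the
                        # preceding Thursday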

                    if event.start_date >= week_start + datetime.timedelta(days=7):
                        current_week += 1
                        week_start += datetime.timedelta(days=7)

                    label = REGIONAL_EVENTS_LABEL.format(current_week)
                    if label in to_return:
                        to_return[label].append(event)
                    else:
                        to_return[label] = [event]
            elif event.event_type_enum == EventType.PRESEASON:
                preseason_events.append(event)
            else:
                # everything else is an offseason event
                offseason_events.append(event)

        # Add weekless + other events last
        if weekless_events:
            to_return[WEEKLESS_EVENTS_LABEL] = weekless_events
        if offseason_events:
            to_return[OFFSEASON_EVENTS_LABEL] = offseason_events
        if preseason_events:
            to_return[PRESEASON_EVENTS_LABEL] = preseason_events

        return to_return

    @classmethod
    def distantFutureIfNoStartDate(self, event):
        if not event.start_date:
            return datetime.datetime(2177, 1, 1, 1, 1, 1)
        else:
            return event.start_date

    @classmethod
    def calculateTeamWLTFromMatches(self, team_key, matches):
        """
        Given a team_key and some matches, find the Win Loss Tie.
        """
        wlt = {"win": 0, "loss": 0, "tie": 0}

        for match in matches:
            if match.has_been_played and match.winning_alliance is not None:
                if match.winning_alliance == "":
                    wlt["tie"] += 1
                elif team_key in match.alliances[match.winning_alliance]["teams"]:
                    wlt["win"] += 1
                else:
                    wlt["loss"] += 1
        return wlt

    @classmethod
    def getTeamWLT(self, team_key, event):
        """
        Given a team_key, and an event, find the team's Win Loss Tie.
        """
        match_keys = Match.query(Match.event == event.key, Match.team_key_names == team_key).fetch(500, keys_only=True)
        return self.calculateTeamWLTFromMatches(team_key, ndb.get_multi(match_keys))

    @classmethod
    def getWeekEvents(self):
        """
        Get events this week
        In general, if an event is currently going on, it shows up in this query
        An event shows up in this query iff:
        a) The event is within_a_day
        OR
        b) The event.start_date is on or within 4 days after the closest Thursday
        """
        today = datetime.datetime.today()

        # Make sure all events to be returned are within range
        two_weeks_of_events_keys_future = Event.query().filter(
          Event.start_date >= (today - datetime.timedelta(days=7))).filter(
          Event.start_date <= (today + datetime.timedelta(days=7))).order(
          Event.start_date).fetch_async(50, keys_only=True)

        events = []
        diff_from_thurs = 3 - today.weekday()  # 3 is Thursday. diff_from_thurs ranges from 3 to -3 (Monday thru Sunday)
        closest_thursday = today + datetime.timedelta(days=diff_from_thurs)
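        # e.g. on a Saturday (weekday() == 5), diff_from_thurs == -2, so the
        # closest Thursday is two days back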

        two_weeks_of_event_futures = ndb.get_multi_async(two_weeks_of_events_keys_future.get_result())
        for event_future in two_weeks_of_event_futures:
            event = event_future.get_result()
            if event.within_a_day:
                events.append(event)
            else:
                offset = event.start_date.date() - closest_thursday.date()
                if datetime.timedelta(0) <= offset < datetime.timedelta(4):
                    events.append(event)

        return events

    @classmethod
    def getEventsWithinADay(self):
        week_events = self.getWeekEvents()
        ret = []
        for event in week_events:
            if event.within_a_day:
                ret.append(event)
        return ret

    @classmethod
    def getShortName(self, name_str):
        match = re.match(r'(MAR |PNW )?(FIRST Robotics|FRC)?(.*)(FIRST Robotics|FRC)?(District|Regional|Region|State|Tournament|FRC|Field)( Competition| Event| Championship)?', name_str)
        if match:
            short = match.group(3)
            match = re.match(r'(.*)(FIRST Robotics|FRC)', short)
            if match:
                return match.group(1).strip()
            else:
                return short.strip()

        return name_str.strip()

    @classmethod
    def get_timezone_id(cls, event_dict):
        if event_dict.get('location', None) is None:
            logging.warning('Could not get timezone for event {}{} with no location!'.format(event_dict['year'], event_dict['event_short']))
            return None

        # geocode request
        geocode_params = urllib.urlencode({
            'address': event_dict['location'],
            'sensor': 'false',
        })
        geocode_url = 'https://maps.googleapis.com/maps/api/geocode/json?%s' % geocode_params
        try:
            geocode_result = urlfetch.fetch(geocode_url)
        except Exception, e:
            logging.warning('urlfetch for geocode request failed: {}'.format(geocode_url))
            logging.info(e)
            return None
        if geocode_result.status_code != 200:
            logging.warning('Geocoding for event {}{} failed with url {}'.format(event_dict['year'], event_dict['event_short'], geocode_url))
            return None
        geocode_dict = json.loads(geocode_result.content)
        if not geocode_dict['results']:
            logging.warning('No geocode results for event location: {}'.format(event_dict['location']))
            return None
        lat = geocode_dict['results'][0]['geometry']['location']['lat']
        lng = geocode_dict['results'][0]['geometry']['location']['lng']

        # timezone request
        tz_params = urllib.urlencode({
            'location': '%s,%s' % (lat, lng),
            'timestamp': 0,  # we only care about timeZoneId, which doesn't depend on timestamp
            'sensor': 'false',
        })
        tz_url = 'https://maps.googleapis.com/maps/api/timezone/json?%s' % tz_params
        try:
            tz_result = urlfetch.fetch(tz_url)
        except Exception, e:
            logging.warning('urlfetch for timezone request failed: {}'.format(tz_url))
            logging.info(e)
            return None
Example #60
0
 def get(self):
     url = self.request.query_string
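     # the entire query string is treated as the target URL,
     # e.g. GET /fetch?http://example.com/a.png proxies example.com/a.png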
     logging.info("fetching: " + url)
     data = fetch(url)
     self.response.headers["content-type"] = data.headers["content-type"]
     self.response.out.write(data.content)