def __init__(self):
    from google.appengine.api.capabilities import CapabilitySet
    # Probe the memcache and datastore services once at construction time.
    memcache_service = CapabilitySet('memcache', methods=['add', 'set', 'get'])
    datastore_service = CapabilitySet('datastore_v3', methods=['write'])
    self.memcache_ok = memcache_service.is_enabled()
    self.datastore_ok = datastore_service.is_enabled()
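A sketch of how these probe results might be consumed later in the same class; the fetch method, cache_key attribute, and load_from_datastore helper are hypothetical:

def fetch(self):
    # Prefer memcache when the capability probe said it was up; fall
    # back to a direct datastore read otherwise.
    from google.appengine.api import memcache
    if self.memcache_ok:
        cached = memcache.get(self.cache_key)    # hypothetical key attribute
        if cached is not None:
            return cached
    if self.datastore_ok:
        return self.load_from_datastore()        # hypothetical loader
    return None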
def wrapped(request, *args, **kwargs):
    datastore_write = CapabilitySet("datastore_v3", capabilities=["write"])
    datastore_writable = datastore_write.will_remain_enabled_for(60)
    if not datastore_writable:
        logging.warn("Datastore is not writable. %s"
                     % datastore_write.admin_message())
        if not request.is_xhr:
            return redirect(url_for(endpoint))
    return view(request, *args, **kwargs)
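The wrapped closures here and in the variant further below read view and endpoint from an enclosing scope; a minimal skeleton of the decorator factory they imply (the name datastore_writable_required is an assumption, not taken from the source):

def datastore_writable_required(endpoint):
    # Hypothetical decorator factory: `endpoint` names the fallback page,
    # `view` is the wrapped handler; both are captured by the closure above.
    def decorator(view):
        def wrapped(request, *args, **kwargs):
            # ... capability check and redirect as in the snippet above ...
            return view(request, *args, **kwargs)
        return wrapped
    return decorator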
def incr_count(entity_key, counter_name, txn_def, incr_amt=1,
               interval=_DEF_UPDATE_INTERVAL, entity_val=0):
    '''Increment a counter.

    Updates a backing entity with the contents of memcache. Handles
    increments only for now. Returns the current count; raises an
    exception on error.
    '''
    # Don't worry about decrements right now.
    if incr_amt < 0:
        return 0

    # Generate memcache keys.
    lock_key = _get_key(_MACRO_LOCK, counter_name, entity_key)
    count_key = _get_key(_MACRO_COUNT, counter_name, entity_key)
    incr_key = _get_key(_MACRO_INCR, counter_name, entity_key)
    if _DEBUG:
        logging.debug("keys: %s %s %s" % (lock_key, count_key, incr_key))

    # Check to see if memcache is up.
    look_ahead_time = 10 + interval
    memcache_ops = CapabilitySet('memcache', methods=['add'])
    memcache_down = not memcache_ops.will_remain_enabled_for(look_ahead_time)

    # If memcache is down or `interval` seconds have passed, update
    # the datastore.
    if memcache_down or memcache.add(lock_key, None, time=interval):
        # Update the datastore.
        incr = int(memcache.get(incr_key) or 0) + incr_amt
        if _DEBUG:
            logging.debug("incr(%s): updating datastore with %d",
                          counter_name, incr)
        memcache.set(incr_key, 0)
        try:
            stored_count = db.run_in_transaction(txn_def, entity_key,
                                                 incr, counter_name)
        except:
            memcache.set(incr_key, incr)
            logging.error('Counter(%s): unable to update datastore counter.',
                          counter_name)
            raise
        memcache.set(count_key, stored_count)
        return stored_count
    else:
        # The majority of the time, this branch is taken.
        incr = memcache.get(incr_key)
        if incr is None:
            # incr_key should be set in memcache. If not, two possibilities:
            # 1) memcache has failed since the last datastore update.
            # 2) this branch executed before the update branch's set (unlikely).
            stored_count = db.run_in_transaction(txn_def, entity_key,
                                                 incr_amt, counter_name)
            memcache.set(count_key, stored_count)
            memcache.set(incr_key, 0)
            logging.error('Counter(%s): possible memcache failure in '
                          'update interval.', counter_name)
            return stored_count
        else:
            # Memcache increment.
            memcache.incr(incr_key, delta=incr_amt)
            if _DEBUG:
                logging.debug("incr(%s): incrementing memcache with %d",
                              counter_name, incr_amt)
    return get_count(entity_key, counter_name, entity_val)
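For reference, a minimal sketch of a txn_def compatible with the (entity_key, incr, counter_name) signature used above, assuming a simple Counter model; the names here are illustrative, not taken from this snippet:

from google.appengine.ext import db

class Counter(db.Model):
    # Hypothetical backing entity for the sharded-by-name counter.
    count = db.IntegerProperty(default=0)

def update_counter_txn(entity_key, incr, counter_name):
    # Runs inside db.run_in_transaction: get-or-create the backing
    # entity, apply the buffered delta, and return the stored total.
    key_name = '%s:%s' % (str(entity_key), counter_name)
    entity = Counter.get_by_key_name(key_name)
    if entity is None:
        entity = Counter(key_name=key_name, count=0)
    entity.count += incr
    entity.put()
    return entity.count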
def is_appengine_maintenance():
    datastore_readonly = memcache.get("appengine_datastore_readonly")
    if datastore_readonly is None:
        datastore_write = CapabilitySet('datastore_v3', capabilities=['write'])
        datastore_readonly = not datastore_write.is_enabled()
        # Cache the answer for ten minutes to avoid re-querying the
        # capabilities service on every request.
        memcache.set("appengine_datastore_readonly", datastore_readonly, 10 * 60)
    return datastore_readonly
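A hedged usage sketch: callers can consult the cached flag before writing instead of catching CapabilityDisabledError; the Score model and save_score function are hypothetical:

import logging
from google.appengine.ext import db

class Score(db.Model):           # hypothetical model for the example
    value = db.IntegerProperty()

def save_score(user_id, score):
    # Skip the write during a maintenance window instead of letting
    # the put() fail.
    if is_appengine_maintenance():
        logging.warn('Datastore is read-only; dropping score for %s', user_id)
        return False
    Score(key_name=user_id, value=score).put()
    return True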
def wrapped(request, *args, **kwargs):
    datastore_write = CapabilitySet('datastore_v3', capabilities=['write'])
    datastore_writable = datastore_write.will_remain_enabled_for(60)
    if not datastore_writable:
        logging.warn('Datastore is not writable. %s'
                     % datastore_write.admin_message())
        if not request.is_xhr:
            # Saving session will also fail.
            if hasattr(request, 'session'):
                del request.session
            return redirect(url_for(endpoint))
    return view(request, *args, **kwargs)
def decr(self, value=-1):
    # if value > 0:
    #     raise ValueError('CachedCounter cannot handle negative numbers.')
    def update_count(name, decr, error_possible=False):
        entity = Counter.get_by_key_name(name)
        if entity:
            entity.count += decr
            logging.debug("decr(%s): update_count on retrieved entity by %d to %d",
                          name, decr, entity.count)
        else:
            entity = Counter(key_name=name, count=decr)
            logging.debug("decr(%s): update_count on new entity set to %d",
                          name, decr)
        if error_possible:
            entity.error_possible = True
        entity.put()
        return entity.count

    look_ahead_time = 10 + self._update_interval
    memcache_ops = CapabilitySet('memcache', methods=['add'])
    memcache_down = not memcache_ops.will_remain_enabled_for(look_ahead_time)
    if memcache_down or memcache.add(self._lock_key, None,
                                     time=self._update_interval):
        # Update the datastore.
        decr = int(memcache.get(self._decr_key) or 0) + value
        logging.debug("decr(%s): updating datastore with %d", self._name, decr)
        memcache.set(self._decr_key, 0)
        try:
            stored_count = db.run_in_transaction(update_count, self._name, decr)
        except:
            memcache.set(self._decr_key, decr)
            logging.error('Counter(%s): unable to update datastore counter.',
                          self._name)
            raise
        memcache.set(self._count_key, stored_count)
        return stored_count
    else:
        decr = memcache.get(self._decr_key)
        if decr is None:
            # _decr_key should be set in memcache. If not, two possibilities:
            # 1) memcache has failed since the last datastore update.
            # 2) this branch executed before the update branch's set (unlikely).
            stored_count = db.run_in_transaction(update_count, self._name,
                                                 value, error_possible=True)
            memcache.set(self._count_key, stored_count)
            memcache.set(self._decr_key, 0)
            logging.error('Counter(%s): possible memcache failure in '
                          'update interval.', self._name)
            return stored_count
        else:
            memcache.decr(self._decr_key, delta=value)
            logging.debug("decr(%s): decrementing memcache with %d",
                          self._name, value)
    return self.count
def get(self):
    now = datetime.datetime.now()
    now_time_t = int(time.mktime(now.timetuple()))
    is_enabled = CapabilitySet('memcache').is_enabled()
    memcache_stats = memcache.get_stats()
    if self.request.get('output') in ('text', 'txt'):
        self.response.out.write(now_time_t)
        self.response.out.write(' up' if is_enabled else ' down')
        self.response.out.write(' h:%(hits)s'
                                ' m:%(misses)s'
                                ' bh:%(byte_hits)s'
                                ' i:%(items)s'
                                ' b:%(bytes)s'
                                ' oia:%(oldest_item_age)s'
                                '\n' % memcache_stats)
        self.response.headers['Content-Type'] = 'text/plain'
    else:
        template_values = {
            'now': now.ctime(),
            'now_time_t': now_time_t,
            'is_enabled': is_enabled,
            'memcache_stats': memcache_stats,
        }
        self.render_jinja2_template("memcache_stats.html", template_values)
def run(self, checks=DEFAULT_CAPABILITY_CHECKS):
    """Run the specified set of capability checks."""
    # Reset the availability list.
    self.availability = []
    # Iterate over the list of checks and execute them.
    for check_instance in checks:
        # Ensure that we have a title and a package.
        if (KEY_TITLE not in check_instance) or (KEY_PACKAGE not in check_instance):
            logging.warning("Invalid check defined: %s", check_instance)
            continue
        logging.debug("Running the %s check", check_instance[KEY_TITLE])
        # Determine the capabilities we are looking for.
        caps = []
        if KEY_CAPABILITIES in check_instance:
            caps = check_instance[KEY_CAPABILITIES]
        # Create the capability set instance.
        capset = CapabilitySet(check_instance[KEY_PACKAGE], caps, ['*'])
        # Create the service availability record.
        service_avail = {
            'title': check_instance[KEY_TITLE],
            # Whether the service is available now.
            'avail_now': capset.is_enabled(),
            # Whether the service will still be available in one hour.
            'avail_hour': capset.will_remain_enabled_for(3600),
            # Whether the service will still be available in one day.
            'avail_day': capset.will_remain_enabled_for(86400),
        }
        # Log the results.
        logging.debug("Completed availability check, results below\n%s",
                      service_avail)
        # Add the availability record to the service list.
        # (Using += with a dict would extend the list with the dict's keys.)
        self.availability.append(service_avail)
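For context, a plausible shape for the check definitions consumed by run(); the string values of the key constants and the concrete entries are assumptions inferred from the field names, not taken from the source:

# Assumed values; only the constant names appear in the snippet above.
KEY_TITLE = 'title'
KEY_PACKAGE = 'package'
KEY_CAPABILITIES = 'capabilities'

DEFAULT_CAPABILITY_CHECKS = [
    {KEY_TITLE: 'Datastore writes',
     KEY_PACKAGE: 'datastore_v3',
     KEY_CAPABILITIES: ['write']},
    # Package-wide check: no explicit capabilities, so run() relies on
    # the ['*'] methods wildcard it passes to CapabilitySet.
    {KEY_TITLE: 'Memcache', KEY_PACKAGE: 'memcache'},
]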
def handle_result(rpc):
    try:
        result = rpc.get_result()
    except urlfetch.Error:
        return
    else:
        jid = self.jids[id(rpc)]
        emails_map = parse(result.content)
        emails = list()
        for email in emails_map:
            if not Mail.get_by_key_name(email['id']):
                if Mail(key_name=email['id']).put():
                    text = ('From: %(author)s\nTitle: %(title)s\n'
                            'Summary: %(summary)s\nTime: %(time)s\n%(url)s'
                            % email)
                    emails.insert(0, text)
        if emails:
            # Retry transient errors while the xmpp service reports
            # itself enabled.
            while CapabilitySet('xmpp').is_enabled():
                try:
                    xmpp.send_message(jid, '\n\n'.join(emails))
                except xmpp.Error:
                    pass
                else:
                    break
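This retry-while-enabled XMPP pattern recurs in several snippets below; it could be factored into a small helper, sketched here under the hypothetical name xmpp_send_with_retry:

from google.appengine.api import xmpp
from google.appengine.api.capabilities import CapabilitySet

def xmpp_send_with_retry(jid, body):
    # Retry transient xmpp errors while the service reports itself
    # enabled; give up once the capability goes away.
    while CapabilitySet('xmpp').is_enabled():
        try:
            xmpp.send_message(jid, body)
        except xmpp.Error:
            pass
        else:
            return True
    return False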
def get_or_post(self, method='POST'):
    """Handles the GET/POST request.

    FIXME: this is getting a touch long
    """
    # Ensure that we have our services for the next 30s (the length of
    # a single request).
    unavailable = set()
    for service in REQUIRED_SERVICES:
        if not CapabilitySet(service).is_enabled():
            unavailable.add(service)
    if unavailable:
        raise RequiredServicesUnavailableRuntimeError(unavailable)

    # The case of headers is inconsistent on dev_appserver and appengine,
    # i.e. 'X-AppEngine-TaskRetryCount' vs. 'X-AppEngine-Taskretrycount'.
    lowerCaseHeaders = dict([(key.lower(), value)
                             for key, value in self.request.headers.items()])

    taskName = lowerCaseHeaders.get('x-appengine-taskname')
    retryCount = int(lowerCaseHeaders.get('x-appengine-taskretrycount', 0))

    # Taskqueue can occasionally invoke multiple tasks of the same name.
    # Here, we use a datastore transaction as a semaphore to determine
    # whether we should actually execute this or not.
    if taskName:
        semaphoreKey = '%s--%s' % (taskName, retryCount)
        semaphore = RunOnceSemaphore(semaphoreKey, None)
        if not semaphore.writeRunOnceSemaphore(payload='fantasm')[0]:
            # We can simply return here; this is a duplicate fired task.
            logging.info('A duplicate task "%s" has been queued by taskqueue '
                         'infrastructure. Ignoring.', taskName)
            self.response.status_code = 200
            return

    # Pull out the X-Fantasm-* headers.
    headers = None
    for key, value in self.request.headers.items():
        if key.startswith(HTTP_REQUEST_HEADER_PREFIX):
            headers = headers or {}
            if ',' in value:
                headers[key] = [v.strip() for v in value.split(',')]
            else:
                headers[key] = value.strip()

    requestData = {'POST': self.request.POST, 'GET': self.request.GET}[method]
    method = requestData.get('method') or method

    machineName = getMachineNameFromRequest(self.request)

    # Get the incoming instance name, if any.
    instanceName = requestData.get(INSTANCE_NAME_PARAM)

    # Get the incoming state, if any.
    fsmState = requestData.get(STATE_PARAM)

    # Get the incoming event, if any.
    fsmEvent = requestData.get(EVENT_PARAM)

    assert (fsmState and instanceName) or True  # if we have a state, we should have an instanceName
    assert (fsmState and fsmEvent) or True  # if we have a state, we should have an event

    obj = TemporaryStateObject()

    # Make a copy, add the data.
    fsm = getCurrentFSM().createFSMInstance(machineName,
                                            currentStateName=fsmState,
                                            instanceName=instanceName,
                                            method=method,
                                            obj=obj,
                                            headers=headers)

    # In "immediate mode" we try to execute as much as possible in the
    # current request; for the time being, this does not include things
    # like fork/spawn/continuations/fan-in.
    immediateMode = IMMEDIATE_MODE_PARAM in requestData.keys()
    if immediateMode:
        obj[IMMEDIATE_MODE_PARAM] = immediateMode
        obj[MESSAGES_PARAM] = []
        fsm.Queue = NoOpQueue  # don't queue anything else

    # pylint: disable-msg=W0201
    # - initialized outside of ctor is ok in this case
    self.fsm = fsm  # used for logging in handle_exception

    # Pull all the data off the url and stuff it into the context.
    for key, value in requestData.items():
        if key in NON_CONTEXT_PARAMS:
            continue  # these are special, don't put them in the data
        # Deal with ...a=1&a=2&a=3...
        value = requestData.get(key)
        valueList = requestData.getall(key)
        if len(valueList) > 1:
            value = valueList
        if key.endswith('[]'):
            key = key[:-2]
            value = [value]
        if key in fsm.contextTypes.keys():
            fsm.putTypedValue(key, value)
        else:
            fsm[key] = value

    if not (fsmState or fsmEvent):
        # Just queue up a task to run the initial state transition using retries.
        fsm[STARTED_AT_PARAM] = time.time()
        # Initialize the fsm, which returns the 'pseudo-init' event.
        fsmEvent = fsm.initialize()
    else:
        # Add the retry counter into the machine context from the header.
        obj[RETRY_COUNT_PARAM] = retryCount
        # Add the actual task name to the context.
        obj[TASK_NAME_PARAM] = taskName
        # Dispatch and return the next event.
        fsmEvent = fsm.dispatch(fsmEvent, obj)

    # Loop and execute until there are no more events - any exceptions
    # will make it out to the user in the response - useful for debugging.
    if immediateMode:
        while fsmEvent:
            fsmEvent = fsm.dispatch(fsmEvent, obj)
        self.response.headers['Content-Type'] = 'application/json'
        data = {
            'obj': obj,
            'context': fsm,
        }
        self.response.out.write(json.dumps(data, cls=Encoder))
class cron_handler(webapp.RequestHandler):
    def get(self, cron_id):
        cron_id = int(cron_id)
        data = Session.get_all(shard=cron_id)
        for u in data:
            jid = u.key().name()
            try:
                self.process(u)
            except CapabilityDisabledError:
                try:
                    xmpp.send_presence(jid, presence_show=xmpp.PRESENCE_SHOW_AWAY)
                except xmpp.Error:
                    pass
            else:
                try:
                    xmpp.send_presence(jid)
                except xmpp.Error:
                    pass

    def process(self, u):
        jid = u.key().name()
        try:
            flag = xmpp.get_presence(jid)
        except (xmpp.Error, DeadlineExceededError):
            flag = True
        if not flag:
            u.delete()
            return
        google_user = GoogleUser.get_by_jid(jid)
        if google_user is None:
            u.delete()
            return
        time_delta = int(time()) - google_user.last_update
        if time_delta < google_user.interval * 60 - 30:
            return
        _ = lambda x: gettext(x, locale=google_user.locale)
        twitter_user = TwitterUser.get_by_twitter_name(google_user.enabled_user,
                                                       google_user.jid)
        if twitter_user is None:
            google_user.enabled_user = ''
            Db.set_datastore(google_user)
            return
        api = twitter.Api(consumer_key=config.OAUTH_CONSUMER_KEY,
                          consumer_secret=config.OAUTH_CONSUMER_SECRET,
                          access_token_key=twitter_user.access_token_key,
                          access_token_secret=twitter_user.access_token_secret)
        try:
            self._user = api.verify_credentials()
            if not self._user or 'screen_name' not in self._user:
                raise twitter.TwitterError
        except twitter.TwitterError:
            google_user.retry += 1
            if google_user.retry >= config.MAX_RETRY:
                GoogleUser.disable(jid=google_user.jid)
                xmpp.send_message(google_user.jid, _('NO_AUTHENTICATION'))
            else:
                Db.set_cache(google_user)
            return
        finally:
            if google_user.retry > 0:
                google_user.retry = 0
                Db.set_cache(google_user)
        if twitter_user.twitter_name != self._user['screen_name']:
            twitter_user.twitter_name = self._user['screen_name']
            Db.set_cache(twitter_user)
            google_user.enabled_user = self._user['screen_name']
            Db.set_cache(google_user)
        utils.set_jid(google_user.jid)
        home_statuses = []
        home_mention_statuses = []
        all_statuses = []
        at_username = '@' + google_user.enabled_user
        if (google_user.display_timeline & MODE_HOME
                or google_user.display_timeline & MODE_MENTION):
            home_rpc = api.get_home_timeline(since_id=google_user.last_msg_id,
                                             async=True)
        else:
            home_rpc = None
        if google_user.display_timeline & MODE_LIST:
            list_rpc = api.get_list_statuses(user=google_user.list_user,
                                             id=google_user.list_id,
                                             since_id=google_user.last_list_id,
                                             async=True)
        else:
            list_rpc = None
        if google_user.display_timeline & MODE_MENTION:
            mention_rpc = api.get_mentions(since_id=google_user.last_mention_id,
                                           async=True)
        else:
            mention_rpc = None
        if google_user.display_timeline & MODE_DM:
            dm_rpc = api.get_direct_messages(since_id=google_user.last_dm_id,
                                             async=True)
        else:
            dm_rpc = None
        if google_user.display_timeline & MODE_HOME:
            try:
                home_statuses = api._process_result(home_rpc)
                if home_statuses:
                    all_statuses.extend(home_statuses)
                    if home_statuses[0]['id'] > google_user.last_msg_id:
                        google_user.last_msg_id = home_statuses[0]['id']
            except twitter.TwitterInternalServerError:
                pass
            except BaseException:
                err = StringIO('')
                traceback.print_exc(file=err)
                logging.error(google_user.jid + ' Home:\n' + err.getvalue())
        if google_user.display_timeline & MODE_MENTION:
            try:
                statuses = api._process_result(mention_rpc)
                if statuses:
                    all_statuses.extend(statuses)
                    if statuses[0]['id'] > google_user.last_mention_id:
                        google_user.last_mention_id = statuses[0]['id']
                if not google_user.display_timeline & MODE_HOME:
                    try:
                        home_statuses = api._process_result(home_rpc)
                    except twitter.TwitterInternalServerError:
                        pass
                    except BaseException:
                        err = StringIO('')
                        traceback.print_exc(file=err)
                        logging.error(google_user.jid + ' Home:\n' + err.getvalue())
                    else:
                        if home_statuses:
                            if home_statuses[0]['id'] > google_user.last_msg_id:
                                google_user.last_msg_id = home_statuses[0]['id']
                            home_mention_statuses = [
                                x for x in home_statuses
                                if at_username in x['text']
                                and x['id'] > google_user.last_mention_id
                            ]
                            if home_mention_statuses:
                                all_statuses.extend(home_mention_statuses)
            except twitter.TwitterInternalServerError:
                pass
            except BaseException:
                err = StringIO('')
                traceback.print_exc(file=err)
                logging.error(google_user.jid + ' Mention:\n' + err.getvalue())
        if google_user.display_timeline & MODE_LIST:
            try:
                statuses = api._process_result(list_rpc)
                if statuses:
                    if statuses[0]['id'] > google_user.last_list_id:
                        google_user.last_list_id = statuses[0]['id']
                    for i in range(len(statuses) - 1, -1, -1):
                        if (at_username in statuses[i]['text']
                                and statuses[i]['id'] <= google_user.last_mention_id):
                            del statuses[i]
                    all_statuses.extend(statuses)
            except twitter.TwitterInternalServerError:
                pass
            except BaseException, e:
                if 'Not found' not in e.message:
                    err = StringIO('')
                    traceback.print_exc(file=err)
                    logging.error(google_user.jid + ' List:\n' + err.getvalue())
        if all_statuses:
            all_statuses.sort(cmp=lambda x, y: cmp(x['id'], y['id']))
            # Drop consecutive statuses that share the same id.
            last = all_statuses[-1]['id']
            for i in range(len(all_statuses) - 2, -1, -1):
                if last == all_statuses[i]['id']:
                    del all_statuses[i]
                else:
                    last = all_statuses[i]['id']
            content = utils.parse_statuses(all_statuses, filter_self=True,
                                           reverse=False)
            if content.strip():
                IdList.flush(google_user.jid)
                while CapabilitySet('xmpp').is_enabled():
                    try:
                        xmpp.send_message(google_user.jid, content)
                    except xmpp.Error:
                        pass
                    else:
                        break
        if google_user.display_timeline & MODE_DM:
            try:
                statuses = api._process_result(dm_rpc)
                content = utils.parse_statuses(statuses)
                if content.strip():
                    while CapabilitySet('xmpp').is_enabled():
                        try:
                            xmpp.send_message(google_user.jid,
                                              _('DIRECT_MESSAGES') + '\n\n' + content)
                        except xmpp.Error:
                            pass
                        else:
                            break
                    if statuses[-1]['id'] > google_user.last_dm_id:
                        google_user.last_dm_id = statuses[-1]['id']
            except twitter.TwitterInternalServerError:
                pass
            except BaseException:
                err = StringIO('')
                traceback.print_exc(file=err)
                logging.error(google_user.jid + ' DM:\n' + err.getvalue())
        google_user.last_update = int(time())
        Db.set_datastore(google_user)
def process(self):
    global _locale
    try:
        message = xmpp.Message(self.request.POST)
    except xmpp.InvalidMessageError:
        return
    jid = message.sender.split('/')[0]
    self._google_user = GoogleUser.get_by_jid(jid)
    if self._google_user is None:
        self._google_user = GoogleUser.add(jid)
    _locale = self._google_user.locale
    if self._google_user.enabled_user:
        self._twitter_user = TwitterUser.get_by_twitter_name(
            self._google_user.enabled_user, self._google_user.jid)
        self._api = Dummy()
        if self._twitter_user is None:
            self._google_user.enabled_user = ''
        else:
            self._api = twitter.Api(
                consumer_key=config.OAUTH_CONSUMER_KEY,
                consumer_secret=config.OAUTH_CONSUMER_SECRET,
                access_token_key=self._twitter_user.access_token_key,
                access_token_secret=self._twitter_user.access_token_secret)
            try:
                self._user = self._api.verify_credentials()
                if not self._user:
                    raise twitter.TwitterAuthenticationError
            except twitter.TwitterAuthenticationError:
                self._google_user.retry += 1
                if self._google_user.retry >= config.MAX_RETRY:
                    GoogleUser.disable(self._google_user.jid)
                    xmpp.send_message(self._google_user.jid,
                                      _('NO_AUTHENTICATION'))
                else:
                    Db.set_datastore(self._google_user)
                return
            else:
                if self._google_user.retry > 0:
                    self._google_user.retry = 0
                if self._twitter_user.twitter_name != self._user['screen_name']:
                    self._twitter_user.twitter_name = self._user['screen_name']
                    self._google_user.enabled_user = self._user['screen_name']
    else:
        self._twitter_user = Dummy()
        self._api = Dummy()
        self._user = Dummy()
    utils.set_jid(self._google_user.jid)
    result = self.parse_command(message.body)
    if result is None:
        return
    if result:
        while CapabilitySet('xmpp').is_enabled():
            try:
                message.reply(result)
            except xmpp.Error:
                pass
            else:
                break
    IdList.flush(self._google_user.jid)
    Db.set_datastore(self._google_user)
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import random

from wsgiref.handlers import CGIHandler

from google.appengine.api.labs import taskqueue
from google.appengine.api import memcache
from google.appengine.api.capabilities import CapabilitySet

memcache_service = CapabilitySet('memcache', methods=['set', 'get'])
hot_handler_queue = taskqueue.Queue(name='hothandler')
HOT_HANDLER_PREFIX = '/_ah/queue/hothandler/'


def wsgi_app(env, res):
    """Visit '/_ah/queue/hothandler/start' as admin to start a task."""
    token = env['PATH_INFO'].replace(HOT_HANDLER_PREFIX, '')
    cur_token = memcache.get(HOT_HANDLER_PREFIX)
    if cur_token is None:
        # With memcache down the stored token is unreadable; accept the
        # incoming one so the task chain keeps running.
        if not memcache_service.is_enabled():
            cur_token = token
    if token in [cur_token, 'start']:
        next_token = str(random.random())
        url = '%s%s' % (HOT_HANDLER_PREFIX, next_token)
        next_task = taskqueue.Task(countdown=10, url=url)
        hot_handler_queue.add(next_task)
        memcache.set(HOT_HANDLER_PREFIX, next_token)
def run_app(
    api=None,
    dict=dict, sys=sys,
    API_HANDLERS=API_HANDLERS,
    DEVNULL=DEVNULL,
    ERROR=ERROR,
    ERROR_HEADER=ERROR_HEADER,
    NOTFOUND=NOTFOUND,
    ):
    """The core application runner."""

    env = dict(os.environ)
    kwargs = {}

    sys._boot_stdout = sys.stdout
    sys.stdout = DEVNULL
    write = sys._boot_stdout.write

    try:
        http_method = env['REQUEST_METHOD']
        content_type = env.get('CONTENT_TYPE', '')

        args = [arg for arg in env['PATH_INFO'].split('/') if arg]
        if args:
            api = args[0]

        # Return a NotFoundError if it doesn't look like a valid api call.
        if (http_method != 'POST') or (api not in API_HANDLERS):
            write(ERROR_HEADER)
            write(NOTFOUND)
            return

        # Force the request to be over SSL when on a production deployment.
        if RUNNING_ON_GOOGLE_SERVERS and env.get('HTTPS') not in SSL_FLAGS:
            write(ERROR_HEADER)
            write(NOTAUTHORISED)
            return

        # We assume that the request is utf-8 encoded, but that the request
        # kwarg "keys" are in ascii and the kwarg values are in utf-8.
        if ';' in content_type:
            content_type = content_type.split(';', 1)[0]

        # Parse the POST body if it exists and is of a known content type.
        if content_type in VALID_REQUEST_CONTENT_TYPES:
            post_environ = env.copy()
            post_environ['QUERY_STRING'] = ''
            post_data = FieldStorage(
                environ=post_environ, fp=env['wsgi.input']
                ).list
            if post_data:
                for field in post_data:
                    key = field.name
                    if field.filename:
                        continue
                    if key not in API_REQUEST_KEYS:
                        continue
                    value = unicode(field.value, UTF8, 'strict')
                    kwargs[key] = value

        # Check that there's a token and it validates.
        if 0:  # @/@
            signature = kwargs.pop('sig', None)
            if not signature:
                write(ERROR_HEADER)
                write(NOTAUTHORISED)
                return
            if not validate_tamper_proof_string(
                'token', token, key=API_KEY, timestamped=True
                ):
                logging.info("Unauthorised API Access Attempt: %r", token)
                write(UNAUTH)
                return

        handler, store_needed = api_definition

        # Check if the datastore and memcache services are available.
        if store_needed:
            disabled = None
            if not CapabilitySet('datastore_v3',
                                 capabilities=['write']).is_enabled():
                disabled = 'datastore'
            elif not CapabilitySet('memcache', methods=['set']).is_enabled():
                disabled = 'memcache'
            if disabled:
                write(ERROR_HEADER)
                write(DISABLED % disabled)
                return

        try:
            # Try to respond with the result of calling the api handler.
            args = tuple(args)
            result = handler(*args, **kwargs)
            if result:
                write(OK_HEADER)
                write(json_encode(result))
            else:
                write(ERROR)
        except Exception, error:
            # Log the error and return it as json.
            logging.error(''.join(format_exception(*sys.exc_info())))
            write(ERROR_HEADER)
            write(json_encode({
                "error": error.__class__.__name__,
                "error_msg": str(error)
                }))

    except:
        # This shouldn't ever happen, but just in case... (note: no
        # exception instance is bound here, so report generically).
        logging.critical(''.join(format_exception(*sys.exc_info())))
        write(ERROR_HEADER)
        write(json_encode({
            "error": "InternalError",
            "error_msg": "an unexpected error occurred"
            }))

    finally:
        sys.stdout = sys._boot_stdout