def delete_course(cls):
    """Called back repeatedly from deferred queue dispatcher.

    Deletes one batch of entities of a single remaining kind in the
    current namespace, then re-queues itself until no kinds remain,
    at which point the course-deleted hooks are fired.
    """
    try:
        kind_name = cls.get_any_undeleted_kind_name()
        if not kind_name:
            # No entity types remain to be deleted from the Datastore for
            # this course (i.e. namespace), so call (in no particular
            # order) callbacks waiting to be informed of course deletion.
            ns_name = namespace_manager.get_namespace()
            common_utils.run_hooks(
                cls.COURSE_DELETED_HOOKS.itervalues(), ns_name)
            logging.info(
                'CourseDeleteHandler found no entity types to delete for '
                'namespace %s; deletion complete.', ns_name)
            return

        # Fix: dropped the unused 'model' local; the Model is only needed
        # inside the keys-only query below.
        keys = list(db.Query(Model(kind_name), keys_only=True).run(
            batch_size=cls.DELETE_BATCH_SIZE))
        entities.delete(keys)
        logging.info(
            'CourseDeleteHandler deleted %d entities of type %s from '
            'namespace %s', len(keys), kind_name,
            namespace_manager.get_namespace())
        # Re-queue ourselves for the next batch.
        deferred.defer(cls.delete_course)
    except Exception:
        logging.critical(
            'Failed when attempting to delete course for namespace %s',
            namespace_manager.get_namespace())
        common_utils.log_exception_origin()
        raise
def claim_proxy():
    """Claim the given proxy id for the current user.

    Expects a JSON body with a non-empty 'proxy_id'; aborts with 400/404
    on bad input, 201 on success.
    """
    body = flask.request.get_json()
    if body is None:
        flask.abort(400, 'JSON body and mime type required.')

    proxy_id = body.get('proxy_id', None)
    if proxy_id is None or proxy_id == '':
        flask.abort(400, 'proxy_id required.')

    # this will run as a user, so we don't need to authenticate
    # it (already done in main). Running in users namespace.
    assert namespace_manager.get_namespace() != ''
    building_id = namespace_manager.get_namespace()

    # We need to reset the namespace to access the proxies
    namespace_manager.set_namespace(None)

    proxy = Proxy.get_by_id(proxy_id)
    # Fix: identity comparison with None ('is None', not '== None').
    if proxy is None:
        logging.info('Proxy \'%s\' not found', proxy_id)
        flask.abort(404)

    if proxy.building_id is not None:
        flask.abort(400, 'Proxy already claimed')

    proxy.building_id = building_id
    proxy.put()
    return ('', 201)
def push_batch():
    """Push all the events that have been caused by this request."""
    pending = flask.g.get('events', None)
    setattr(flask.g, 'events', None)
    if pending is None:
        return

    logging.info('Sending %d events to proxy', len(pending))
    pusher_shim = pusher_client.get_client()

    # Now figure out what channel to post these to. Building id should
    # always be in the namespace for any request which triggers events,
    # so we use the namespace. Horrid.
    assert namespace_manager.get_namespace() != ''
    building_id = namespace_manager.get_namespace()

    try:
        # Proxies are stored outside the building namespace.
        namespace_manager.set_namespace(None)
        for proxy in Proxy.query(Proxy.building_id == building_id).iter():
            channel = 'private-%s' % proxy.key.string_id()
            logging.info('Pushing %d events to channel %s',
                         len(pending), channel)
            pusher_shim.push(channel, pending)
    finally:
        namespace_manager.set_namespace(building_id)
def run(self):
    """Index the course.

    Clears stale documents for every allowed locale, then re-indexes
    each locale, accumulating per-run statistics.

    Returns:
      dict with deleted_docs, num_indexed_docs, doc_types counter,
      indexing_time_secs and the list of indexed locales.
    """
    namespace = namespace_manager.get_namespace()
    # Fix: reuse the 'namespace' local instead of a redundant second
    # namespace_manager.get_namespace() call in the log statement.
    logging.info('Running indexing job for namespace %s. Incremental: %s',
                 namespace, self.incremental)
    app_context = sites.get_app_context_for_namespace(namespace)

    # Make a request URL to make sites.get_course_for_current_request work
    sites.set_path_info(app_context.slug)

    indexing_stats = {
        'deleted_docs': 0,
        'num_indexed_docs': 0,
        'doc_types': collections.Counter(),
        'indexing_time_secs': 0,
        'locales': []
    }
    # First pass: clear old documents for every locale.
    for locale in app_context.get_allowed_locales():
        stats = clear_index(namespace, locale)
        indexing_stats['deleted_docs'] += stats['deleted_docs']
    # Second pass: re-index every locale.
    for locale in app_context.get_allowed_locales():
        app_context.set_current_locale(locale)
        course = courses.Course(None, app_context=app_context)
        stats = index_all_docs(course, self.incremental)
        indexing_stats['num_indexed_docs'] += stats['num_indexed_docs']
        indexing_stats['doc_types'] += stats['doc_types']
        indexing_stats['indexing_time_secs'] += stats['indexing_time_secs']
        indexing_stats['locales'].append(locale)
    return indexing_stats
def delete_course(cls):
    """Called back repeatedly from deferred queue dispatcher.

    Deletes one batch of entities of the first remaining kind in the
    current namespace, then re-queues itself until no kinds remain.
    """
    try:
        kind = metadata.Kind.all().get()
        if not kind:
            logging.info(
                'CourseDeleteHandler found no entity types to delete for '
                'namespace %s; deletion complete.',
                namespace_manager.get_namespace())
            return

        kind_name = kind.kind_name
        # Fix: dropped the unused 'model' local; the Model is only needed
        # inside the keys-only query below.
        keys = list(db.Query(Model(kind_name), keys_only=True).run(
            batch_size=cls.DELETE_BATCH_SIZE))
        entities.delete(keys)
        logging.info(
            'CourseDeleteHandler deleted %d entities of type %s from '
            'namespace %s', len(keys), kind_name,
            namespace_manager.get_namespace())
        # Re-queue ourselves for the next batch.
        deferred.defer(cls.delete_course)
    except Exception:
        logging.critical(
            'Failed when attempting to delete course for namespace %s',
            namespace_manager.get_namespace())
        common_utils.log_exception_origin()
        raise
def delete_course(cls):
    """Called back repeatedly from deferred queue dispatcher.

    Deletes one batch of entities of a single remaining kind in the
    current namespace, then re-queues itself until no kinds remain,
    at which point the course-deleted hooks are fired.
    """
    try:
        kind_name = cls.get_any_undeleted_kind_name()
        if not kind_name:
            # No entity types remain to be deleted from the Datastore for
            # this course (i.e. namespace), so call (in no particular
            # order) callbacks waiting to be informed of course deletion.
            ns_name = namespace_manager.get_namespace()
            common_utils.run_hooks(cls.COURSE_DELETED_HOOKS.itervalues(),
                                   ns_name)
            logging.info(
                'CourseDeleteHandler found no entity types to delete for '
                'namespace %s; deletion complete.', ns_name)
            return

        # Fix: dropped the unused 'model' local; the Model is only needed
        # inside the keys-only query below.
        keys = list(
            db.Query(Model(kind_name),
                     keys_only=True).run(batch_size=cls.DELETE_BATCH_SIZE))
        entities.delete(keys)
        logging.info(
            'CourseDeleteHandler deleted %d entities of type %s from '
            'namespace %s', len(keys), kind_name,
            namespace_manager.get_namespace())
        # Re-queue ourselves for the next batch.
        deferred.defer(cls.delete_course)
    except Exception:
        logging.critical(
            'Failed when attempting to delete course for namespace %s',
            namespace_manager.get_namespace())
        common_utils.log_exception_origin()
        raise
def run(self):
    """Index the course.

    Re-indexes every allowed locale of the course in the current
    namespace, accumulating per-run statistics.

    Returns:
      dict with num_indexed_docs, doc_types counter, indexing_time_secs
      and the list of indexed locales.
    """
    namespace = namespace_manager.get_namespace()
    # Fix: reuse the 'namespace' local instead of a redundant second
    # namespace_manager.get_namespace() call in the log statement.
    logging.info(
        "Running indexing job for namespace %s. Incremental: %s",
        namespace,
        self.incremental,
    )
    app_context = sites.get_app_context_for_namespace(namespace)

    # Make a request URL to make sites.get_course_for_current_request work
    sites.set_path_info(app_context.slug)

    indexing_stats = {
        "num_indexed_docs": 0,
        "doc_types": collections.Counter(),
        "indexing_time_secs": 0,
        "locales": [],
    }
    for locale in app_context.get_allowed_locales():
        app_context.set_current_locale(locale)
        course = courses.Course(None, app_context=app_context)
        stats = index_all_docs(course, self.incremental)
        indexing_stats["num_indexed_docs"] += stats["num_indexed_docs"]
        indexing_stats["doc_types"] += stats["doc_types"]
        indexing_stats["indexing_time_secs"] += stats["indexing_time_secs"]
        indexing_stats["locales"].append(locale)
    return indexing_stats
def query_rec_counter(q, limit, index_name='dwc', sort=None, curs=search.Cursor()):
    """Makes a search from curs.

    Args:
      q: query string; 'id:<docid>' triggers a direct id lookup.
      limit: maximum number of results per page.
      index_name: search index name.
      sort: unused; kept for interface compatibility.
      curs: search cursor to continue from.

    Returns:
      A 3-tuple (record count, new cursor or None, SEARCH_VERSION).
    """
    if not curs:
        curs = search.Cursor()

    # Direct document-id lookup: 'id:<docid>'.
    if q.startswith('id:'):
        did = q.split(':')[1].strip()
        namespace = namespace_manager.get_namespace()
        results = search.Index(name=index_name, namespace=namespace).get_range(
            start_id=did, limit=1)
        if results:
            recs = len(results.results)
            return recs, None, SEARCH_VERSION
        else:
            return 0, None, SEARCH_VERSION

    # Always use 10,000 as the value for number_found_accuracy. Based on
    # extensive testing, using this maximum allowed value results in the best
    # count accuracy and incurs only a minor performance penalty.
    options = search.QueryOptions(
        limit=limit,
        # See Stucky research, Mar 2014.
        number_found_accuracy=10000,
        cursor=curs,
        ids_only=True)

    max_retries = 2
    retry_count = 0
    while retry_count < max_retries:
        try:
            query = search.Query(query_string=q, options=options)
            namespace = namespace_manager.get_namespace()
            results = search.Index(
                name=index_name, namespace=namespace).search(query)
            if results:
                recs = len(results.results)
                return recs, results.cursor, SEARCH_VERSION
            else:
                logging.info(
                    'No results from query %s for namespace=%s '
                    'index_name=%s\nVersion: %s'
                    % (q, namespace, index_name, SEARCH_VERSION))
                return 0, None, SEARCH_VERSION
        except Exception as e:
            logging.error(
                'Search failed.\nQUERY:\n %s\nERROR:\n%s\nVersion: %s'
                % (q, e, SEARCH_VERSION))
            retry_count += 1
    # Fix: previously fell off the end (implicitly returning None) after
    # exhausting retries, breaking callers that unpack a 3-tuple. Return
    # an empty result instead; the failure was already logged above.
    # (The unused 'error' accumulator was removed as part of this fix.)
    return 0, None, SEARCH_VERSION
def run(self):
    """Index the course associated with the active namespace."""
    current_ns = namespace_manager.get_namespace()
    logging.info('Running indexing job for namespace %s. Incremental: %s',
                 current_ns, self.incremental)
    ctx = sites.get_app_context_for_namespace(current_ns)
    return index_all_docs(courses.Course(None, app_context=ctx),
                          self.incremental)
def testTouch(self):
    """Touch creates a file only in the requested namespace and leaves the current one alone."""
    ns_before = namespace_manager.get_namespace()
    files.Touch('/tmp/touch.txt', namespace='tmp')
    # File visible in the 'tmp' namespace...
    self.assertTrue(files.Exists('/tmp/touch.txt', namespace='tmp'))
    # ...but not in the current namespace.
    self.assertFalse(files.Exists('/tmp/touch.txt'))
    # Touch must not leak a namespace change.
    self.assertEqual(ns_before, namespace_manager.get_namespace())
def query_rec_counter(q, limit, index_name='dwc', sort=None, curs=search.Cursor()):
    """Makes a search from curs.

    Args:
      q: query string; 'id:<docid>' triggers a direct id lookup.
      limit: maximum number of results per page.
      index_name: search index name.
      sort: unused; kept for interface compatibility.
      curs: search cursor to continue from.

    Returns:
      A 3-tuple (record count, new cursor or None, SEARCH_VERSION).
    """
    if not curs:
        curs = search.Cursor()

    # Direct document-id lookup: 'id:<docid>'.
    if q.startswith('id:'):
        did = q.split(':')[1].strip()
        namespace = namespace_manager.get_namespace()
        results = search.Index(name=index_name, namespace=namespace).get_range(
            start_id=did, limit=1)
        if results:
            recs = len(results.results)
            return recs, None, SEARCH_VERSION
        else:
            return 0, None, SEARCH_VERSION

    # Always use 10,000 as the value for number_found_accuracy. Based on
    # extensive testing, using this maximum allowed value results in the best
    # count accuracy and incurs only a minor performance penalty.
    options = search.QueryOptions(
        limit=limit,
        # See Stucky research, Mar 2014.
        number_found_accuracy=10000,
        cursor=curs,
        ids_only=True)

    max_retries = 2
    retry_count = 0
    while retry_count < max_retries:
        try:
            query = search.Query(query_string=q, options=options)
            namespace = namespace_manager.get_namespace()
            results = search.Index(
                name=index_name, namespace=namespace).search(query)
            if results:
                recs = len(results.results)
                return recs, results.cursor, SEARCH_VERSION
            else:
                logging.info(
                    'No results from query %s for namespace=%s '
                    'index_name=%s\nVersion: %s'
                    % (q, namespace, index_name, SEARCH_VERSION))
                return 0, None, SEARCH_VERSION
        except Exception as e:
            logging.error(
                'Search failed.\nQUERY:\n %s\nERROR:\n%s\nVersion: %s'
                % (q, e, SEARCH_VERSION))
            retry_count += 1
    # Fix: previously fell off the end (implicitly returning None) after
    # exhausting retries, breaking callers that unpack a 3-tuple. Return
    # an empty result instead; the failure was already logged above.
    # (The unused 'error' accumulator was removed as part of this fix.)
    return 0, None, SEARCH_VERSION
def testListFiles(self):
    """ListFiles only sees files created in the matching namespace."""
    ns_before = namespace_manager.get_namespace()
    files.Touch('/tmp/touch.txt', namespace='tmp')
    # Root listing shows the 'tmp' directory in the tmp namespace only.
    self.assertSequenceEqual([['tmp'], []],
                             files.ListDir('/', namespace='tmp'))
    self.assertEqual(1, len(files.ListFiles('/tmp', namespace='tmp')))
    self.assertEqual(0, len(files.ListFiles('/tmp')))
    # No namespace leak.
    self.assertEqual(ns_before, namespace_manager.get_namespace())
def testListDir(self):
    """ListDir separates directories from files and respects namespaces."""
    ns_before = namespace_manager.get_namespace()
    files.Touch('/tmp/touch.txt', namespace='tmp')
    # Root of the tmp namespace contains the 'tmp' dir and no files.
    self.assertSequenceEqual([['tmp'], []],
                             files.ListDir('/', namespace='tmp'))
    subdirs, file_ents = files.ListDir('/tmp', namespace='tmp')
    self.assertEqual(0, len(subdirs))
    self.assertEqual(1, len(file_ents))
    self.assertEqual('/tmp/touch.txt', file_ents[0].path)
    # The default namespace is untouched.
    self.assertSequenceEqual([[], []], files.ListDir('/'))
    # No namespace leak.
    self.assertEqual(ns_before, namespace_manager.get_namespace())
def fetch_icals_test(request):
    """Fetch every approved ICalendarSource, logging failures instead of raising."""
    # NOTE(review): setting the namespace to its own current value looks
    # like a no-op -- confirm intent before removing.
    set_namespace(get_namespace())
    # Fix: lazy %-style logging args instead of eager string interpolation.
    logging.warning('namespace: %s', get_namespace())
    try:
        logging.warning("Starting New Ical fetch thing")
        q = ICalendarSource.all().filter('status = ', 'approved')
        logging.warning(str(q))
        for cal in q:
            cal.fetch()
            logging.warning(str(cal))
    except Exception:
        # Fix: dropped the unused exception binding; traceback.format_exc()
        # already captures the active exception. Also switched to lazy args.
        logging.error("AHHHHHHHHHHHHHH Ical ting failed - %s in \n%s",
                      traceback.format_exc(), str(request.POST))
def post(self, userURL): userNamespace = namespace_manager.get_namespace() # USERS ARE STORED IN THE DEFAULT NAMESPACE namespace_manager.set_namespace('') user = self.current_user # newName=self.request.get('userName') newWebsiteURL = self.request.get('userWebsite') newUserAreas = self.request.get('userAreas') newUserProfession = self.request.get('userProfession') newUserBios = self.request.get('userBio') newEmail = self.request.get('userEmail') newNotificationFrequency = self.request.get( 'userNotificationFrequency') try: user.update(self, newWebsiteURL, newUserAreas, newUserProfession, newUserBios, newEmail, newNotificationFrequency) except WhysaurusException as e: self.response.out.write( self.template_render('message.html', {'message': str(e)})) namespace_manager.set_namespace(userNamespace) self.response.out.write( self.template_render('profile.html', self.makeTemplateValues(user, user)))
def testNamespaces(self):
    """Tests namespace support."""
    from google.appengine.api import namespace_manager
    saved_ns = namespace_manager.get_namespace()

    class Author(db.Model):
        name = db.StringProperty()

    class Book(db.Model):
        title = db.StringProperty()

    try:
        namespace_manager.set_namespace('testing')
        author = Author(name='Douglas Adams', key_name='douglasadams')
        author.put()
        book = Book(parent=author, title="Last Chance to See")
        book.put()
        # Entity is visible inside the 'testing' namespace.
        found = Book.all().filter("title =", "Last Chance to See").get()
        self.assertEqual(found.title, book.title)
    finally:
        namespace_manager.set_namespace(saved_ns)

    # Outside the 'testing' namespace the book must be invisible.
    self.assertEqual(
        Book.all().filter("title =", "Last Chance to See").get(), None)
def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False):
    """An auto-batching wrapper for memcache.get() or .get_multi().

    Args:
      key: Key to get. This must be a string; no prefix is applied.
      for_cas: If True, request and store CAS ids on the Context.
      namespace: Optional namespace; defaults to the current namespace.
      use_cache: If True, route the get through the batcher's add_once()
        -- presumably deduplicating repeated gets of the same
        key/options pair; confirm against the batcher implementation.

    Returns:
      A Future (!) whose return value is the value retrieved from
      memcache, or None.

    Raises:
      TypeError: if key is not a str.
      ValueError: if for_cas is not a bool.
    """
    if not isinstance(key, str):
        raise TypeError('key must be a string; received %r' % key)
    if not isinstance(for_cas, bool):
        raise ValueError('for_cas must be a bool; received %r' % for_cas)
    if namespace is None:
        namespace = namespace_manager.get_namespace()
    # Batching is keyed on (for_cas, namespace), so gets with different
    # options land in different batches.
    options = (for_cas, namespace)
    batcher = self._memcache_get_batcher
    if use_cache:
        return batcher.add_once(key, options)
    else:
        return batcher.add(key, options)
def get_overrides(cls, force_update=False): """Returns current property overrides, maybe cached.""" # Check if datastore property overrides are enabled at all. has_value, environ_value = UPDATE_INTERVAL_SEC.get_environ_value() if (has_value and environ_value == 0) or (UPDATE_INTERVAL_SEC.default_value == 0): return # Check if cached values are still fresh. now = long(time.time()) age = now - cls.last_update_time if force_update or age < 0 or age >= cls.update_interval: try: old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace( appengine_config.DEFAULT_NAMESPACE_NAME) cls.load_from_db() finally: namespace_manager.set_namespace(old_namespace) except Exception as e: # pylint: disable-msg=broad-except logging.error('Failed to load properties from a database: %s.', str(e)) finally: # Avoid overload and update timestamp even if we failed. cls.last_update_time = now cls.update_index += 1 return cls.db_overrides
def update_note_parents(entity):
    """Mapper callback: recompute the parents property of a note entity."""
    # Mapper callbacks may arrive under an arbitrary namespace; force
    # the 'newyear' namespace before touching the datastore.
    if namespace_manager.get_namespace() != 'newyear':
        namespace_manager.set_namespace('newyear')
    entity.parents = get_note_parents(entity)
    yield op.db.Put(entity)
def init_xsrf_secret_if_none(cls): """Verifies that non-default XSRF secret exists; creates one if not.""" # Any non-default value is fine. if XSRF_SECRET.value and XSRF_SECRET.value != XSRF_SECRET.default_value: return # All property manipulations must run in the default namespace. old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace( appengine_config.DEFAULT_NAMESPACE_NAME) # Look in the datastore directly. entity = ConfigPropertyEntity.get_by_key_name(XSRF_SECRET.name) if not entity: entity = ConfigPropertyEntity(key_name=XSRF_SECRET.name) # Any non-default non-None value is fine. if (entity.value and not entity.is_draft and (str(entity.value) != str(XSRF_SECRET.default_value))): return # Initialize to random value. entity.value = base64.urlsafe_b64encode( os.urandom(XSRF_SECRET_LENGTH)) entity.is_draft = False entity.put() finally: namespace_manager.set_namespace(old_namespace)
def set_multi(self, data, expire=_DEFAULT_TTL, namespace=None):
    """Persist several live settings to the datastore, memcache and the local cache."""
    from kay.ext.live_settings.models import KayLiveSetting
    saved_ns = namespace_manager.get_namespace()
    try:
        if namespace is not None:
            namespace_manager.set_namespace(namespace)

        items = data.items()
        # One batched datastore write for all settings.
        db.put([KayLiveSetting(key_name=name, ttl=expire, value=val)
                for name, val in items])
        # Mirror every setting into memcache under the shared prefix.
        memcache.set_multi(
            dict(("kay:live:%s" % name, (val, expire))
                 for name, val in items))
        for name, val in items:
            self._set_local_cache(name, val, ttl=expire, namespace=namespace)
    finally:
        if namespace is not None:
            namespace_manager.set_namespace(saved_ns)
def post_start_creation(self, filename, options):
    """Start object creation with a POST.

    This implements the resumable upload XML API.

    Args:
      filename: gcs filename of form /bucket/filename.
      options: a dict containing all user specified request headers.
        e.g. {'content-type': 'foo', 'x-goog-meta-bar': 'bar'}.

    Returns:
      a token (blobkey) used for continuing upload.
    """
    saved_ns = namespace_manager.get_namespace()
    try:
        # Upload bookkeeping entities always live in the default namespace.
        namespace_manager.set_namespace('')
        common.validate_file_path(filename)
        token = self._filename_to_blobkey(filename)
        # Drop any leftover state from a previous upload of this file.
        self._cleanup_old_file(_AE_GCSFileInfo_.get_by_key_name(token))
        upload_info = _AE_GCSFileInfo_(key_name=token,
                                       filename=filename,
                                       finalized=False)
        upload_info.options = options
        upload_info.put()
        return token
    finally:
        namespace_manager.set_namespace(saved_ns)
def emit(self, record):
    """Log an error to the datastore, if applicable.

    Args:
      The logging.LogRecord object.
      See http://docs.python.org/library/logging.html#logging.LogRecord
    """
    try:
        # Only records carrying exception info are persisted.
        if not record.exc_info:
            return

        signature = self.__GetSignature(record.exc_info)
        saved_ns = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace('')
            # memcache.add succeeds only for the first emitter of this
            # signature within log_interval; later duplicates are dropped.
            if not memcache.add(signature, None, self.log_interval):
                return
            db.run_in_transaction_custom_retries(
                1, self.__EmitTx, signature, record.exc_info)
        finally:
            namespace_manager.set_namespace(saved_ns)
    except Exception:
        self.handleError(record)
def setUp(self):
    """Create a test course with two extra tabs and register a student."""
    super(ExtraTabsTests, self).setUp()

    self.base = '/' + COURSE_NAME
    app_context = actions.simple_add_course(
        COURSE_NAME, ADMIN_EMAIL, 'Extra Tabs Course')
    # Switch into the course namespace; self.old_namespace is retained,
    # presumably for restoration in tearDown -- confirm.
    self.old_namespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace('ns_%s' % COURSE_NAME)
    self.course = courses.Course(None, app_context)

    # Inject two tabs: one with inline content on the left visible to
    # everyone, one external link on the right visible to students only.
    courses.Course.ENVIRON_TEST_OVERRIDES = {
        'course': {
            'extra_tabs': [
                {
                    'label': 'FAQ',
                    'position': 'left',
                    'visibility': 'all',
                    'url': '',
                    'content': 'Frequently asked questions'},
                {
                    'label': 'Resources',
                    'position': 'right',
                    'visibility': 'student',
                    'url': 'http://www.example.com',
                    'content': 'Links to resources'}]
        }
    }
    self.faq_url = 'modules/extra_tabs/render?index=0'
    actions.login(STUDENT_EMAIL, is_admin=False)
    actions.register(self, STUDENT_NAME)
def index(self):
    """Fires off an indexing MR job over files in GCS at supplied path."""
    input_class = (input_readers.__name__ + "." +
                   input_readers.FileInputReader.__name__)
    path = self.request.get('path')
    shard_count = self.request.get_range('shard_count', default=8)
    processing_rate = self.request.get_range('processing_rate', default=100)
    files_pattern = '/gs/%s' % path

    # Create file on GCS to log any failed index puts:
    namespace = namespace_manager.get_namespace()
    filename = '/gs/vn-staging/errors/failures-%s-all.csv' % namespace
    write_path = files.gs.create(filename,
                                 mime_type='text/tab-separated-values',
                                 acl='public-read')

    # Kick off the mapreduce; '/mr/finalize' is notified when it is done.
    mrid = mrc.start_map(
        path,
        "vertnet.service.search.build_search_index",
        input_class,
        {
            "input_reader": dict(files=[files_pattern], format='lines'),
            "resource": path,
            "write_path": write_path,
            "processing_rate": processing_rate,
            "shard_count": shard_count
        },
        mapreduce_parameters={'done_callback': '/mr/finalize'},
        shard_count=shard_count)

    # Record the job so failures can be inspected/retried later.
    IndexJob(id=mrid, namespace=namespace, resource=path,
             write_path=write_path, failed_logs=['NONE']).put()
def set(self, key, value, expire=_DEFAULT_TTL, namespace=None):
    """Persist one live setting to the datastore, memcache and the local cache."""
    from kay.ext.live_settings.models import KayLiveSetting
    saved_ns = namespace_manager.get_namespace()
    try:
        if namespace is not None:
            namespace_manager.set_namespace(namespace)

        new_setting = KayLiveSetting(
            key_name=key,
            ttl=expire,
            value=value,
        )
        new_setting.put()

        # Set the memcached key to never expire. It only expires
        # if it is evicted from memory. TTLs are handled by the
        # in-memory cache.
        memcache.set("kay:live:%s" % key, (value, expire))
        self._set_local_cache(key, value, ttl=expire, namespace=namespace)
    finally:
        if namespace is not None:
            namespace_manager.set_namespace(saved_ns)
    return new_setting
def main(self):
    """Main method of the deferred task.

    Runs self.run() inside the job's namespace, recording start,
    completion (with JSON-serialized result and elapsed seconds) or
    failure in DurableJobEntity via datastore transactions.
    """
    logging.info('Job started: %s', self._job_name)
    time_started = time.time()
    old_namespace = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(self._namespace)
        try:
            db.run_in_transaction(DurableJobEntity._start_job,
                                  self._job_name)
            result = self.run()
            db.run_in_transaction(DurableJobEntity._complete_job,
                                  self._job_name,
                                  transforms.dumps(result),
                                  long(time.time() - time_started))
            logging.info('Job completed: %s', self._job_name)
        except (Exception, runtime.DeadlineExceededError) as e:
            logging.error(traceback.format_exc())
            logging.error('Job failed: %s\n%s', self._job_name, e)
            db.run_in_transaction(DurableJobEntity._fail_job,
                                  self._job_name,
                                  traceback.format_exc(),
                                  long(time.time() - time_started))
            # PermanentTaskFailure stops the deferred library from
            # retrying this task.
            raise deferred.PermanentTaskFailure(e)
    finally:
        # Always restore the caller's namespace.
        namespace_manager.set_namespace(old_namespace)
def getKeySecret(handler, update_auth=None):
    """Return the master auth secret, optionally validating/refreshing it.

    When update_auth is supplied and differs from the stored secret, it is
    verified against the secure host before being persisted. Runs in
    config.NAMESPACE and restores the caller's namespace afterwards.
    """
    namespace = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(config.NAMESPACE)
        key = AuthKey.get_by_key_name("master")
        if update_auth:
            if not key or key.secret != update_auth:
                try:
                    fetch = urlfetch.fetch(
                        "%s/auth/?site=%s&auth=%s" % (
                            config.SECURE_HOST,
                            handler.request.headers["Host"], update_auth))
                except Exception:
                    # Fix: bare 'except:' narrowed to Exception; the
                    # best-effort bail-out behavior is preserved.
                    return
                if fetch.status_code == 200 and fetch.content == "OK":
                    key = AuthKey(key_name="master")
                    key.secret = update_auth
                    try:
                        key.put()
                    except Exception:
                        # Fix: bare 'except:' narrowed to Exception;
                        # datastore write failures remain non-fatal.
                        pass
        if key:
            return key.secret
    finally:
        namespace_manager.set_namespace(namespace)
def get(self, userURL):
    """Render another user's profile page, respecting private areas."""
    userNamespace = namespace_manager.get_namespace()
    user = self.current_user
    # USERS ARE STORED IN THE DEFAULT NAMESPACE
    namespace_manager.set_namespace('')
    profileUser = WhysaurusUser.getByUrl(userURL)
    if profileUser:
        logging.info("got Profile User. Areas were: " +
                     str(profileUser.privateAreas))
        # Restore the caller's namespace before the permission check.
        namespace_manager.set_namespace(userNamespace)
        if user:
            # Admins always see the profile; otherwise the viewer's
            # current area must be among the owner's private areas (or
            # the owner has none).
            permissionToView = user.isAdmin or \
                profileUser.privateAreas == [] or \
                self.session.get('currentArea') in profileUser.privateAreas
        else:
            permissionToView = len(profileUser.privateAreas) == 0
    # NOTE(review): if profileUser is None the namespace is never
    # restored and permissionToView is never assigned; the short-circuit
    # below avoids the NameError, but confirm this is intended.
    if profileUser and permissionToView:
        self.response.out.write(
            self.template_render(
                'profile.html',
                self.makeTemplateValues(user, profileUser)))
    else:
        self.response.out.write('Could not find user: ' + userURL)
def get_overrides(cls, force_update=False):
    """Returns current property overrides, maybe cached.

    Refreshes cls.db_overrides from the datastore when the cached copy
    is older than UPDATE_INTERVAL_SEC or when force_update is set.
    """
    now = long(time.time())
    age = now - cls.last_update_time
    max_age = UPDATE_INTERVAL_SEC.get_value(db_overrides=cls.db_overrides)
    if force_update or age < 0 or age >= max_age:
        # Value of '0' disables all datastore overrides.
        if UPDATE_INTERVAL_SEC.get_value() == 0:
            cls.db_overrides = {}
            return cls.db_overrides

        # Load overrides from a datastore.
        try:
            old_namespace = namespace_manager.get_namespace()
            try:
                # Overrides always live in the default namespace.
                namespace_manager.set_namespace(
                    appengine_config.DEFAULT_NAMESPACE_NAME)
                cls.load_from_db()
            finally:
                namespace_manager.set_namespace(old_namespace)
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.error(
                'Failed to load properties from a database: %s.', str(e))
        finally:
            # Avoid overload and update timestamp even if we failed.
            cls.last_update_time = now
            cls.update_index += 1

    return cls.db_overrides
def MakeFollows(self):
    """Backfill Follow records for users who created, edited or commented
    on points.

    Processes PointRoots in batches of 10 (ordered by url) and re-queues
    itself with the next start url until every root is processed.
    """
    nextURL = None
    firstURL = self.request.get('nexturl')
    query = PointRoot.query().order(PointRoot.url)
    if firstURL:
        query = query.filter(PointRoot.url >= firstURL)
    # Fetch one extra row to detect whether another batch is needed.
    pointRoots = query.fetch(11)
    if len(pointRoots) == 11:
        nextURL = pointRoots[-1].url
        pointRootsToReview = pointRoots[:10]
    else:
        pointRootsToReview = pointRoots

    i = 0
    for pointRoot in pointRootsToReview:
        pointRootKey = pointRoot.key
        # Map authorURL -> relationship; 'created' is recorded for the
        # version-1 author, later contributors get 'edited', and
        # commenters get 'commented' only if not already present.
        followers = {}
        versions = pointRoot.getAllVersions()
        for point in versions:
            if point.version == 1:
                followers[point.authorURL] = 'created'
            elif not point.authorURL in followers:
                followers[point.authorURL] = 'edited'
        for comment in pointRoot.getComments():
            if not comment.userUrl in followers:
                followers[comment.userUrl] = 'commented'
        logging.info('ROOT: %s FOLLOWERS: %s' % (pointRoot.url,
                                                 str(followers)))
        for url in followers.iterkeys():
            followType = followers[url]
            # Users live in the default namespace: switch there to look
            # the user up, then restore the previous namespace.
            previousNamespace = namespace_manager.get_namespace()
            if previousNamespace and previousNamespace != '':
                namespace_manager.set_namespace('')  # DEFAULT NAMESPACE
                usr = WhysaurusUser.getByUrl(url)
                namespace_manager.set_namespace(previousNamespace)
            else:
                usr = WhysaurusUser.getByUrl(url)
            logging.info('Trying to follow for U:%s, R:%s, T:%s' %
                         (url, pointRoot.url, followType))
            f = None
            f = Follow.createFollow(usr.key, pointRootKey, followType)
            if f:
                i = i + 1
                logging.info('ADDED follow for U:%s, R:%s, T:%s' %
                             (url, pointRoot.url, followType))
    logging.info('Added %d follows' % i)

    if nextURL:
        # Re-queue to continue from the first unprocessed url.
        t = Task(url="/MakeFollows", params={'nexturl': nextURL})
        t.add(queue_name="notifications")
        logging.info('Requeing MakeFollows task to start at url %s '
                     % nextURL)
def authenticate():
    """Check this request comes from a valid proxy."""
    assert namespace_manager.get_namespace() == ''

    header = flask.request.headers.get('awesomation-proxy', None)
    if header != 'true':
        logging.debug('Incorrent header for proxy auth - '
                      'awesomation-proxy = \'%s\'', header)
        return None

    allowed_endpoints = {'device.handle_events',
                         'pushrpc.pusher_client_auth_callback'}
    if flask.request.endpoint not in allowed_endpoints:
        logging.debug('Endpoint not allowed for proxy auth - '
                      '\'%s\'', flask.request.endpoint)
        return None

    auth = flask.request.authorization
    if not auth:
        logging.error('Proxy auth requires basic auth!')
        return None

    # Creates the proxy on first sight; otherwise fetches the existing
    # one, whose stored secret must match the supplied password.
    proxy = Proxy.get_or_insert(auth.username, secret=auth.password)
    if proxy.secret != auth.password:
        logging.error('Incorrect secret for proxy auth!')
        return None

    return proxy
def resolveNamespace(self, override, enable_prefix=True):
    """Compute the effective cache namespace from config, override and prefix.

    Returns '__NONE__' when neither a namespace nor a prefix applies,
    the bare prefix or namespace when only one applies, and
    '<prefix><sep><namespace>' when both do.
    """
    _EMPTY_NAMESPACE = '__NONE__'

    prefix = None
    if enable_prefix and self.cacherConfig['prefix_namespace']:
        mode = self.cacherConfig['prefix_mode']
        if mode == 'explicit':
            prefix = self.cacherConfig['prefix']
        elif mode == 'inherit':
            prefix = namespace_manager.get_namespace()

    # An explicit override wins; otherwise fall back to the configured one.
    namespace = self.namespace if override is None else override

    if namespace is None and prefix is None:
        return _EMPTY_NAMESPACE
    if namespace is None:
        return prefix
    if prefix is None:
        return namespace
    separator = self.cacherConfig.get('namespace_seperator', '::')
    return separator.join([prefix, namespace])
def instance_namespace_context():
    """Context manager body: run the enclosed block under INSTANCE_NAMESPACE.

    The caller's namespace is restored afterwards, even on error.
    """
    caller_ns = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(INSTANCE_NAMESPACE)
        yield
    finally:
        namespace_manager.set_namespace(caller_ns)
def wrapped_method(*args, **kwargs):
    """Invoke the wrapped call under DATASTORE_NAMESPACE, restoring the caller's namespace."""
    caller_ns = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(DATASTORE_NAMESPACE)
        return original_method(*args, **kwargs)
    finally:
        namespace_manager.set_namespace(caller_ns)
def send_daily_birthday_messages():
    """
    It goes through all the namespaces making queries to the datastore
    for users who have the same birth day and birth month as today
    """
    today = datetime.now()
    saved_ns = namespace_manager.get_namespace()

    for ns in get_namespaces():
        # Forget about the default empty namespace
        if not ns:
            continue
        logging.debug("Birthdays for today [%s]-[%s] namespace [%s]",
                      today.month, today.day, ns)
        namespace_manager.set_namespace(ns)
        # Clear the cached list for the day
        memcache.delete('birthdays')
        # Go through every birthday celebrant
        for celebrant in get_birthdays(month=today.month, day=today.day):
            logging.info("Found a birthday for today! %s", celebrant.email)
            # Schedule sending the email
            deferred.defer(send_birthday_message, celebrant,
                           _queue="mail-queue")

    # Restore to the original namespace
    namespace_manager.set_namespace(saved_ns)
    return 'Birthday messages were scheduled for today'
def test_config_visible_from_any_namespace(self):
    """Test that ConfigProperty is visible from any namespace."""
    assert (
        config.UPDATE_INTERVAL_SEC.value ==
        config.UPDATE_INTERVAL_SEC.default_value)
    new_value = config.UPDATE_INTERVAL_SEC.default_value + 5

    # Add datastore override for known property.
    override = config.ConfigPropertyEntity(
        key_name=config.UPDATE_INTERVAL_SEC.name)
    override.value = str(new_value)
    override.is_draft = False
    override.put()

    # Check visible from default namespace (force a registry refresh).
    config.Registry.last_update_time = 0
    assert config.UPDATE_INTERVAL_SEC.value == new_value

    # Check visible from another namespace.
    saved_ns = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(
            'ns-test_config_visible_from_any_namespace')
        config.Registry.last_update_time = 0
        assert config.UPDATE_INTERVAL_SEC.value == new_value
    finally:
        namespace_manager.set_namespace(saved_ns)
def setUp(self):
    """Create a certificate-criteria test course and enroll a student."""
    super(CertificateCriteriaTestCase, self).setUp()
    self.base = '/' + self.COURSE_NAME
    context = actions.simple_add_course(
        self.COURSE_NAME, self.ADMIN_EMAIL, 'Certificate Criteria')
    # Switch into the course namespace; self.old_namespace is retained,
    # presumably for restoration in tearDown -- confirm.
    self.old_namespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)

    self.course = courses.Course(None, context)
    self.course.save()
    self.TEST_USER = actions.login('*****@*****.**')
    actions.register(self, self.TEST_USER.email())
    self.student = (
        models.StudentProfileDAO.get_enrolled_student_by_user_for(
            self.TEST_USER, context))

    # Override course.yaml settings by patching app_context.
    self.get_environ_old = sites.ApplicationContext.get_environ
    # Tests mutate this list to vary the criteria served by the patched
    # get_environ below.
    self.certificate_criteria = []

    def get_environ_new(app_context):
        environ = self.get_environ_old(app_context)
        environ['certificate_criteria'] = self.certificate_criteria
        return environ

    sites.ApplicationContext.get_environ = get_environ_new
def run(self):
    """Clear the index."""
    current_ns = namespace_manager.get_namespace()
    logging.info('Running clearing job for namespace %s.', current_ns)
    ctx = sites.get_app_context_for_namespace(current_ns)
    return clear_index(courses.Course(None, app_context=ctx))
def post(self):
    """Handle an app API POST: parse the request path and look up the app.

    Expects a path of the form /<prefix>/<apiVersion>/<appId>/... and
    reads 'token' and 'appver' from the request parameters.
    """
    logging.info('self.request')
    logging.info(self.request)
    token = self.request.get('token')
    version = self.request.get('appver')
    if not version:
        version = 0
    path = self.request.path
    pathArray = path.split("/")
    apiVersion = int(pathArray[2])
    aID = pathArray[3]
    cTime = int(time.time())
    auInfo = {}
    aInfo = {}
    param = {}
    allresult = {}
    gdNamespace = namespace_manager.get_namespace()
    appNamespace = 'APP_' + aID
    # NOTE(review): this re-sets the namespace to its own current value,
    # a no-op; presumably appNamespace was intended -- confirm which
    # namespace DB_App entities live in before changing.
    namespace_manager.set_namespace(gdNamespace)
    # find app
    try:
        aInfo = DB_App.get_by_id(aID)
    except Exception:
        # Bug fix: on failure aInfo is still the initial {} dict, which has
        # no .secretKey attribute, so the original error path raised
        # AttributeError and masked the real error. Report None instead.
        self.printError("dont find app1", 100,
                        getattr(aInfo, 'secretKey', None))
        return
def put_copy(self, src, dst):
    """Copy file from src to dst, including its metadata.

    Args:
      src: /bucket/filename. This file must exist.
      dst: /bucket/filename
    """
    common.validate_file_path(src)
    common.validate_file_path(dst)

    saved_ns = namespace_manager.get_namespace()
    try:
        # File metadata records always live in the empty namespace.
        namespace_manager.set_namespace('')
        src_blobkey = self._filename_to_blobkey(src)
        src_info = _AE_GCSFileInfo_.get_by_key_name(src_blobkey)
        dst_blobkey = self._filename_to_blobkey(dst)
        dst_info = _AE_GCSFileInfo_(key_name=dst_blobkey,
                                    filename=dst,
                                    finalized=True)
        dst_info.options = src_info.options
        dst_info.etag = src_info.etag
        dst_info.size = src_info.size
        dst_info.creation = datetime.datetime.utcnow()
        dst_info.put()
    finally:
        namespace_manager.set_namespace(saved_ns)

    # Copy the actual blob content after restoring the namespace.
    self.blob_storage.StoreBlob(dst_blobkey,
                                self.blob_storage.OpenBlob(src_blobkey))
def post_start_creation(self, filename, options):
    """Begin a resumable upload session for a GCS object (POST XML API).

    Only one upload session per GCS file is supported: any previous
    _AE_GCSFileInfo_ for the same file (a finalized object or an
    in-flight upload session) is removed before the new one is created.

    Args:
      filename: gcs filename of form /bucket/filename.
      options: a dict containing all user specified request headers.
        e.g. {'content-type': 'foo', 'x-goog-meta-bar': 'bar'}.

    Returns:
      a token (blobkey) used for continuing upload.
    """
    saved_ns = namespace_manager.get_namespace()
    try:
        # File metadata records always live in the empty namespace.
        namespace_manager.set_namespace('')
        common.validate_file_path(filename)
        blobkey = self._filename_to_blobkey(filename)
        self._cleanup_old_file(_AE_GCSFileInfo_.get_by_key_name(blobkey))
        session = _AE_GCSFileInfo_(key_name=blobkey,
                                   filename=filename,
                                   finalized=False)
        session.options = options
        session.put()
        return blobkey
    finally:
        namespace_manager.set_namespace(saved_ns)
def setUp(self):
    """Create a test course, patch comparators, and enter its namespace."""
    super(NewsTestBase, self).setUp()
    self.base = '/' + self.COURSE_NAME
    self.app_context = actions.simple_add_course(
        self.COURSE_NAME, self.ADMIN_EMAIL, 'Title')
    self.maxDiff = None  # Show full text expansion of expect mismatches.

    # Simplify equivalence checks by installing deep, field-wise
    # comparators rather than relying on object-identity equality.
    def _news_items_equal(lhs, rhs):
        return (lhs.resource_key == rhs.resource_key and
                lhs.when == rhs.when and
                lhs.url == rhs.url and
                lhs.labels == rhs.labels)
    news.NewsItem.__eq__ = _news_items_equal
    news.NewsItem.__repr__ = lambda x: x.__dict__.__repr__()

    def _seen_items_equal(lhs, rhs):
        # Timestamps may differ slightly across put/get; allow 2 seconds.
        return (lhs.resource_key == rhs.resource_key and
                abs((lhs.when - rhs.when).total_seconds()) < 2)
    news.SeenItem.__eq__ = _seen_items_equal
    news.SeenItem.__repr__ = lambda x: x.__dict__.__repr__()

    news.I18nTitleRegistry.register(
        'test', lambda x: 'Test Item ' + x.split(':', 1)[1])
    self.old_namespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace(self.NAMESPACE)

    # News is always enabled during own-module tests so that we do not
    # accumulate any regressions within ourselves. Other modules are
    # free to simply disable if they wish.
    self.save_is_enabled = news.is_enabled
    news.is_enabled = lambda: True
    if not news.custom_module.enabled:
        news.custom_module.enable()
def outputPLstats(keyname, namespace = ''):
    """Print per-stock profit/loss statistics for one metaAlg entity.

    Loads meSchema.metaAlg by key name from the given namespace and
    prints, for each stock, the total, extreme value, count, mean and
    standard deviation of its winning and losing trades.

    Args:
      keyname: key name of the meSchema.metaAlg entity to analyze.
      namespace: datastore namespace to read from. '' (the default)
        reads the default namespace and reports a fixed four-stock
        list; otherwise the namespace name itself is used as the sole
        stock symbol.
    """
    import processDesires
    originalNamespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace(namespace)
    try:
        meta = meSchema.metaAlg.get_by_key_name(keyname)
        # NOTE(review): eval() of stored entity text -- only safe if
        # meta.CashDelta is trusted repr()-style output; confirm source.
        trades = eval(meta.CashDelta)
        if namespace == '':
            stockList = ['HBC','CME','GOOG','INTC']
        else:
            stockList = [namespace]
        tradeDict = {}
        for stock in stockList:
            # Per-stock accumulators: P/L totals plus the individual
            # adjusted trade values for later mean/stddev computation.
            tradeDict[stock] = {'P':0.0,'L':0.0,'Ptrades':[],'Ltrades':[]}
        for trade in trades:
            stock = trade['Symbol']
            # Trades with PandL in [-14.0, -10.0] are excluded entirely,
            # and all others are shifted by -10.0 -- presumably a fixed
            # per-trade cost; TODO confirm the meaning of these constants.
            if not (trade['PandL'] <= -10.0 and trade['PandL'] >= -14.0) and trade['PandL'] - 10.0 > 0.0:
                tradeDict[stock]['P'] += trade['PandL'] - 10.0
                tradeDict[stock]['Ptrades'].append(trade['PandL'] - 10.0)
            elif not (trade['PandL'] <= -10.0 and trade['PandL'] >= -14.0) and trade['PandL'] - 10.0 < 0.0:
                tradeDict[stock]['L'] += trade['PandL'] - 10.0
                tradeDict[stock]['Ltrades'].append(trade['PandL'] - 10.0)
        for stock in tradeDict:
            try:
                stdDevP, meanP = processDesires.getStandardDeviationMean(tradeDict[stock]['Ptrades'])
                stdDevL, meanL = processDesires.getStandardDeviationMean(tradeDict[stock]['Ltrades'])
                print stock, ':', tradeDict[stock]['P'], ':', max(tradeDict[stock]['Ptrades']),
                print ':', len(tradeDict[stock]['Ptrades']), ':', meanP, ' stdDev:', stdDevP
                print stock, ':', tradeDict[stock]['L'], ':', min(tradeDict[stock]['Ltrades']),
                print ':', len(tradeDict[stock]['Ltrades']), ':', meanL, ' stdDev:', stdDevL
            # NOTE(review): bare except hides real failures (e.g.
            # max()/min() on a stock with no trades) behind one marker.
            except:
                print 'Error Encountered!'
    finally:
        # Always restore the caller's namespace.
        namespace_manager.set_namespace(originalNamespace)
def testNamespaces(self):
    """Entities written in one namespace are invisible from another."""
    from google.appengine.api import namespace_manager
    original_ns = namespace_manager.get_namespace()

    class Author(db.Model):
        name = db.StringProperty()

    class Book(db.Model):
        title = db.StringProperty()

    try:
        namespace_manager.set_namespace("testing")
        author = Author(name="Douglas Adams", key_name="douglasadams")
        author.put()
        book = Book(parent=author, title="Last Chance to See")
        book.put()
        # The entity is visible from inside the "testing" namespace...
        found = Book.all().filter("title =", "Last Chance to See")
        self.assertEqual(found.get().title, book.title)
    finally:
        namespace_manager.set_namespace(original_ns)

    # ...but not from the original namespace.
    found = Book.all().filter("title =", "Last Chance to See")
    self.assertEqual(found.get(), None)
def outputPerStockHistoryForChart(keyname):
    """Print cumulative per-stock P&L bucketed by history stop steps.

    Loads the meSchema.metaAlg entity from the default namespace, builds
    one bucket per recorded history step, sums each trade's PandL into
    the bucket its step falls in, and prints one space-separated row of
    bucket totals per stock (for pasting into a chart).

    Args:
      keyname: key name of the meSchema.metaAlg entity to analyze.
    """
    from math import floor,ceil  # NOTE(review): floor is imported but unused.
    originalNamespace = namespace_manager.get_namespace()
    try:
        # metaAlg entities are read from the default ('') namespace.
        namespace_manager.set_namespace('')
        meta = meSchema.metaAlg.get_by_key_name(keyname)
    finally:
        namespace_manager.set_namespace(originalNamespace)
    stopSteps = []
    # NOTE(review): eval() of stored entity text -- only safe if
    # meta.history / meta.CashDelta are trusted repr()-style output.
    history = eval(meta.history)
    for hist in history:
        stopSteps.append(hist['step'])
    stopSteps.sort()
    trades = eval(meta.CashDelta)
    # One bucket dict per hard-coded stock symbol, keyed by stop step.
    returnDict = {'HBC':{},'CME':{},'GOOG':{},'INTC':{}}
    for stopStep in stopSteps:
        for key in returnDict:
            returnDict[key][stopStep] = 0.0
    for trade in trades:
        # 400.0 is presumably the fixed step interval between history
        # snapshots -- TODO confirm. int(ceil(...)) can index past the
        # end of stopSteps if a trade's step exceeds the last snapshot.
        tradeIndex = (trade['step'] - stopSteps[0])/400.0
        stopStep = stopSteps[int(ceil(tradeIndex))]
        returnDict[trade['Symbol']][stopStep] += trade['PandL']
    for stock in returnDict:
        # Print each stock's bucket totals on a single line.
        print stock,':',
        for step in stopSteps:
            print returnDict[stock][step],
        print
def setUp(self):
    """Create a test course, switch to its namespace, and enroll a student.

    Also monkey-patches sites.ApplicationContext.get_environ so that each
    test can inject its own 'certificate_criteria' course setting by
    mutating self.certificate_criteria.
    """
    super(CertificateCriteriaTestCase, self).setUp()
    self.base = '/' + self.COURSE_NAME
    context = actions.simple_add_course(
        self.COURSE_NAME, self.ADMIN_EMAIL, 'Certificate Criteria')
    # Remember the caller's namespace; presumably restored in tearDown --
    # confirm, since tearDown is not visible here.
    self.old_namespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
    self.course = courses.Course(None, context)
    self.course.save()
    self.TEST_USER = actions.login('*****@*****.**')
    actions.register(self, self.TEST_USER.email())
    self.student = (
        models.StudentProfileDAO.get_enrolled_student_by_user_for(
            self.TEST_USER, context))

    # Override course.yaml settings by patching app_context.
    self.get_environ_old = sites.ApplicationContext.get_environ
    self.certificate_criteria = []

    def get_environ_new(app_context):
        # Delegate to the real implementation, then override the
        # certificate criteria with the test's current list.
        environ = self.get_environ_old(app_context)
        environ['certificate_criteria'] = self.certificate_criteria
        return environ
    sites.ApplicationContext.get_environ = get_environ_new
def __init__(self, key_start=None, key_end=None, direction=None,
             include_start=True, include_end=True, namespace=None,
             _app=None):
    """Initialize a KeyRange object.

    Args:
      key_start: The starting key for this range.
      key_end: The ending key for this range.
      direction: KeyRange.ASC or KeyRange.DESC; defaults to ASC when None.
      include_start: Whether the start key should be included in the range.
      include_end: Whether the end key should be included in the range.
      namespace: The namespace for this range. If None then the current
        namespace is used.
      _app: The application id for this range (internal use).
    """
    self.direction = KeyRange.ASC if direction is None else direction
    assert self.direction in (KeyRange.ASC, KeyRange.DESC)
    self.key_start = key_start
    self.key_end = key_end
    self.include_start = include_start
    self.include_end = include_end
    if namespace is None:
        namespace = namespace_manager.get_namespace()
    self.namespace = namespace
    self._app = _app
def post(self):
    """Dispatch admin POST actions against the default ('') namespace.

    Supported actions:
      createPrivateArea: open to any logged-in user.
      saveUsers: admin-only; rejected with an error otherwise.

    Always writes a JSON response; 'result' stays False unless an action
    handler replaces the results dict.
    """
    userNamespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace('')
    # Bug fix: restore the caller's namespace even when a handler raises.
    # The original restored it only on the success path, so any exception
    # left the request stuck in the default namespace.
    try:
        results = {'result': False}
        user = self.current_user
        action = self.request.get('action')
        if action == 'createPrivateArea':
            # Anyone can create a private area.
            newName = self.request.get('privateAreaName')
            newDisplayName = self.request.get('privateAreaDisplayName')
            results = self.createPrivateArea(newName, newDisplayName)
        elif action == 'saveUsers':
            if user.isAdmin:
                users = json.loads(self.request.get('newUserValues'))
                results = self.saveUsers(users)
            else:
                results['error'] = "User must be admin."
        resultJSON = json.dumps(results)
        self.response.headers["Content-Type"] = 'application/json; charset=utf-8'
        self.response.out.write(resultJSON)
    finally:
        namespace_manager.set_namespace(userNamespace)