def put_one_document(self, msg):
    """Index a single message as a search document.

    The doc id is derived from channel, user and integer timestamp, so
    re-indexing the same message overwrites the previous document.
    """
    doc_id = '{channel_id}_{user}_{ts}'.format(
        channel_id=msg.channel_id, user=msg.user, ts=int(msg.ts))
    doc = search.Document(doc_id=doc_id, fields=[
        search.TextField(name='text', value=msg.text),
        search.AtomField(name='user_name', value=msg.get_user_name()),
        search.AtomField(name='channel_id', value=msg.channel_id),
        search.AtomField(name='msg_key', value=str(msg.key.id())),
        search.DateField(name='ts', value=msg.get_datetime()),
    ])
    # Index the document. A bounded loop replaces the original unbounded
    # recursion so a persistently failing backend cannot blow the stack.
    # The original also evaluated `result.object_id` without using it; that
    # dead expression is removed.
    for _ in range(3):
        try:
            return self.index.put(doc)
        except search.PutError as e:
            result = e.results[0]
            if result.code != search.OperationResult.TRANSIENT_ERROR:
                # Non-transient failures were silently swallowed by the
                # original code; keep that best-effort behaviour.
                return None
    # All retries hit transient errors; give up quietly (best-effort).
    return None
def update_search_index(self, owner, repo, version_key, library, bower):
    """Build and store the search document for a library in the 'repo' index.

    Combines GitHub metadata, bower metadata and npm registry metadata with
    analysis output (analyzer data when present, hydrolysis data otherwise),
    and ranks the document by the library's last-update time.
    """
    metadata = json.loads(library.metadata)
    registry_metadata = json.loads(library.registry_metadata) if library.registry_metadata else None
    npm_description = registry_metadata.get('description', '') if registry_metadata else ''
    npm_keywords = registry_metadata.get('keywords', []) if registry_metadata else []
    fields = [
        search.AtomField(name='owner', value=owner),
        search.TextField(name='repo', value=repo),
        search.AtomField(name='kind', value=library.kind),
        search.AtomField(name='version', value=version_key.id()),
        search.TextField(name='github_description', value=metadata.get('description', '')),
        search.TextField(name='bower_description', value=bower.get('description', '')),
        search.TextField(name='npm_description', value=npm_description),
        search.TextField(name='bower_keywords', value=' '.join(bower.get('keywords', []))),
        search.TextField(name='npm_keywords', value=' '.join(npm_keywords)),
        search.TextField(name='prefix_matches', value=' '.join(util.generate_prefixes_from_list(
            util.safe_split_strip(metadata.get('description')) +
            util.safe_split_strip(bower.get('description')) +
            util.safe_split_strip(repo)))),
    ]
    # Generate weighting field: (text, weight) pairs expanded below into one
    # repeated-text field so matches in those texts rank higher.
    weights = [(repo, 10)]
    analysis = Content.get_by_id('analysis', parent=version_key)
    if analysis is not None and analysis.status == Status.ready:
        data = analysis.get_json()
        if data.get('analyzerData', None) is not None:
            # Use analyzer data for the search index.
            element_objects = data.get('analyzerData', {}).get('elements', [])
            elements = [element.get('tagname', '') or element.get('classname', '')
                        for element in element_objects]
            # Fix: truthiness instead of `!= []`, which compares unequal for
            # dict views/non-list iterables and is unidiomatic for lists.
            if elements:
                fields.append(search.TextField(name='element', value=' '.join(elements)))
                weights.append((' '.join(elements), 5))
            behavior_objects = data.get('analyzerData', {}).get('metadata', {}).get('polymer', {}).get('behaviors', [])
            behaviors = [behavior.get('name', '') for behavior in behavior_objects]
            if behaviors:
                fields.append(search.TextField(name='behavior', value=' '.join(behaviors)))
                weights.append((' '.join(behaviors), 5))
        else:
            # Use hydrolysis data for the search index.
            elements = data.get('elementsByTagName', {}).keys()
            if elements:
                fields.append(search.TextField(name='element', value=' '.join(elements)))
                weights.append((' '.join(elements), 5))
            behaviors = data.get('behaviorsByName', {}).keys()
            if behaviors:
                fields.append(search.TextField(name='behavior', value=' '.join(behaviors)))
                weights.append((' '.join(behaviors), 5))
    weighted = []
    for value, weight in weights:
        weighted.extend([value] * weight)
    fields.append(search.TextField(name='weighted_fields', value=' '.join(weighted)))
    # Rank by seconds since 2016-01-01 so recently updated libraries sort first.
    rank = int((library.updated - datetime.datetime(2016, 1, 1)).total_seconds())
    document = search.Document(doc_id=Library.id(owner, repo), fields=fields, rank=rank)
    index = search.Index('repo')
    index.put(document)
def create_investment_agreement_document(investment, iyo_username):
    # type: (InvestmentAgreement) -> search.Document
    """Build the search document describing one investment agreement."""
    doc_id = str(investment.id)
    fields = [
        search.AtomField(name='id', value=doc_id),
        search.AtomField(name='reference', value=investment.reference),
        search.NumberField(name='status', value=investment.status),
        search.TextField(name='username', value=iyo_username),
        search.DateField(name='creation_time',
                         value=datetime.utcfromtimestamp(investment.creation_time)),
        search.TextField(name='name', value=investment.name),
        # Embedded newlines are stripped; a None address stays None.
        search.TextField(name='address',
                         value=investment.address and investment.address.replace('\n', '')),
        search.TextField(name='currency', value=investment.currency),
    ]
    if investment.amount:
        fields.append(search.TextField(name='amount',
                                       value=_stringify_float(investment.amount)))
    if investment.token_count:
        # NOTE(review): guarded on token_count but indexes token_count_float —
        # presumably equivalent representations; confirm against the model.
        fields.append(search.TextField(name='token_count',
                                       value=_stringify_float(investment.token_count_float)))
    return search.Document(doc_id=doc_id, fields=fields)
def _indexIssue(issue, updates):
    """Does the actual work of indexing the given issue.

    We expect to be called in a deferred handler.
    """
    fields = [
        search.TextField(name="summary", value=issue.summary),
        search.TextField(name="description", value=issue.description),
        search.AtomField(name="id", value=str(issue.key().id_or_name())),
        search.AtomField(name="type", value=issue.type),
        search.NumberField(name="priority", value=issue.priority),
        search.AtomField(name="state", value=issue.state),
        search.AtomField(name="resolution", value=issue.resolution)
    ]
    if not updates:
        # No updates supplied: reload the issue together with its updates.
        issue, updates = getIssue(issue.key().id())
    # Join all non-empty update comments into one HTML blob, <hr />-separated.
    comment_blocks = [u.comment for u in updates if u.comment]
    fields.append(search.HtmlField(name="comments",
                                   value="\r\n<hr />\r\n".join(comment_blocks)))
    search.Index(name="issue").put(
        search.Document(doc_id=str(issue.key()), fields=fields))
def _indexForumThread(forum_thread, new_forum_post = None):
    """Does the actual work of indexing the given forum thread. We expect to be called in a deferred handler."""
    # new_forum_post, when given, is merged into the indexed content even if
    # the datastore query below does not return it yet.
    forum= forum_thread.forum
    fields = [search.TextField(name="subject", value=forum_thread.subject),
              search.DateField(name="posted", value=forum_thread.posted),
              search.DateField(name="last_post", value=forum_thread.last_post),
              # Slug colons are normalized to underscores for the atom value.
              search.AtomField(name="forum", value=forum.slug.replace(":", "_"))]
    if forum.alliance:
        fields.append(search.AtomField(name="alliance", value=forum.alliance.replace(":", "_")))
    else:
        # Sentinel value for forums without an alliance.
        fields.append(search.AtomField(name="alliance", value="NA"))
    content = ""
    for forum_post in model.forum.ForumPost.all().ancestor(forum_thread).order("posted"):
        if new_forum_post and str(forum_post.key()) == str(new_forum_post.key()):
            # The new post already appears in the query; don't add it twice.
            new_forum_post = None
        content += "\r\n<hr />\r\n" + forum_post.content
    if new_forum_post:
        # The new post was not returned by the query above — presumably not
        # yet visible to it; prepend its content so it is still indexed.
        content = new_forum_post.content + content
    fields.append(search.HtmlField(name="content", value=content))
    doc = search.Document(
        doc_id = str(forum_thread.key()),
        fields = fields)
    index = search.Index(name="forum")
    index.put(doc)
def create_searchable_docs(root, file_path, locales=None):
    """Create search documents for each field set parsed from a file.

    Each parsed field set yields one document with typed locale/path/title/
    html fields plus plain text fields for the remaining parsed values.

    Raises:
        Whatever the field construction raises; the offending field set is
        logged before re-raising.
    """
    searchable_docs = []
    fields_list = _get_fields_from_file(root, file_path, locales=locales)
    for field_names_to_values in fields_list:
        parsed_fields = dict(field_names_to_values)
        try:
            language = parsed_fields['language']
            fields = [
                search.AtomField(
                    name='locale', value=parsed_fields['locale']),
                search.AtomField(
                    name='path',
                    value=(parsed_fields.get('permalink_path')
                           or parsed_fields.get('doc_id')),
                    language=language),
                search.TextField(
                    name='title', value=parsed_fields['title'],
                    language=language),
                search.HtmlField(
                    name='html', value=parsed_fields['html'],
                    language=language),
            ]
            # Fix: skip names already covered by the typed fields above. The
            # original computed this list but never applied it, so those
            # fields were indexed twice.
            existing_fields = ('locale', 'path', 'title', 'html')
            for name, value in field_names_to_values:
                if name in existing_fields:
                    continue
                fields.append(search.TextField(name=name, value=value,
                                               language=language))
            doc = search.Document(doc_id=parsed_fields['doc_id'], fields=fields)
            searchable_docs.append(doc)
        except Exception:
            logging.error('Error indexing doc -> {}'.format(field_names_to_values))
            raise
    return searchable_docs
def create_node_order_document(order, iyo_username):
    """Build the search document for a node order, including shipping and
    billing contact details when present."""
    doc_id = '%s' % order.id
    fields = [
        search.AtomField(name='id', value=doc_id),
        search.AtomField(name='socket', value=order.socket),
        # -1 marks orders without an odoo sale order id.
        search.NumberField(name='so', value=order.odoo_sale_order_id or -1),
        search.NumberField(name='status', value=order.status),
        search.DateField(name='order_time',
                         value=datetime.utcfromtimestamp(order.order_time)),
        search.TextField(name='username', value=iyo_username),
    ]
    # Shipping first, then billing — same field order as before.
    for prefix, info in (('shipping', order.shipping_info),
                         ('billing', order.billing_info)):
        if info:
            fields += [
                search.TextField(name='%s_name' % prefix, value=info.name),
                search.TextField(name='%s_email' % prefix, value=info.email),
                search.TextField(name='%s_phone' % prefix, value=info.phone),
                search.TextField(name='%s_address' % prefix, value=info.address),
            ]
    return search.Document(doc_id, fields)
def _build_event_date(i, event, ed, venue, start, end, is_hours=False):
    """Helper that builds one search document for a specific event date."""
    category = CATEGORY.HOURS if is_hours else ed.category
    doc_id = '%s-%s' % (event.slug, i)
    fields = [
        search.TextField(name='name', value=event.name),
        search.AtomField(name='slug', value=event.slug),
        search.AtomField(name='event_keystr', value=str(event.key.urlsafe())),
        # Bits specific to the event date: start/end as UTC unix timestamps.
        search.NumberField(name='start',
                           value=unix_time(timezone('UTC').localize(start))),
        search.NumberField(name='end',
                           value=unix_time(timezone('UTC').localize(end))),
        search.AtomField(name='category', value=category),
        # Venue/geo information.
        search.AtomField(name='venue_slug', value=ed.venue_slug),
    ]
    venue_geo = None
    if venue.geo:
        pt = venue.geo[0] if isinstance(venue.geo, list) else venue.geo
        venue_geo = search.GeoPoint(pt.lat, pt.lon)
    # The geo field is always appended, possibly with a None value.
    fields.append(search.GeoField(name='venue_geo', value=venue_geo))
    return search.Document(doc_id=doc_id, fields=fields)
def update_search_index(self, owner, repo, version_key, library, bower):
    """Build and store the search document for a library in the 'repo' index.

    Combines GitHub and bower metadata with hydrolysis analysis output and
    ranks the document by the library's last-update time.
    """
    metadata = json.loads(library.metadata)
    fields = [
        search.AtomField(name='owner', value=owner),
        search.TextField(name='repo', value=repo),
        search.AtomField(name='kind', value=library.kind),
        search.AtomField(name='version', value=version_key.id()),
        search.TextField(name='github_description', value=metadata.get('description', '')),
        search.TextField(name='bower_description', value=bower.get('description', '')),
        search.TextField(name='bower_keywords', value=' '.join(bower.get('keywords', []))),
        search.TextField(
            name='prefix_matches',
            value=' '.join(
                util.generate_prefixes_from_list(
                    util.safe_split_strip(metadata.get('description')) +
                    util.safe_split_strip(bower.get('description')) +
                    util.safe_split_strip(repo)))),
    ]
    # Generate weighting field: (text, weight) pairs expanded below into one
    # repeated-text field so matches in those texts rank higher.
    weights = [(repo, 10)]
    analysis = Content.get_by_id('analysis', parent=version_key)
    if analysis is not None and analysis.status == Status.ready:
        analysis = json.loads(analysis.content)
        # Fix: truthiness instead of `!= []` — dict .keys() views compare
        # unequal to a list, so the original check could mis-fire.
        elements = analysis.get('elementsByTagName', {}).keys()
        if elements:
            fields.append(
                search.TextField(name='element', value=' '.join(elements)))
            weights.append((' '.join(elements), 5))
        behaviors = analysis.get('behaviorsByName', {}).keys()
        if behaviors:
            fields.append(
                search.TextField(name='behavior', value=' '.join(behaviors)))
            weights.append((' '.join(behaviors), 5))
    weighted = []
    for value, weight in weights:
        weighted.extend([value] * weight)
    fields.append(
        search.TextField(name='weighted_fields', value=' '.join(weighted)))
    # Rank by seconds since 2016-01-01 so recently updated libraries sort first.
    rank = int(
        (library.updated - datetime.datetime(2016, 1, 1)).total_seconds())
    document = search.Document(doc_id=Library.id(owner, repo), fields=fields, rank=rank)
    index = search.Index('repo')
    index.put(document)
def save_user_doc(main_user):
    """Create or refresh the search document for a user and return it."""
    fields = [
        search.AtomField('email', main_user.email),
        search.TextField('name', main_user.name),
        search.DateField('creation', main_user.creation),
    ]
    # One atom field per group membership.
    for group in main_user.groups:
        fields.append(search.AtomField('group', group))
    doc = search.Document(doc_id=unicode(main_user.key.id()), fields=fields)
    user_index.put(doc)
    return doc
def CreatePostDocument(title, content, postID, category, subcategory, price):
    """Build the search document for a post; postID doubles as the doc id."""
    fields = [
        search.TextField(name='title', value=title),
        search.TextField(name='postID', value=postID),
        search.TextField(name='content', value=content),
        search.AtomField(name='category', value=category),
        search.AtomField(name='subcategory', value=subcategory),
        # price is coerced to float for the numeric field.
        search.NumberField(name='price', value=float(price)),
    ]
    return search.Document(doc_id=postID, fields=fields)
def add_to_index(group_id, group_name, group_description, group_image=None):
    """Index a group document (name, suggestions, description, image)."""
    fields = [
        search.TextField(name='name', value=group_name),
        # Suggestion text is generated from the group name.
        search.TextField(name='suggestions', value=build_suggestions(group_name)),
        search.AtomField(name='description', value=group_description),
        search.AtomField(name='group_image', value=group_image),
    ]
    search.Index(name='groups').put(
        search.Document(doc_id=group_id, fields=fields))
def create_doc(self, key, value):
    """Build a search document from an identifier record.

    Args:
        key: used verbatim as the document id.
        value: dict carrying an 'identifiers' mapping with 'title', 'name',
            'status' and 'CLS' entries.

    Returns:
        The constructed search.Document.
    """
    data = value['identifiers']
    fields = [
        search.TextField(name="title", value=data['title']),
        search.TextField(name="details", value=data['name']),
        search.AtomField(name="status", value=data['status']),
        search.AtomField(name="category", value=data['CLS']),
    ]
    # Fix: the original `if document is None: return None` was dead code —
    # the Document constructor either returns an instance or raises.
    return search.Document(doc_id=key, fields=fields)
def job(cursor=None):
    """Deferred task: (re)index Prospect entities in batches of 200,
    chaining itself until the query is exhausted."""
    qry = Prospect.all()
    if cursor:
        qry.with_cursor(cursor)
    prospects = qry.fetch(200)
    if not prospects:
        return  # nothing left to index
    index = search.Index(name=PROSPECT_INDEX)
    to_put = []
    for prospect in prospects:
        # Drop any stale document first; ignore ids the index rejects.
        try:
            index.delete(prospect.id)
        except ValueError:
            pass
        to_put.append(search.Document(doc_id=prospect.id, fields=[
            search.AtomField(name='key', value=prospect.id),
            search.TextField(name='name', value=prospect.name),
            search.TextField(name='address', value=prospect.address),
            search.TextField(name='phone', value=prospect.phone),
            search.TextField(name='email', value=prospect.email),
        ]))
    index.put(to_put)
    # Schedule the next batch from the current cursor position.
    deferred.defer(job, qry.cursor())
def index_artifact(index_, id_, fields):
    """Index an artifact document built from a flat (type, name, value) list.

    Args:
        index_: the search.Index to write to.
        id_: the document id.
        fields: flat list laid out as [type, name, value, type, name, value, ...].

    On a put error the write is retried up to 3 times against an index whose
    4-digit numeric suffix is incremented each attempt (rollover to a fresh
    index, e.g. when the current one is out of space).
    """
    # Dispatch table replaces the if/elif ladder; unknown types are skipped,
    # matching the original behaviour.
    ctor_by_type = {
        ATOMFIELD: search.AtomField,
        TEXTFIELD: search.TextField,
        HTMLFIELD: search.HtmlField,
        NUMBERFIELD: search.NumberField,
        DATEFIELD: search.DateField,
        GEOPOINTFIELD: search.GeoField,
    }
    f = []
    for i in xrange(0, len(fields), 3):
        ctor = ctor_by_type.get(fields[i])
        if ctor is not None:
            f.append(ctor(name=fields[i + 1], value=fields[i + 2]))
    doc = search.Document(doc_id=id_, fields=f)
    retry_count = 0
    while True:
        try:
            index_.put(doc)
            break
        except search.Error:
            if retry_count < 3:
                log.warning(
                    'Error put doc into index, could be out of space. Creating new index'
                )
                # Fix: increment the numeric suffix. The original rebuilt the
                # SAME name (int(...) without +1), so the "new index" retry
                # wrote to the index that had just failed.
                index_ = search.Index(
                    index_.name[:-4] + str(int(index_.name[-4:]) + 1).zfill(4),
                    namespace=index_.namespace)
                retry_count += 1
            else:
                # Re-raise preserving the original traceback.
                raise
def re_index_question(question_key):
    """Rebuild the search index entry for a question and its replies."""
    question_index = search.Index(name=QUESTION_INDEX)
    # Cleanup any previous index entry.
    try:
        question_index.delete([str(question_key)])
    except ValueError:
        pass  # no index found for this customer yet
    # Re-add index.
    question = Question.get(question_key)
    bizz_check(question)
    fields = [
        search.AtomField(name='question_key', value=str(question_key)),
        search.TextField(name='question_language', value=question.language),
        search.TextField(name='question_title', value=question.title),
        search.TextField(name='question_description', value=question.description),
        search.TextField(name='question_tags', value=" ".join(question.modules)),
    ]
    # One description field per reply, keyed by the reply id.
    for qr in question.replies(False):
        fields.append(search.TextField(
            name='question_reply_description_%s' % qr.id,
            value=qr.description))
    question_index.put(search.Document(doc_id=str(question_key), fields=fields))
def searchablize_tag_or_stream(item, index_name, response):
    """Index every substring of every token of item.name.

    One document is stored per (token, substring) pair so queries on any
    substring of a word in the name can find the item.
    """
    index = search.Index(name=index_name, namespace=search_index_namespace)
    if item is None:
        return
    try:
        for tok in item.name.split():
            # All non-empty substrings tok[j:i]. The original also built a
            # dead `full_str` accumulator and wrapped each substring in a
            # single-element list before iterating it; both removed.
            for i in range(len(tok) + 1):
                for j in range(i):
                    doc = search.Document(fields=[
                        search.AtomField(name='id', value=str(item.key.id())),
                        search.TextField(name='name', value=item.name),
                        search.TextField(name='string', value=tok[j:i]),
                        search.DateField(
                            name='date_added',
                            value=datetime.datetime.now().date())
                    ])
                    # Index the document.
                    index.put(doc)
    except search.PutError as e:
        # Report the first failed result; earlier puts are not rolled back.
        result = e.results[0]
        response['errResult'] = str(result)
def _buildCoreProductFields(cls, pid, name, description, category,
                            category_name, price):
    """Construct the 'core' document field list shared by all Products.

    The various categories (defined in 'categories.py') may append
    specialized fields to this core list (see _buildProductFields).
    """
    # Strip markup from the description, which can come from user input, so
    # templates need not sanitize it and the Search API can mark up query
    # terms in generated snippets. Demonstration-only; a real app should use
    # a library like Beautiful Soup instead. All other rendered values are
    # escaped by the templating library.
    plain_description = re.sub(r'<[^>]*?>', '', description)
    return [
        search.TextField(name=cls.PID, value=pid),
        # The 'updated' field is always set to the current date.
        search.DateField(name=cls.UPDATED, value=datetime.datetime.now().date()),
        search.TextField(name=cls.PRODUCT_NAME, value=name),
        search.TextField(name=cls.DESCRIPTION, value=plain_description),
        search.AtomField(name=cls.CATEGORY, value=category),
        search.NumberField(name=cls.AVG_RATING, value=0.0),
        search.NumberField(name=cls.PRICE, value=price),
    ]
def get_field(self, key, value):
    """Build the search field for `key` per the type name in self.mapping.

    Args:
        key: field name; also the lookup key into self.mapping.
        value: raw field value. For DATEFIELD this is a millisecond
            timestamp converted to a datetime (negative values are treated
            as their absolute value, as before).

    Returns:
        A search field instance, or None when the key is unmapped, the
        mapping value is falsy, or the type name is unknown.
    """
    try:
        field_type = self.mapping[key]
    except KeyError as key_error:
        # Unmapped keys are reported and skipped (original behaviour).
        print(key_error)
        return None
    if not field_type:
        return None
    if field_type == 'TEXTFIELD':
        return search.TextField(name=key, value=value)
    if field_type == 'ATOMFIELD':
        return search.AtomField(name=key, value=value)
    if field_type == 'NUMBERFIELD':
        return search.NumberField(name=key, value=value)
    if field_type == 'HTMLFIELD':
        return search.HtmlField(name=key, value=value)
    if field_type == 'DATEFIELD':
        # Milliseconds -> datetime; abs() replaces the manual sign flip.
        return search.DateField(
            name=key, value=datetime.fromtimestamp(abs(value) / 1000.0))
    if field_type == 'GEOFIELD':
        return search.GeoField(name=key, value=value)
    return None
def test_to_document(self, mock_get_document_fields):
    """to_document should wrap the generated fields in a search.Document."""
    model = Test(id='fake')
    doc_fields = [search.AtomField(name='text_field', value='12345ABC')]
    mock_get_document_fields.return_value = doc_fields
    expected = search.Document(doc_id=model.key.urlsafe(), fields=doc_fields)
    result = model.to_document()
    self.assertEqual(result, expected)
def IndexPages(title, idpage, summary, content):
    """Build the search document for a page; idpage doubles as the doc id."""
    fields = [
        search.TextField(name='title', value=title),
        search.AtomField(name='idpage', value=idpage),
        search.TextField(name='summary', value=summary),
        search.HtmlField(name='content', value=content),
    ]
    return search.Document(doc_id=idpage, fields=fields)
def _buildFields(cls, id, title, body, domain, tags):
    """Assemble the common search fields for an indexed document."""
    return [
        search.TextField(name=cls.ID, value=id),
        search.TextField(name=cls.TITLE, value=title),
        search.AtomField(name=cls.DOMAIN, value=domain),
        # Tags are flattened into one space-separated text field.
        search.TextField(name=cls.TAGS, value=" ".join(tags)),
        search.TextField(name=cls.BODY, value=body),
    ]
def test_to_seach_fields(self):
    """Covers _to_search_fields for each supported property type: repeated,
    ndb.Key, datetime, bool and GeoPt values."""
    # Test list field generation: each item yields a Text + Atom field pair.
    entity = TestEntity(test_repeatedprop=['item_1', 'item_2'])
    search_fields = entity._to_search_fields('test_repeatedprop',
                                             ['item_1', 'item_2'])
    expected_fields = [
        search.TextField(name='test_repeatedprop', value='item_1'),
        search.AtomField(name='test_repeatedprop', value='item_1'),
        search.TextField(name='test_repeatedprop', value='item_2'),
        search.AtomField(name='test_repeatedprop', value='item_2')
    ]
    self.assertEqual(expected_fields, search_fields)
    # Test ndb.Key field generation: keys index as their urlsafe atom.
    test_key = ndb.Key('Test', 1)
    entity = TestEntity(test_keyproperty=test_key)
    search_field = entity._to_search_fields('test_keyproperty', test_key)
    expected_field = [
        search.AtomField(name='test_keyproperty', value=test_key.urlsafe())
    ]
    self.assertEqual(expected_field, search_field)
    # Test datetime field generation.
    date = datetime.datetime(year=2017, month=1, day=5)
    entity = TestEntity(test_datetime=date)
    search_field = entity._to_search_fields('test_datetime', date)
    expected_field = [search.DateField(name='test_datetime', value=date)]
    self.assertEqual(expected_field, search_field)
    # Test boolean field generation: bools index as their string form.
    entity = TestEntity(test_bool=True)
    search_field = entity._to_search_fields('test_bool', True)
    expected_field = [search.AtomField(name='test_bool', value='True')]
    self.assertEqual(expected_field, search_field)
    # Test geopt field generation: ndb.GeoPt maps to search.GeoPoint.
    geopt = ndb.GeoPt('52.37, 4.88')
    entity = TestEntity(test_geopt=geopt)
    search_field = entity._to_search_fields('test_geopt', geopt)
    expected_field = [
        search.GeoField(name='test_geopt',
                        value=search.GeoPoint(52.37, 4.88))
    ]
    self.assertEqual(expected_field, search_field)
def add_faceted_document(index):
    """Put a sample document carrying atom and number facets into the index."""
    document = search.Document(
        doc_id='doc1',
        fields=[search.AtomField(name='name', value='x86')],
        facets=[
            search.AtomFacet(name='type', value='computer'),
            search.NumberFacet(name='ram_size_gb', value=8),
        ])
    index.put(document)
def _post_put_hook(self, future):
    """Runs after every datastore put: ensures a QR code exists and refreshes
    the member's search document."""
    # Generate the member's QR code if the member does not have one.
    # NOTE(review): the nested put() re-enters this hook; the qr_code guard
    # stops the recursion after one pass provided get_qr_code() returns a
    # truthy value — confirm it always does.
    if not self.qr_code:
        self.qr_code = self.get_qr_code()
        self.put()
    # Update the member's associated search document.
    fields = [
        search.TextField(name='name', value=self.name),
        search.AtomField(name='dce', value=self.dce),
        search.AtomField(name='email', value=self.email)
    ]
    # One atom per paid semester, rounded to one decimal place.
    for semester in self.semesters_paid:
        fields.append(
            search.AtomField(name='semester', value=str(round(semester, 1))))
    doc = search.Document(doc_id=self.id, fields=fields)
    search.Index(name=MEMBER_SEARCH_INDEX_NAME).put(doc)
def _to_search_fields(self, key, value):
    """Converts an ndb.Property value into search document fields.

    Args:
        key: str, the name of the field.
        value: the property value to convert.

    Returns:
        A list of search fields created from the specified property.
        Repeated properties create one field per item; None yields [].
    """
    if value is None:
        return []
    if isinstance(value, list):
        search_fields = []
        for val in value:
            search_fields.extend(self._to_search_fields(key, val))
        return search_fields
    if isinstance(value, ndb.Key):
        return [search.AtomField(name=key, value=value.urlsafe())]
    if isinstance(value, (datetime.datetime, datetime.date)):
        return [search.DateField(name=key, value=value)]
    # bool must be tested before Number: bool is a subclass of int.
    if isinstance(value, bool):
        return [search.AtomField(name=key, value=str(value))]
    # Fix: dropped the redundant `and not isinstance(value, bool)` — bools
    # are already returned by the branch above.
    if isinstance(value, numbers.Number):
        return [search.NumberField(name=key, value=value)]
    if isinstance(value, ndb.GeoPt):
        return [
            search.GeoField(
                name=key, value=search.GeoPoint(value.lat, value.lon))
        ]
    # Fallback: index as both full text and an exact-match atom.
    return [
        search.TextField(name=key, value=unicode(value)),
        search.AtomField(name=key, value=unicode(value))
    ]
def post(self, request, *args, **kwargs):
    """Admin-only endpoint: index up to `limit` restaurants that have not
    been indexed yet, stamping each one's search_updated on success."""
    if request.META.get('HTTP_API_KEY') != settings.ADMIN_API_KEY:
        return HttpResponseForbidden()
    limit = int(request.GET.get('limit', 3))
    index = search.Index(name=settings.SEARCH_INDEX)
    indexed, errors = [], []
    pending = Restaurant.query(Restaurant.search_updated==None)
    for item in pending.fetch(limit):
        doc = search.Document(doc_id=item.key.id(), fields=[
            search.TextField(name='name', value=item.name),
            search.AtomField(name='cuisine', value=item.cuisine),
            search.AtomField(name='neighborhood', value=item.neighborhood),
            # Missing prices index as 0.
            search.NumberField(name='lunch_price', value=item.lunch_price or 0),
            search.TextField(name='lunch_menu', value=item.lunch_menu_text),
            search.NumberField(name='dinner_price', value=item.dinner_price or 0),
            search.TextField(name='dinner_menu', value=item.dinner_menu_text),
        ])
        try:
            index.put(doc)
            item.search_updated = datetime.now()
            item.put()
        except search.Error:
            logging.exception("Error indexing: %s" % item.name)
            errors.append(item.name)
        else:
            indexed.append(item.name)
    return JsonResponse({
        "indexed": indexed,
        "num_indexed": len(indexed),
        "errors": errors,
        "num_errors": len(errors),
    })
def build_index(venue):
    """Construct a search document for the venue.

    TODO: This needs to be done yet
    """
    # Construct search document fields.
    fields = [
        search.AtomField(name='slug', value=venue.slug),
        search.TextField(name='name', value=venue.name),
        search.AtomField(name='category', value=venue.category),
        # Substrings of the name come from get_substrs.
        search.TextField(name='substrs', value=get_substrs(venue.name)),
    ]
    geopoint = None
    if venue.geo:
        first = venue.geo[0]
        geopoint = search.GeoPoint(first.lat, first.lon)
    # The geo field is always appended, possibly with a None value.
    fields.append(search.GeoField(name='geo', value=geopoint))
    return search.Document(doc_id=venue.slug, fields=fields)
def to_search_document(self, rank=None):
    """Extends inherited method in Model; additionally tags video lessons."""
    fields = super(Lesson, self)._get_search_fields()
    if self.type == 'video':
        # Simplify checking for video downstream with an explicit atom.
        fields.append(search.AtomField(name='content_type', value='video'))
    return search.Document(doc_id=self.uid, fields=fields, rank=rank,
                           language='en')
def post(self):
    """Handle the NGO registration form submission.

    Signed-in users that already have a User or NGO record are redirected to
    /home; anonymous users are sent to /login. Otherwise a new NGO entity is
    created from the form fields, indexed for search, and the user continues
    to the proof-of-registration step.
    """
    user= users.get_current_user()
    if user:
        userid = user.user_id()
        # Reject users that already registered as either account kind.
        authenticationUser = User.query(User.userid == userid).fetch(1)
        authenticationNgo = NGO.query(NGO.userid == userid).fetch(1)
        if authenticationUser or authenticationNgo:
            self.redirect("/home")
        else:
            # Collect the registration form fields.
            name = self.request.get("name")
            email = user.email()
            description = self.request.get("description")
            pancardNumber = self.request.get("pancardNumber")
            chiefFunctionary = self.request.get("chiefFunctionary")
            chairman = self.request.get("chairman")
            sectorOfOperation = self.request.get("sectorOfOperation")
            stateOfOperation = self.request.get("stateOfOperation")
            registrationNumber = self.request.get("registrationNumber")
            dateOfRegistration = self.request.get("dateOfRegistration")
            stateOfRegistration = self.request.get("stateOfRegistration")
            telephone = self.request.get("telephone")
            address = self.request.get("address")
            # Parse the date string via the handler's date() helper.
            dateOfRegistration = self.date(dateOfRegistration)
            # Build and persist the NGO entity; credibility starts False
            # until verified, and projects starts empty.
            ngo = NGO()
            ngo.userid = userid
            ngo.name = name
            ngo.credibility = False
            ngo.description = description
            ngo.pancardNumber = pancardNumber
            ngo.chiefFunctionary = chiefFunctionary
            ngo.chairman = chairman
            ngo.sectorOfOperation = sectorOfOperation
            ngo.stateOfOperation = stateOfOperation
            ngo.registrationNumber = registrationNumber
            ngo.dateOfRegistration = dateOfRegistration
            ngo.stateOfRegistration = stateOfRegistration
            ngo.telephone = telephone
            ngo.projects = []
            ngo.address = address
            ngo.email = email
            ngo.put()
            # Index name + description for search, keyed by the user id.
            index = search.Index(name = "NGO")
            document = search.Document(doc_id = userid,
                                       fields = [
                                           search.AtomField(name = "name", value = name ),
                                           search.TextField(name = "description", value = description)])
            try:
                index.put(document)
            except search.Error:
                logging.exception("Put Failed")
            # NOTE(review): fixed delay — presumably waiting for eventual
            # consistency before the redirect; fragile, worth revisiting.
            sleep(5) #cheap trick but none the less it works!
            self.redirect("/signup/ngoRegistration/proofOfRegistration")
    else:
        self.redirect("/login")