Code example #1
File: skeleton.py  Project: phorward/viur-server
    def toDB(self, clearUpdateTag=False):
        """
			Store current Skeleton entity to data store.

			Stores the current data of this instance into the database.
			If a *key* value is set on the object, this entity will be updated;
			otherwise a new entity will be created.

			To read a Skeleton object from the data store, see :func:`~server.skeleton.Skeleton.fromDB`.

			:param clearUpdateTag: If True, this entity won't be marked dirty;
				this prevents it from being picked up by the background task that updates relations.
			:type clearUpdateTag: bool

			:returns: The data store key of the entity.
			:rtype: str
		"""
        def txnUpdate(key, mergeFrom, clearUpdateTag):
            blobList = set()
            skel = type(mergeFrom)()
            # Load the current values from Datastore or create a new, empty db.Entity
            if not key:
                dbObj = db.Entity(skel.kindName)
                oldBlobLockObj = None
            else:
                k = db.Key(key)
                assert k.kind() == skel.kindName, "Cannot write to invalid kind!"
                try:
                    dbObj = db.Get(k)
                except db.EntityNotFoundError:
                    dbObj = db.Entity(k.kind(),
                                      id=k.id(),
                                      name=k.name(),
                                      parent=k.parent())
                else:
                    skel.setValues(dbObj)
                try:
                    oldBlobLockObj = db.Get(
                        db.Key.from_path("viur-blob-locks", str(k)))
                except:
                    oldBlobLockObj = None

            # Remember old hashes for bones that must have a unique value
            oldUniqeValues = {}
            for boneName, boneInstance in skel.items():
                if boneInstance.unique:
                    if "%s.uniqueIndexValue" % boneName in dbObj.keys():
                        oldUniqeValues[boneName] = dbObj["%s.uniqueIndexValue"
                                                         % boneName]

            # Merge the values from mergeFrom in
            for key, bone in skel.items():
                if key in mergeFrom.keys():
                    bone.mergeFrom(skel.valuesCache, key, mergeFrom)
            unindexed_properties = []
            for key, _bone in skel.items():
                tmpKeys = dbObj.keys()
                dbObj = _bone.serialize(skel.valuesCache, key, dbObj)
                newKeys = [x for x in dbObj.keys()
                           if x not in tmpKeys]  # these are the ones the bone added
                if not _bone.indexed:
                    unindexed_properties += newKeys
                blobList.update(_bone.getReferencedBlobs(
                    self.valuesCache, key))

            if clearUpdateTag:
                # Mark this entity as up-to-date.
                dbObj["viur_delayed_update_tag"] = 0
            else:
                # Mark this entity as dirty, so the background task will catch it up and update its references.
                dbObj["viur_delayed_update_tag"] = time()
            dbObj.set_unindexed_properties(unindexed_properties)
            dbObj = skel.preProcessSerializedData(dbObj)
            try:
                ourKey = str(dbObj.key())
            except:  # It's not an update but an insert; no key yet
                ourKey = None
            # Lock hashes from bones that must have unique values
            newUniqeValues = {}
            for boneName, boneInstance in skel.items():
                if boneInstance.unique:
                    # Check if the property is really unique
                    newUniqeValues[
                        boneName] = boneInstance.getUniquePropertyIndexValue(
                            self.valuesCache, boneName)
                    if newUniqeValues[boneName] is not None:
                        try:
                            lockObj = db.Get(
                                db.Key.from_path(
                                    "%s_%s_uniquePropertyIndex" %
                                    (skel.kindName, boneName),
                                    newUniqeValues[boneName]))

                            if lockObj["references"] != ourKey:
                                # This value has been claimed, and not by us

                                raise ValueError(
                                    "The unique value '%s' of bone '%s' has been recently claimed!"
                                    % (self.valuesCache[boneName], boneName))

                        except db.EntityNotFoundError:  # No lockObj found for that value, we can use that
                            pass
                        dbObj["%s.uniqueIndexValue" %
                              boneName] = newUniqeValues[boneName]
                    else:
                        if "%s.uniqueIndexValue" % boneName in dbObj.keys():
                            del dbObj["%s.uniqueIndexValue" % boneName]
            if not skel.searchIndex:
                # We generate the search tags (viur_tags) from the full skel, not this (possibly incomplete) one
                tags = []
                for key, _bone in skel.items():
                    if _bone.searchable:
                        tags += [
                            tag for tag in _bone.getSearchTags(
                                self.valuesCache, key)
                            if (tag not in tags and len(tag) < 400)
                        ]
                dbObj["viur_tags"] = tags
            db.Put(dbObj)  # Write the core entry back
            # Now write the blob-lock object
            blobList = skel.preProcessBlobLocks(blobList)
            if blobList is None:
                raise ValueError(
                    "Did you forget to return the bloblist somewhere inside getReferencedBlobs()?"
                )
            if None in blobList:
                raise ValueError("None is not a valid blobKey.")
            if oldBlobLockObj is not None:
                oldBlobs = set(oldBlobLockObj["active_blob_references"]
                               if oldBlobLockObj["active_blob_references"]
                               is not None else [])
                removedBlobs = oldBlobs - blobList
                oldBlobLockObj["active_blob_references"] = list(blobList)
                if oldBlobLockObj["old_blob_references"] is None:
                    oldBlobLockObj["old_blob_references"] = [
                        x for x in removedBlobs
                    ]
                else:
                    tmp = set(oldBlobLockObj["old_blob_references"] +
                              [x for x in removedBlobs])
                    oldBlobLockObj["old_blob_references"] = [
                        x for x in (tmp - blobList)
                    ]
                oldBlobLockObj["has_old_blob_references"] = oldBlobLockObj[
                    "old_blob_references"] is not None and len(
                        oldBlobLockObj["old_blob_references"]) > 0
                oldBlobLockObj["is_stale"] = False
                db.Put(oldBlobLockObj)
            else:  # We need to create a new blob-lock-object
                blobLockObj = db.Entity("viur-blob-locks",
                                        name=str(dbObj.key()))
                blobLockObj["active_blob_references"] = list(blobList)
                blobLockObj["old_blob_references"] = []
                blobLockObj["has_old_blob_references"] = False
                blobLockObj["is_stale"] = False
                db.Put(blobLockObj)
            for boneName, boneInstance in skel.items():
                if boneInstance.unique:
                    # Update/create/delete missing lock-objects
                    if boneName in oldUniqeValues.keys() and oldUniqeValues[boneName] != \
                     newUniqeValues[boneName]:
                        # We had an old lock and its value changed
                        try:
                            # Try to delete the old lock
                            oldLockObj = db.Get(
                                db.Key.from_path(
                                    "%s_%s_uniquePropertyIndex" %
                                    (skel.kindName, boneName),
                                    oldUniqeValues[boneName]))
                            if oldLockObj["references"] != ourKey:
                                # We were supposed to hold that lock, but we don't.
                                # Don't remove that lock as it now belongs to a different entry
                                logging.critical(
                                    "Detected Database corruption! A Value-Lock had been reassigned!"
                                )
                            else:
                                # It's our lock which we don't need anymore
                                db.Delete(
                                    db.Key.from_path(
                                        "%s_%s_uniquePropertyIndex" %
                                        (skel.kindName, boneName),
                                        oldUniqeValues[boneName]))
                        except db.EntityNotFoundError as e:
                            logging.critical(
                                "Detected Database corruption! Could not delete stale lock-object!"
                            )
                    if newUniqeValues[boneName] is not None:
                        # Lock the new value
                        newLockObj = db.Entity("%s_%s_uniquePropertyIndex" %
                                               (skel.kindName, boneName),
                                               name=newUniqeValues[boneName])
                        newLockObj["references"] = str(dbObj.key())
                        db.Put(newLockObj)
            return (str(dbObj.key()), dbObj, skel)

        # END of txnUpdate subfunction

        key = self["key"] or None
        if not isinstance(clearUpdateTag, bool):
            raise ValueError(
                "Got an unsupported type %s for clearUpdateTag. toDB doesn't accept a key argument any more!"
                % str(type(clearUpdateTag)))

        # Allow bones to perform outstanding "magic" operations before saving to db
        for bkey, _bone in self.items():
            _bone.performMagic(self.valuesCache, bkey, isAdd=(key is None))

        # Run our SaveTxn
        if db.IsInTransaction():
            key, dbObj, skel = txnUpdate(key, self, clearUpdateTag)
        else:
            key, dbObj, skel = db.RunInTransactionOptions(
                db.TransactionOptions(xg=True), txnUpdate, key, self,
                clearUpdateTag)

        # Perform post-save operations (postProcessSerializedData Hook, Searchindex, ..)
        self["key"] = str(key)
        if self.searchIndex:  # Add a Document to the index if an index is specified
            fields = []

            for boneName, bone in skel.items():
                if bone.searchable:
                    fields.extend(
                        bone.getSearchDocumentFields(self.valuesCache,
                                                     boneName))

            fields = skel.getSearchDocumentFields(fields)
            if fields:
                try:
                    doc = search.Document(doc_id="s_" + str(key),
                                          fields=fields)
                    search.Index(name=skel.searchIndex).put(doc)
                except:
                    pass

            else:  # Remove the old document (if any)
                try:
                    search.Index(name=self.searchIndex).remove("s_" + str(key))
                except:
                    pass

        for boneName, bone in skel.items():
            bone.postSavedHandler(self.valuesCache, boneName, skel, key, dbObj)

        skel.postSavedHandler(key, dbObj)

        if not clearUpdateTag:
            updateRelations(key, time() + 1)

        return key
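
A minimal usage sketch of toDB/fromDB (hypothetical PersonSkel Skeleton subclass; assumes the usual ViUR insert/update pattern):

    skel = PersonSkel()                  # hypothetical Skeleton subclass
    skel["name"] = u"Alice"
    newKey = skel.toDB()                 # no key set yet, so a new entity is created

    other = PersonSkel()
    if other.fromDB(newKey):             # read the entity back by its datastore key
        other["name"] = u"Bob"
        other.toDB(clearUpdateTag=True)  # update; skip the relation-update background task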
Code example #2
def _CreateIssueSearchDocuments(issues, comments_dict, users_by_id,
                                config_dict):
    """Make the GAE search index documents for the given issue batch.

  Args:
    issues: list of issues to index.
    comments_dict: prefetched dictionary of comments on those issues.
    users_by_id: dictionary {user_id: UserView} so that the email
        addresses of users who left comments can be found via search.
    config_dict: dict {project_id: config} for all the projects that
        the given issues are in.
  """
    documents_by_shard = collections.defaultdict(list)
    for issue in issues:
        comments = comments_dict.get(issue.issue_id, [])
        comments = _IndexableComments(comments, users_by_id)
        summary = issue.summary
        # TODO(jrobbins): allow search specifically on explicit vs derived
        # fields.
        owner_id = tracker_bizobj.GetOwnerId(issue)
        owner_email = users_by_id[owner_id].email
        config = config_dict[issue.project_id]
        component_paths = []
        for component_id in issue.component_ids:
            cd = tracker_bizobj.FindComponentDefByID(component_id, config)
            if cd:
                component_paths.append(cd.path)

        field_values = [
            str(tracker_bizobj.GetFieldValue(fv, users_by_id))
            for fv in issue.field_values
        ]

        metadata = '%s %s %s %s %s %s' % (
            tracker_bizobj.GetStatus(issue), owner_email, [
                users_by_id[cc_id].email
                for cc_id in tracker_bizobj.GetCcIds(issue)
            ], ' '.join(component_paths), ' '.join(field_values), ' '.join(
                tracker_bizobj.GetLabels(issue)))
        assert comments, 'issues should always have at least the description'
        description = _ExtractCommentText(comments[0], users_by_id)
        description = description[:framework_constants.MAX_FTS_FIELD_SIZE]
        all_comments = ' '.join(
            _ExtractCommentText(c, users_by_id) for c in comments[1:])
        all_comments = all_comments[:framework_constants.MAX_FTS_FIELD_SIZE]

        custom_fields = _BuildCustomFTSFields(issue)
        doc = search.Document(
            doc_id=str(issue.issue_id),
            fields=[
                search.NumberField(name='project_id', value=issue.project_id),
                search.TextField(name='summary', value=summary),
                search.TextField(name='metadata', value=metadata),
                search.TextField(name='description', value=description),
                search.TextField(name='comment', value=all_comments),
            ] + custom_fields)

        shard_id = issue.issue_id % settings.num_logical_shards
        documents_by_shard[shard_id].append(doc)

    start_time = time.time()
    promises = []
    for shard_id, documents in documents_by_shard.iteritems():
        if documents:
            promises.append(
                framework_helpers.Promise(_IndexDocsInShard, shard_id,
                                          documents))

    for promise in promises:
        promise.WaitAndGetValue()

    logging.info('Finished indexing %d shards in %d ms',
                 len(documents_by_shard),
                 int((time.time() - start_time) * 1000))
Code example #3
File: ViewStream.py  Project: ttillett13/GrayTeam
    def build_post_template(current_user, request):
        stream_name = request.get('stream_name')
        picture_name = request.get('name')
        picture = request.get('file')
        comments = request.get('comments')
        decrementPage = request.get('decrementPage')
        standardPage = request.get('page')
        latitude = request.get('latitude')
        if not latitude:
            latitude = random.uniform(-90, 90)
        else:
            latitude = float(latitude)

        longitude = request.get('longitude')
        if not longitude:
            longitude = random.uniform(-180, 180)
        else:
            longitude = float(longitude)

        status = "success"

        if decrementPage:
            page = int(decrementPage) - 1
        else:
            page = int(standardPage) + 1

        if page < 0:
            page = 0

        dt = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        # Check to see if image name already exists
        if picture_name and not Picture.query(Picture.name == stream_name +
                                              "_" + str(picture_name) + "_" +
                                              dt).fetch():
            stream = Stream.query(Stream.name == stream_name).get()
            # for i in Stream.query().fetch():
            #     if i.name == stream_name:
            #         stream = i
            if stream:

                filename = '/{}/Pictures'.format(
                    BUCKET_NAME) + "/" + stream_name + "_" + str(
                        picture_name) + "_" + dt

                with cloudstorage.open(
                        filename, 'w',
                        content_type='image/jpeg') as filehandle:
                    filehandle.write(str(picture))

                blobstore_filename = '/gs{}'.format(filename)
                blob_key = blobstore.create_gs_key(blobstore_filename)

                new_picture = Picture(
                    name=stream_name + "_" + str(picture_name) + "_" + dt,
                    image=blob_key,
                    comments=comments,
                    lat=latitude,
                    lon=longitude,
                    date_uploaded=datetime.datetime.now()).put()

                # Update Stream
                stream.pictures.append(new_picture)
                stream.picture_count += 1
                stream.last_new_picture = datetime.datetime.now()
                stream.put()
                time.sleep(1)
                # add image comments to stream search index
                #if not stream.cover_page_url:
                #    stream.cover_page_url = images.get_serving_url(new_picture.image, secure_url=False)

                index = search.Index(INDEX_NAME)
                doc_id = stream.key.urlsafe()
                search_stream = index.get(doc_id)
                tags = search_stream.field('tag').value + " " + comments
                #tags = str(stream.tags) + comments
                d = search.Document(doc_id=doc_id,
                                    fields=[
                                        search.TextField(name="stream_name",
                                                         value=stream.name),
                                        search.TextField(
                                            name="cover_image",
                                            value=stream.cover_page_url),
                                        search.TextField(name="url",
                                                         value=stream.url),
                                        search.TextField(name="tag",
                                                         value=tags)
                                    ],
                                    language="en")
                index.put(d)
            else:
                status = "fail"
        elif not decrementPage:
            status = "fail"

        template_values = {
            'name': stream_name,
            'status': status,
            'page': page,
            'pic': ""
        }

        return template_values
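
Because the document's doc_id is the stream's URL-safe key, tag matches map straight back to Stream entities; a sketch, assuming the same INDEX_NAME:

    from google.appengine.ext import ndb

    index = search.Index(INDEX_NAME)
    for doc in index.search('tag:sunset'):
        stream = ndb.Key(urlsafe=doc.doc_id).get()  # doc_id was stream.key.urlsafe()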
Code example #4
    def create_document(cls, post, subject):
        document = search.Document(
            fields=[search.TextField(name='subject', value=subject),
                    search.TextField(name='post', value=post)])
        return document
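
The factory above only builds the Document; indexing and querying it is a separate step. A sketch (hypothetical Post class exposing create_document as a classmethod; the index name 'posts' is a placeholder):

    from google.appengine.api import search

    doc = Post.create_document(post='hello world', subject='greetings')
    search.Index(name='posts').put(doc)
    results = search.Index(name='posts').search('subject:greetings')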
Code example #5
    def post(self): 
        params_html = {}  
        paper_type_list = ['Conference','Journal','Thesis','Poster','Patent','Report','Book','Book Chapter','Presentation','Draft']
        publication_status_list = ['Accepted','Submitted','Published']
        
        params_html['paper_type_list'] = paper_type_list
        params_html['publication_status_list'] = publication_status_list
        
        #----------------Retrieve the Paper from the Database-----------------
        paper_key = self.request.get('i')
        paper = db.get(paper_key)
        paper_publisher = paper.publisher
        #---------------------------------------------------------------------
        
        #---------------------Check if the User is Admin---------------------
        admin_flag = 0
        user_author = None  # stays None when no user cookie is present
        temp = self.request.cookies.get('user_id')
        if temp:
            userid = valid_hash_cookie(temp)
            user = db.GqlQuery("SELECT * FROM UserPass_User WHERE user_id = '%s'" %userid)
            user = user.get()
            admin_flag = user.isadmin
            user_author = user.author_id
            params_html['userid'] = userid
        
        params_html['admin_flag'] = admin_flag
        #---------------------------------------------------------------------
        
        #----------------------Get the Details from the Form-------------------
        paper_title = (self.request.get('paper_title'))
        if isinstance(paper_title, str):
            paper_title = unicode(paper_title,'utf-8')
        else:
            paper_title = unicode(paper_title)
            
        publication_type = self.request.get('publication_type')
        publication_date = self.request.get('publication_date')
        paper_abstract = (self.request.get('abstract_of_paper'))
        paper_keywords = self.request.get('pub_keywords')
        if isinstance(paper_keywords, str):
            paper_keywords = unicode(paper_keywords,'utf-8')
        else:
            paper_keywords = unicode(paper_keywords)
                
        biblio_str = self.request.get('how_to_cite')
        web_link = self.request.get('web_link')
        pdf_link = self.request.get('pdf_link')        
        code_link = self.request.get('code_link')
        demo_link = self.request.get('demo_link')
        data_link = self.request.get('data_link')
        publication_status = self.request.get('publication_status')
        
        #..........................Retrieve the Authors........................
        paper_authors = []
        author_count = 0
        for i in range(1,len(paper.authors)+1):
            field_name = str(i) + "_author"
            temp = self.request.get(field_name)
            if isinstance(temp, str):
                temp = unicode(temp,'utf-8')
            else:
                temp = unicode(temp)
            
            paper_authors.append(temp)
            if temp != '':
                author_count = author_count + 1
        #.....................................................................
        
        #.......................Retrieve the Author Emails.....................
        author_emails = []
        email_success_flag = 1
        for i in range(1,len(paper.authors)+1):
            field_name = str(i) + "_email"
            temp = self.request.get(field_name)
            if (temp):
                if (valid_email(temp)):
                    author_emails.append(temp)
                else:
                    email_success_flag = 0
                    author_emails.append('')
            else:
                author_emails.append('')    
        #.....................................................................
        
        #---------------------------------------------------------------------
        
        #-----------------------Check for Input Errors------------------------
        success_flag = 1
         
        if not paper_title:
            success_flag = 0
            params_html['error_title'] = 'Paper title is necessary!'
        else:
            params_html['paper_title_val'] = (paper_title)
            
        if author_count == 0:
            success_flag = 0
            params_html['error_author'] = 'At least one author is required!'
            params_html['authors_list_val'] = []
        
        if not email_success_flag:
            success_flag = 0
            params_html['error_email'] = 'The entered email is invalid'
            
        if author_count:
            params_html['authors_list_val'] = zip(paper_authors,author_emails)
        
        if not publication_date:
            success_flag = 0
            params_html['error_date'] = 'Date is necessary!'
        else:
            params_html['publication_date_val'] = publication_date
            
        if not paper_abstract:
            success_flag = 0
            params_html['error_abstract'] = "Abstract shouldn't be empty!"
        else:
            params_html['paper_abstract'] = (paper_abstract)
        
        if not paper_keywords:
            success_flag = 0
            params_html['error_keywords'] = 'At least one keyword is necessary'
        else:
            params_html['keywords'] = paper_keywords
        
        if not biblio_str:
            success_flag = 0
            params_html['error_cite'] = 'This field is absolutely necessary'
        else:
            params_html['biblio_str'] = biblio_str
        
        if not web_link:
            success_flag = 0
            params_html['error_web_link'] = 'A URL for the full paper is necessary'
        else:
            params_html['web_link_val'] = web_link
        
        if not pdf_link:
            success_flag = 0
            params_html['error_pdf_link'] = 'A URL for the PDF is necessary'
        else:
            params_html['pdf_link_val'] = pdf_link
        
        if not demo_link:
            demo_link = None
            
        
        if not data_link:
            data_link = None
            
        params_html['initial_paper_type'] = publication_type
        params_html['initial_status'] = publication_status
        #---------------------------------------------------------------------
        
        
        
        if not success_flag:
            paper_authors = []
            for item in paper.authors:
                authorID = item.replace(" ", "")
                authorID = authorID.replace(",","")
                paper_authors.append(authorID)
                
            if user_author in paper_authors:
                params_html['admin_flag'] = 1
            self.response.out.write(template.render('./html/edit_paper.html',params_html))
        else:
            temp = self.request.cookies.get('user_id')
            if temp:
                chef_id = valid_hash_cookie(temp)
                if not chef_id:
                    self.redirect('/login')
            else:
                self.redirect('/login')        
            
            #------------------------------Update Paper Details-----------------------------
            success_flag = 1
            paper.title = paper_title
            paper.publication_type = publication_type   
            paper.publication_date = publication_date
            paper.abstract = paper_abstract
            paper.keywords = paper_keywords.split(';')
            paper.biblio_str = biblio_str
            if web_link:
                try:
                    paper.web_link = web_link
                except:
                    success_flag = 0
                    logging.error("Invalid web link when editing papr")                                        
                    params_html['error_web_link'] = 'Invalid link format'
                    
            if pdf_link:
                try:
                    paper.pdf_link = pdf_link
                except:
                    success_flag = 0
                    logging.error("Invalid PDF link when editing papr")                                        
                    params_html['error_pdf_link'] = 'Invalid link format'
                
            
            if code_link:                
                try:
                    paper.code_link = code_link
                except:
                    success_flag = 0
                    logging.error("Invalid code link when editing papr")                                        
                    params_html['error_code_link'] = 'Invalid link format'
            else:
                paper.code_link = None
            
            if demo_link:                
                try:
                    paper.demo_link = demo_link
                except:
                    success_flag = 0
                    logging.error("Invalid demo link when editing papr")                                        
                    params_html['error_demo_link'] = 'Invalid link format'
            else:
                paper.demo_link = None
            
            if data_link:                    
                try:
                    paper.data_link = data_link
                except:
                    success_flag = 0
                    logging.error("Invalid data link when editing papr")                                        
                    params_html['error_data_link'] = 'Invalid link format'
            else:
                paper.data_link = None
             
            if not success_flag:
                params_html['admin_flag'] = 1
                self.response.out.write(template.render('./html/edit_paper.html',params_html))
            else:
                paper.publication_status = publication_status
                paper.authors = paper_authors
                paper.authors_str = '; '.join(paper_authors)
                paper.email_authors = author_emails
                paper.put()
                #---------------------------------------------------------------------
            
                #--------------------Index the Paper for Future Search Queries----------------------
                index = search.Index(name='PAPERS_INDEXES', namespace='PAPER_INDICES_NAMESPACE')
                key_val = paper.key()

                key_val = str(key_val).replace('-','_')
                fields = [search.TextField(name='abstract', value=paper_abstract),
                        search.TextField(name='doc_id', value=key_val),
                        #search.DateField(name='publication_date',value=datetime.datetime.now().date()),
                        search.TextField(name='title', value=paper_title),
                        search.TextField(name='authors', value='; '.join(paper_authors)),
                        search.TextField(name='keywords', value=paper_keywords),      
                        search.AtomField(name='pub_type', value=publication_type),
                        search.TextField(name='publisher', value=paper_publisher),
                        search.AtomField(name='pub_status', value=publication_status),
                        search.NumberField(name='pub_year', value=int(paper.publication_year)),
                        search.TextField(name='dockey', value=str(paper.key()))]
                
                d = search.Document(doc_id=key_val, fields=fields)
                try:
                    add_result = index.put(d)  # reuse the namespaced index created above
                
                except search.Error:
                    self.response.out.write("Sorry we weren't able to add this!")
                #-----------------------------------------------------------------------------------

            
            
                #-----------------Add or Update the Author to the Authors Database------------------
                itr = 0
                author_emails = paper.email_authors
                
                for author in paper.authors:
                    authorID = str(author.replace(" ", ""))
                    authorID = authorID.replace(",","")
                    email = author_emails[itr]
                
                    user = db.GqlQuery("SELECT * FROM Authors_DB WHERE author_id = '%s'" %authorID)
                    user = user.get()
                
                    if user:
                    
                        #............................Update the Author..............................
                        published_papers = user.paper_keys
                        if str(paper.key()) not in str(published_papers):
                            published_papers.append(str(paper.key()))
                            user.paper_keys = published_papers
                            
                            titles = user.paper_titles
                            titles.append(str((paper.title)))
                            user.paper_titles = titles
                        
                            dates = user.paper_dates
                            dates.append(str(paper.publication_year))
                            user.paper_dates = dates
                        
                        
                            authors_str = user.other_authors
                            authors_str.append(paper.authors_str)
                            user.other_authors = authors_str
                            
                        user.email_add = email
                        user.put()
                    #...........................................................................
        
                    else:
                        #.....................Add the Author to the Database........................
                        ind = author.find(",")
                        first_name = author[0:ind]
                        last_name = author[ind+2:len(author)]
                    
                        u = Authors_DB(author_id = authorID,firstname = first_name, lastname = last_name,email_add = email,
                                       paper_keys = list([str(paper.key())]),paper_titles = list([str(paper.title)]),
                                       paper_dates = list([str(paper.publication_year)]),other_authors = list([paper.authors_str]))
                        u.put()
                        #...........................................................................

                    itr = itr + 1
                    #-----------------------------------------------------------------------------------
                
                perma_link=('./_paper?i=%s' %str(paper.key()))
            
                self.redirect('/%s' %perma_link)
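
A sketch of querying the paper index built above; the namespace has to match the one used at indexing time, and the sort leans on the pub_year NumberField:

    index = search.Index(name='PAPERS_INDEXES', namespace='PAPER_INDICES_NAMESPACE')
    results = index.search(search.Query(
        query_string='pub_type:Journal AND pub_year >= 2012',
        options=search.QueryOptions(
            sort_options=search.SortOptions(expressions=[
                search.SortExpression(field_name='pub_year',
                                      direction=search.SortExpression.DESCENDING,
                                      default_value=0)]))))
    for doc in results:
        print doc.field('title').value  # Python 2, matching the examples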
Code example #6
    def post(self):

        upload = self.get_uploads()[0]

        # Get input values from the form
        email = self.request.get('email')
        title = self.request.get('title')
        description = self.request.get('description')
        price = self.request.get('price')
        quantity = self.request.get('quantity')
        theme = self.request.get('theme')
        tags = self.request.get('tags')
        tagsarr = tags.split(',')

        #GEOTAG
        latitude = self.request.get('latitude')
        longitude = self.request.get('longitude')
        if latitude == '' or longitude == '':
            # fall back to a default location if either coordinate is missing
            loc = "{},{}".format(30.285104, -97.737554)
        else:
            loc = "{},{}".format(latitude, longitude)

        meals = Meals(geopt=ndb.GeoPt(loc),
                      email=email,
                      name=title,
                      description=description,
                      quantity=quantity,
                      price=price,
                      theme=theme,
                      tags=tagsarr,
                      meal_image=upload.key(),
                      meal_image_url=images.get_serving_url(upload.key()))

        meals.put()

        for tag in tagsarr:
            # ADD
            key_a = ndb.Key(Tags, tag)
            t = Tags(key=key_a, name=tag)
            t.put()

            # Insert unique
            a = Tags.get_or_insert(tag)


        ##search functionality##
        # list of searchable fields
        fields = [
            search.TextField(name="Title", value=title),
            search.TextField(name="Description", value=description)
        ]

        # creates a document containing the meal info
        d = search.Document(fields=fields)

        # try to add the document to the index
        try:
            add_result = search.Index(name=MEAL_INDEX_NAME).put(d)
            print("The document was successfully added to the index.")
        except search.Error:
            print("The document was not successfully added to the index.")
        obj = {'newprofilecreated': 'true'}
        self.redirect('/')
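
A sketch of querying the meal index (field names are capitalized exactly as indexed above):

    index = search.Index(name=MEAL_INDEX_NAME)
    results = index.search('Title:taco OR Description:taco')

Note that the document above is created without a doc_id and without any reference to the Meals entity, so matches cannot be joined back to the datastore record; storing the entity's key id as the doc_id or in a field would allow that.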
Code example #7
    def post(self):
        file_info = blobstore.BlobInfo(
            blobstore.BlobKey(self.request.get('key_str')))
        reader = blobstore.BlobReader(file_info)
        tree = ET.parse(reader)
        root = tree.getroot()
        author = get_author()
        for child in root:
            list_of_number_ids = u''
            description = child.attrib['description']
            labels = child.attrib['labels']
            series_type = child.attrib['series_type']
            units = child.attrib['unit']
            source = child.attrib['source']
            data_fields = [
                search.TextField(name='author', value=author),
                search.TextField(name='description', value=description),
                search.TextField(name='units', value=units),
                search.TextField(name='labels', value=labels),
                search.TextField(name='series_type', value=series_type)
            ]

            if check_duplicate_series(description):
                logging.info("duplicate series %s" % description)
                return
            series_id = search.Index(name=_INDEX_NAME).put(
                search.Document(
                    fields=data_fields +
                    [search.TextField(name='list_of_number_ids', value='')])
            )[0].id
            for number in child:
                value = float(number.attrib['value'])
                year = '-1'
                month = '-1'
                day = '-1'
                if 'time_period' in number.attrib:
                    time = number.attrib['time_period']
                    year = time
                    if time.find('-') != -1:
                        year = time.split('-')[0]
                        month = time.split('-')[1]
                        if len(time.split('-')) == 3:
                            day = time.split('-')[2]
                check_duplicate = check_duplicate_numbers(
                    value, units, description, source, year, month, day)
                if check_duplicate:
                    number = check_duplicate
                    number_id = number.doc_id
                    add_series_id_to_number(number, series_id)
                else:
                    number_id = add_to_number_index(author, None, value, units,
                                                    description, labels,
                                                    source, int(year),
                                                    int(month), int(day),
                                                    series_id)
                list_of_number_ids += u" " + number_id
            search.Index(name=_INDEX_NAME).put(
                search.Document(doc_id=series_id,
                                fields=data_fields + [
                                    search.TextField(name='list_of_number_ids',
                                                     value=list_of_number_ids)
                                ]))
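
A sketch of walking a series document back to its numbers via the list_of_number_ids field:

    index = search.Index(name=_INDEX_NAME)
    series_doc = index.get(series_id)  # series_id as returned by Index.put above
    number_ids = series_doc.field('list_of_number_ids').value.split()
    number_docs = [index.get(number_id) for number_id in number_ids]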
Code example #8
def createPositionedItemDocument(pItem, itemStorerId):
    posField = search.GeoField(name='position',
                               value=search.GeoPoint(pItem.position.lat,
                                                     pItem.position.lon))
    d = search.Document(doc_id=itemStorerId, fields=[posField])
    return d
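
A sketch of a radius query against the GeoField defined above ('items' is a placeholder index name; the distance is in meters):

    from google.appengine.api import search

    index = search.Index(name='items')
    nearby = index.search(
        'distance(position, geopoint(37.7749, -122.4194)) < 500')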
Code example #9
def make_archive_for_gig_key(the_gig_key):
    """ makes an archive for a gig - files away all the plans, then delete them """

    the_gig = the_gig_key.get()

    the_archive_text = ""
    if the_gig.status == 2:  # this gig was cancelled
        the_archive_text = "The gig was cancelled."
    else:
        the_band = the_gig_key.parent().get()
        the_assocs = assoc.get_confirmed_assocs_of_band_key(the_band.key)
        the_sections = list(the_band.sections)
        the_sections.append(None)

        the_plans = []
        for the_section in the_sections:
            section_plans = []
            for an_assoc in the_assocs:
                the_plan = plan.get_plan_for_member_key_for_gig_key(
                    an_assoc.member, the_gig_key)
                # add the plan to the list, but only if the member's section for this gig is this section
                if the_plan:
                    test_section = the_plan.section
                    if test_section is None:
                        test_section = an_assoc.default_section

                    if test_section == the_section:
                        section_plans.append([an_assoc.member, the_plan])

                        # when we file this away, update the member's gig-commitment stats
                        if the_plan.value in [1, 5, 6]:
                            an_assoc.commitment_number = an_assoc.commitment_number + 1

                        # whether or not there's a plan, up the number of gigs we should have committed to
                        an_assoc.commitment_total = an_assoc.commitment_total + 1

            the_plans.append((the_section, section_plans))
        ndb.put_multi(the_assocs)

        for a_section in the_plans:
            if a_section[1]:
                the_section_key = a_section[0]
                if (the_section_key):
                    the_section_name = the_section_key.get().name
                else:
                    if len(the_plans) == 1:
                        the_section_name = ''
                    else:
                        the_section_name = 'No Section'
                the_archive_text = u'{0}\n{1}'.format(the_archive_text,
                                                      the_section_name)

                for member_plans in a_section[1]:
                    the_member = member_plans[0].get()
                    the_plan = member_plans[1]
                    the_comment = u'- {0}'.format(
                        the_plan.comment) if the_plan.comment else ""
                    the_nickname = u' ({0})'.format(
                        the_member.nickname) if the_member.nickname else ''
                    the_archive_text = u'{0}\n\t{1}{2} - {3} {4}'.format(
                        the_archive_text, the_member.name, the_nickname,
                        plan.plan_text[the_plan.value], the_comment)

                the_archive_text = u'{0}\n'.format(the_archive_text)

    # create a document
    my_document = search.Document(fields=[
        search.TextField(name='plans', value=the_archive_text),
        search.TextField(name='type', value='archive')
    ])

    try:
        index = search.Index(name="gigomatic_index")
        result = index.put(my_document)
    except search.Error:
        logging.exception('Put failed')
        return None  # without a successful put, result is undefined and there is no doc_id

    doc_id = result[0].id
    return doc_id
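
A sketch of reading an archive back by the returned doc_id:

    index = search.Index(name="gigomatic_index")
    doc = index.get(doc_id)  # doc_id as returned above
    archive_text = doc.field('plans').value if doc else None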
Code example #10
    def post(self):
        user = users.get_current_user()

        search_text = self.request.get('txtName')

        # Build the search index using the Google Search API
        index = search.Index(name="search_stream")
        target_query = Stream.query()
        targets = target_query.fetch()
        for target in targets:
            fields = [search.TextField(name="name", value=target.name)]
            for tag in target.tags:
                fields.append(search.TextField(name="tags", value=tag))
            d = search.Document(fields=fields, language='en')
            search.Index(name="search_stream").put(d)

        #index = search.Index()

        #search_results = index.search("%s in name OR %s in tags" % (search_text, search_text))
        search_results = index.search(search_text)
        #logging.info("%s", dir(search_results))
        logging.info("Found %d number of results" %
                     len(search_results.results))

        final_targets = []
        counter = 0
        for i in search_results:
            found_name = i.field("name".encode('utf-8')).value.decode('utf-8')
            logging.info("Found %s" % found_name)
            if counter < 5:
                target_query = Stream.query(Stream.name == found_name)
                target = target_query.fetch()[0]
                final_targets.append(target)
                counter += 1
            else:
                break
        logging.info("Listing %d of streams" % len(final_targets))
        for i in final_targets:
            logging.info("found %s", i.name)
        # remove all indexes
        while True:
            # until no more documents, get a list of documents,
            # constraining the returned objects to contain only the doc ids,
            # extract the doc ids, and delete the docs.
            document_ids = [
                document.doc_id for document in index.get_range(ids_only=True)
            ]
            if not document_ids:
                break
            index.delete(document_ids)

        # Just try to retrieve from NDB
        # # WORKS BUT NOT USING SEARCH API
        # target_query = Stream.query()
        # targets = target_query.fetch()
        # final_targets = []
        #
        # for target in targets:
        #     logging.info("target name %s, tags: %s, search text %s " % (target.name, target.tags, search_text))
        #     if search_text in target.name or search_text in str(target.tags):
        #         final_targets.append(target)
        #
        # for i in final_targets:
        #     logging.info("found %s", i.name)

        template_values = {
            'stream': final_targets,
        }

        template = JINJA_ENVIRONMENT.get_template('SearchStream.html')
        self.response.write(template.render(template_values))
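
The manual counter above can be pushed into the query itself; a sketch:

    results = index.search(search.Query(
        query_string=search_text,
        options=search.QueryOptions(limit=5)))

QueryOptions(limit=5) returns at most five matches server-side instead of fetching everything and breaking out of the loop.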
Code example #11
    def create_or_update(self,
                         name=None,
                         code=None,
                         phone=None,
                         yelp=None,
                         opentable=None,
                         website=None,
                         neighborhood_name=None,
                         street_address=None,
                         region_name=None,
                         region_code=None,
                         user=None,
                         **kwargs):

        if code:
            loc_code = code
        else:
            loc_code = get_loc_code(name)

        loc = Location.get_by_key_name(loc_code)
        if not loc:
            loc = Location(key_name=loc_code, code=loc_code, name=name)
            if user and not user.is_admin():
                loc.owner = user
            loc_doc = search.Document(fields=[
                search.TextField(name='name', value=name),
                search.TextField(name='code', value=loc_code),
                search.DateField(name='date',
                                 value=datetime.datetime.now().date())
            ])
            logging.info('adding loc doc to index')
            search.Index(name=_INDEX_NAME).put(loc_doc)  # put() supersedes the deprecated add()

            # TODO: get updates after creation, but without overwriting manual values and being throttled
            self.get_loc_info(loc)

        elif name:
            loc.name = name

        if phone is not None:
            loc.phone = phone
        if yelp is not None:
            loc.yelp = yelp
        if not loc.yelp:
            import urllib
            loc.yelp = 'http://www.yelp.com/search?find_desc=%s&find_loc=%s' % (
                urllib.quote_plus(loc.name),
                urllib.quote_plus(region_name or 'San Francisco'))

        if opentable is not None:
            loc.opentable = opentable
        if website is not None:
            loc.website = website
        if neighborhood_name is not None:
            loc.neighborhood_name = neighborhood_name
        if street_address is not None:
            loc.street_address = street_address
        if region_name is not None:
            loc.region_name = region_name
        if region_code is not None:
            loc.region_code = region_code

        return loc
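
A sketch of querying the location documents created above (phrase queries need quoting):

    index = search.Index(name=_INDEX_NAME)
    results = index.search(search.Query(query_string='name:"blue bottle"'))
    codes = [doc.field('code').value for doc in results]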
Code example #12
    def post(self):
        doc_id = self.request.get('doc_id', '')
        if doc_id:
            #logging.debug('doc_id: %s' % (doc_id))
            urlinstance = Url.get_by_id(int(doc_id))

            if urlinstance:
                # If not valid url, delete from index
                if urlinstance.valid < 0:
                    doc_index = search.Index(name='url')
                    logging.info(
                        'Delete invalid (%s) url (ID %s) from document index \'url\' (%s)'
                        % (str(urlinstance.valid), doc_id, doc_index))
                    doc_index.delete(doc_id)
                else:
                    url = urlinstance.url
                    title = urlinstance.title
                    #logging.debug('url: %s, title: %s' % (url, title))

                    channels = []
                    channel = None
                    users = []
                    user = None
                    date = datetime.datetime.fromtimestamp(0)

                    comments = []
                    comment = None
                    tags = []
                    tag = None

                    rate = 0

                    channelurlquery = ChannelUrl.query(
                        ChannelUrl.url == urlinstance.key)
                    for channelurlinstance in channelurlquery:
                        channelinstance = channelurlinstance.channel.get()
                        if channelinstance.name not in channels:
                            channels.append(channelinstance.name)
                            #logging.info('Adding channel %s' % (channelinstance.name))

                        postquery = Post.query(
                            Post.channelurl == channelurlinstance.key)
                        for postinstance in postquery:
                            if postinstance.user not in users:
                                users.append(postinstance.user)
                            if date:
                                if date < postinstance.date:
                                    date = postinstance.date
                            else:
                                date = postinstance.date

                        extraquery = Extra.query(
                            Extra.channelurl == channelurlinstance.key)
                        for extrainstance in extraquery:
                            if extrainstance.tag:
                                if extrainstance.tag not in tags:
                                    tags.append(extrainstance.tag)
                                    #logging.info('Adding tag %s' % (extrainstance.tag))
                            if extrainstance.comment:
                                if extrainstance.comment not in comments:
                                    comments.append(extrainstance.comment)
                                    #logging.info('Adding comment %s' % (extrainstance.comment))

                        ratequery = Rate.query(
                            Rate.channelurl == channelurlinstance.key)
                        for rateinstance in ratequery:
                            rate += rateinstance.value
                        #logging.debug('rate %s' % (rate))

                    if not date:
                        date = datetime.datetime.fromtimestamp(0)
                    # lists to strings
                    channel = ' '.join(channels)
                    user = ' '.join(users)
                    tag = ' '.join(tags)
                    if not tag:
                        tag = None
                    comment = ' '.join(comments)
                    if not comment:
                        comment = None

                    logging.debug(
                        'doc; channel=%s, user=%s, url=%s, date=%s, title=%s, comment=%s, tag=%s, rate=%s'
                        %
                        (channel, user, url, date, title, comment, tag, rate))
                    try:
                        doc = search.Document(
                            doc_id=str(doc_id),
                            fields=[
                                search.TextField(name='channel',
                                                 value=channel),
                                search.TextField(name='user', value=user),
                                search.TextField(name='url', value=url),
                                search.DateField(name='date', value=date),
                                search.TextField(name='title', value=title),
                                search.TextField(name='comment',
                                                 value=comment,
                                                 language='fi'),
                                search.TextField(name='tag',
                                                 value=tag,
                                                 language='fi'),
                                search.NumberField(name='rate', value=rate)
                            ],
                            language='en')
                    except Exception, e:
                        logging.error('doc_id: %s, error %s' %
                                      (str(doc_id), e))
                        doc = None

                    try:
                        if doc:
                            search.Index(name='url').put(doc)
                            urlinstance.document_date = datetime.datetime.now()
                            urlinstance.put()
                        else:
                            logging.error('Doc missing.')
                    except search.Error:
                        logging.error('Create Document failed.')
            else:
                logging.debug('No urlinstance for doc_id: %s' % (doc_id))
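
A sketch of querying the url index built above, asking the service to return only a few fields per hit:

    index = search.Index(name='url')
    results = index.search(search.Query(
        query_string='tag:python',
        options=search.QueryOptions(
            returned_fields=['url', 'title', 'rate'])))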
Code example #13
File: listing.py  Project: sayanroyc/Spartan_Overhaul
def create_new_listing(user_id):
    json_data = request.get_json()
    name = json_data.get('name', '')
    item_description = json_data.get('item_description', '')
    total_value = float(json_data.get('total_value', ''))
    # location_lat 		= float(json_data.get('location_lat',''))
    # location_lon		= float(json_data.get('location_lon',''))

    # Check to see if the user exists
    u = User.get_by_id(user_id)
    if u is None:
        raise InvalidUsage('UserID does not match any existing user',
                           status_code=400)

    u_key = ndb.Key('User', user_id)

    # if total_value > MAX_ITEM_VALUE:
    # 	raise InvalidUsage('Total value is too large', status_code=400)

    status = 'Available'
    rating = -1.0
    location = ndb.GeoPt(40.112814, -88.231786)

    # INSERT FUNCTION TO CALCULATE RENTAL RATES HERE
    hourly_rate, daily_rate, weekly_rate = get_rates(total_value)

    # Add listing to Datastore
    l = Listing(owner=u_key,
                status=status,
                name=name,
                item_description=item_description,
                rating=rating,
                total_value=total_value,
                hourly_rate=hourly_rate,
                daily_rate=daily_rate,
                weekly_rate=weekly_rate,
                location=location)
    try:
        listing_key = l.put()
        listing_id = str(listing_key.id())
    except:
        abort(500)

    # Add listing to Search App
    new_item = search.Document(doc_id=listing_id,
                               fields=[
                                   search.TextField(name='name', value=name),
                                   search.GeoField(name='location',
                                                   value=search.GeoPoint(
                                                       location.lat,
                                                       location.lon)),
                                   search.TextField(name='owner_id',
                                                    value=str(user_id))
                               ])
    try:
        index = search.Index(name='Listing')
        index.put(new_item)
    except:
        abort(500)

    data = {
        'listing_id': listing_id,
        'date_created': l.date_created,
        'date_last_modified': l.date_last_modified,
        'status': status
    }
    resp = jsonify(data)
    resp.status_code = 201
    return resp
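
A sketch of paging through one owner's listing documents with a search cursor:

    index = search.Index(name='Listing')
    cursor = search.Cursor()
    while cursor is not None:
        results = index.search(search.Query(
            query_string='owner_id:%s' % user_id,
            options=search.QueryOptions(limit=50, cursor=cursor)))
        for doc in results:
            pass                     # process each listing document
        cursor = results.cursor      # None once the last page has been consumed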
Code example #14
File: listing.py  Project: sayanroyc/Spartan_Overhaul
def update_listing(listing_id):
    json_data = request.get_json()
    name = json_data.get('name', '')
    total_value = float(json_data.get('total_value', ''))
    hourly_rate = float(json_data.get('hourly_rate', ''))
    daily_rate = float(json_data.get('daily_rate', ''))
    weekly_rate = float(json_data.get('weekly_rate', ''))
    status = json_data.get('status', '')
    item_description = json_data.get('item_description', '')

    # Get the listing
    l = Listing.get_by_id(listing_id)
    if l is None:
        raise InvalidUsage('ItemID does not match any existing item',
                           status_code=400)

    # Update the item attributes
    l.name = name
    l.total_value = total_value
    l.hourly_rate = hourly_rate
    l.daily_rate = daily_rate
    l.weekly_rate = weekly_rate
    l.item_description = item_description
    l.status = status

    # Add the updated item to the Datastore
    try:
        l.put()
    except:
        abort(500)

    # Add the updated item to the Search API
    if l.status == 'Available':
        updated_item = search.Document(
            doc_id=str(listing_id),
            fields=[
                search.TextField(name='name', value=name),
                search.GeoField(name='location',
                                value=search.GeoPoint(l.location.lat,
                                                      l.location.lon)),
                search.TextField(name='owner_id', value=str(l.owner.id()))
            ])

        try:
            index = search.Index(name='Listing')
            index.put(updated_item)
        except:
            abort(500)
    else:
        try:
            index = search.Index(name='Listing')
            index.delete(str(listing_id))
        except:
            abort(500)

    # Return the attributes of the new item
    data = {
        'name': name,
        'total_value': total_value,
        'hourly_rate': hourly_rate,
        'daily_rate': daily_rate,
        'weekly_rate': weekly_rate,
        'status': status,
        'item_description': item_description
    }
    resp = jsonify(data)
    resp.status_code = 200
    return resp
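
Note how the handler keeps the 'Listing' index in sync with the Datastore: the document is re-put while a listing is 'Available' and deleted otherwise. Index.delete also accepts a list of ids, so several listings can be delisted in one call; a small sketch where delisted_ids is an illustrative name:

index = search.Index(name='Listing')
index.delete([str(listing_id) for listing_id in delisted_ids])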
コード例 #15
0
 def put_index(self, data=None):
     """ index the element at each"""
     empty_string = lambda x: x if x else ""
     empty_date = lambda x: x if x else date(2999, 12, 31)
     collaborators = " ".join(self.collaborators_ids)
     title_autocomplete = ','.join(tokenize_autocomplete(self.title))
     organization = str(self.organization.id())
     if data:
         search_key = ['infos', 'tasks', 'tags', 'topics']
         for key in search_key:
             if key not in data.keys():
                 data[key] = ""
         my_document = search.Document(
             doc_id=str(data['id']),
             fields=[
                 search.TextField(name=u'type', value=u'Task'),
                 search.TextField(name='organization',
                                  value=empty_string(organization)),
                 search.TextField(name='access',
                                  value=empty_string(self.access)),
                 search.TextField(name='owner',
                                  value=empty_string(self.owner)),
                 search.TextField(name='collaborators',
                                  value=collaborators),
                 search.TextField(name='title',
                                  value=empty_string(self.title)),
                 search.TextField(name='status',
                                  value=empty_string(self.status)),
                 search.DateField(name='due', value=empty_date(self.due)),
                 search.TextField(name='about_kind',
                                  value=empty_string(self.about_kind)),
                 search.TextField(name='about_item',
                                  value=empty_string(self.about_item)),
                 search.TextField(name='infos', value=data['infos']),
                 search.TextField(name='tags', value=data['tags']),
                 search.TextField(name='tasks', value=data['tasks']),
                 search.TextField(name='topics', value=data['topics']),
                 search.TextField(name='title_autocomplete',
                                  value=empty_string(title_autocomplete)),
             ])
     else:
         my_document = search.Document(
             doc_id=str(self.key.id()),
             fields=[
                 search.TextField(name=u'type', value=u'Task'),
                 search.TextField(name='organization',
                                  value=empty_string(organization)),
                 search.TextField(name='access',
                                  value=empty_string(self.access)),
                 search.TextField(name='owner',
                                  value=empty_string(self.owner)),
                 search.TextField(name='collaborators',
                                  value=collaborators),
                 search.TextField(name='title',
                                  value=empty_string(self.title)),
                 search.TextField(name='status',
                                  value=empty_string(self.status)),
                 search.DateField(name='due', value=empty_date(self.due)),
                 search.TextField(name='about_kind',
                                  value=empty_string(self.about_kind)),
                 search.TextField(name='about_item',
                                  value=empty_string(self.about_item)),
                 search.TextField(name='title_autocomplete',
                                  value=empty_string(title_autocomplete)),
             ])
     my_index = search.Index(name="GlobalIndex")
     my_index.put(my_document)
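
This example and the next both call a tokenize_autocomplete helper that is not shown. A minimal sketch of what such a prefix tokenizer presumably looks like (an assumption, not the projects' actual implementation):

def tokenize_autocomplete(phrase):
    # Emit every prefix of every word, e.g. 'task' -> 't', 'ta', 'tas', 'task',
    # so that the autocomplete field matches partially typed queries.
    tokens = []
    for word in phrase.split():
        for i in range(1, len(word) + 1):
            tokens.append(word[:i])
    return tokens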
コード例 #16
0
 def _post_put_hook(self, future):
     tokens = tokenize_autocomplete(self.tag)
     value = ','.join(tokens)
     doc = search.Document(doc_id=self.key.string_id(),
                           fields=[search.TextField(name='tag', value=value)])
     add_result = search.Index('tag').put(doc)
     print(add_result)
コード例 #17
0
    def generate_static_report(self, domainUrl):
        siteReport = SiteReport.gql('WHERE url = :url', url=domainUrl).get()

        if siteReport is None:
            self.response.set_status(404)
            self.response.write('Report not found')
            return

        siteRating = SiteRating.all().filter('domain =', domainUrl).get()
        userRating = None
        if siteRating is not None:
            userRating = siteRating.rating_overall

        self.addJavaScript('https://www.google.com/jsapi')
        self.addJavaScript('/scripts/staticReport.js')

        baseUrl = 'http://' + self.current_instance['url']

        values = {
            'baseUrl': baseUrl,
            'domain': domainUrl,
            'userRating': userRating,
            'domainLength': len(domainUrl.replace('.com', '')),
            'sbOptions': reportSections,
            'generatedDate': siteReport.creationDate.date().isoformat(),
            'generatedDateTime': siteReport.creationDate.isoformat(),
            'pageTitle':
            '%(domainUrl)s SEO and SEM performance metrics - %(siteName)s' % {
                'domainUrl': domainUrl.capitalize(),
                'siteName': self.current_instance['name']
            },
            'pageDescription':
            'Review %(domainUrl)s website report including SEO and SEM KPI and improvements. Learn how to do better at SERP to increase conversions.'
            % {
                'domainUrl': domainUrl
            },
            'addthisPubId': addthisPubId
        }

        self.set_twitter_card(domainUrl)

        tasks = library.task.manager.findAll()

        all_data = {}
        actions = []

        for task in tasks:
            subreport = task.getSavedReport(domainUrl)
            task.suggest_actions(actions, subreport, domainUrl)

        site_reviews = SiteReview.all().filter('domain = ',
                                               domainUrl).fetch(limit=5)
        values['user_reviews'] = site_reviews

        values['shortUrl'] = self.request.url
        if config.debug_active is False:
            try:
                values['shortUrl'] = createShortUrl(self.request.url)
            except:
                logging.error(sys.exc_info()[1])

        statuses = {
            'good': 0,
            'regular': 0,
            'bad': 0,
        }

        for action in actions:
            statuses[action['status']] = statuses[action['status']] + 1

        totalStatuses = sum(statuses.values())

        values['loadTimeMs'] = 0
        values['actions'] = actions

        html = self.renderTemplate('staticReport.html', values)
        beauty = BeautifulSoup(html)
        beauty.find(id='score').contents[0].replace_with(str(siteReport.score))

        beauty.find(id='goodStatuses').string.replace_with(
            str(statuses['good']))
        beauty.find(id='regularStatuses').string.replace_with(
            str(statuses['regular']))
        beauty.find(id='badStatuses').string.replace_with(str(statuses['bad']))

        if totalStatuses > 0:
            beauty.find('div', 'progress-bar progress-bar-success'
                        )['style'] = 'width: %d%%;' % (
                            (statuses['good'] * 100) / totalStatuses)
            beauty.find('div', 'progress-bar progress-bar-warning'
                        )['style'] = 'width: %d%%;' % (
                            (statuses['regular'] * 100) / totalStatuses)
            beauty.find('div', 'progress-bar progress-bar-danger'
                        )['style'] = 'width: %d%%;' % (
                            (statuses['bad'] * 100) / totalStatuses)

        for task in tasks:
            subreport = task.getSavedReport(domainUrl)
            if dict == type(subreport) and 'content' in subreport:
                all_data.update(subreport['content'])

            all_data[task.getName()] = subreport

            task.updateView(beauty, subreport)

        try:
            report_id = str(siteReport.key().id())
            index = search.Index(name='domains')
            doc = index.get(report_id)
            if doc is None:
                fields = [
                    search.TextField(name='url', value=domainUrl),
                    #search.TextField( name = 'title', value = all_data['pageTitle'] ),
                    #search.TextField( name = 'description', value = all_data['pageDescription'] ),
                    search.DateField(name='generation_date',
                                     value=siteReport.creationDate.date()),
                ]
                if 'pageKeywords' in all_data:
                    fields.append(
                        search.TextField(name='keywords',
                                         value=','.join(
                                             all_data['pageKeywords'])))

                doc = search.Document(doc_id=report_id, fields=fields)
                index.put(doc)
        except search.Error:
            logging.exception('Put failed')

        return beauty.encode(formatter=None)
コード例 #18
0
def document():
    return search.Document(
        doc_id='doc1',
        fields=[search.TextField(name='title', value='Meep: A biography')])
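
A short usage sketch for the fixture above; the index name 'books' is an illustrative assumption:

from google.appengine.api import search

def index_and_fetch():
    index = search.Index(name='books')
    put_result = index.put(document())[0]  # put() returns a list of PutResults
    return index.get(put_result.id)        # read the document back by its id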
コード例 #19
0
    def post(self):
        # Get the databook name
        databook_name = get_databook_name(self.request.get('db'))
        # Get the name of the full-text search index
        databook_indexname = get_databook_indexname(self.request.get('db'))
        # Initialize the display message
        message_data = ''

        # Check the article title
        req_title = self.request.get('title').strip()
        if not req_title:
            self.redirect(mainpage_url + '?' +
                          urllib.urlencode({'db': databook_name}))
            return

        # Check for admin login
        admin_login = False
        if users.is_current_user_admin():
            admin_login = True

        # Determine whether writing is disabled
        write_enabled = True
        write_disabled_message = ''
        if not capabilities.CapabilitySet('datastore_v3',
                                          ['write']).is_enabled():
            write_enabled = False
            write_disabled_message = '【現在書き込みは禁止しています】'  # "Writing is currently disabled"

        # Check whether to update the date/time (default OFF)
        datechg_flag = 0
        if self.request.get('datechg') == '1':
            datechg_flag = 1

        # Look up the article (a single entry, by title)
        articles_query = Article.query(
            Article.title == req_title,
            ancestor=databook_key(databook_name)).order(-Article.date)
        articles = articles_query.fetch(1)
        # Create a new article if none exists
        if len(articles) < 1:
            article = Article(parent=databook_key(databook_name))
            article.title = req_title
            article.author = ''
            article.content = ''
            article.source = ''
            article.date = datetime.datetime.now()
            article.bkup_authors = []
            article.bkup_contents = []
            article.bkup_sources = []
            article.bkup_dates = []
            article.bkup_lastupdate = datetime.datetime.min
            article.search_doc_id = ''
            article.show_flag = 1
        else:
            article = articles[0]

        # Set the logged-in user name (unused for now)
        # if users.get_current_user():
        #     article.author = users.get_current_user().nickname()

        # Apply the submitted data to the article
        if self.request.get('delete') != '1' and self.request.get(
                'rename') != '1':
            article.author = self.request.get('author').strip()
            article.content = self.request.get('content').strip()
            article.source = self.request.get('source')
            if datechg_flag == 1:
                article.date = datetime.datetime.now()

        # Hide the article (for maintenance)
        if write_enabled and article.author.startswith('=hide'):
            article.show_flag = 0
        else:
            article.show_flag = 1

        # Delete the article (for maintenance)
        # if article.author.startswith('=delete'):
        if write_enabled and self.request.get('delete') == '1':
            if admin_login and article.bkup_dates:
                # (Also delete the related full-text search document, if any)
                if article.search_doc_id:
                    search.Index(name=databook_indexname).delete(
                        article.search_doc_id)
                article.key.delete()
            self.redirect(mainpage_url + '?' +
                          urllib.urlencode({'db': databook_name}))
            return

        # Delete a single full-text search document (for maintenance)
        if write_enabled and article.author.startswith('=index_delete'):
            if admin_login:
                doc_id = article.content
                if doc_id:
                    search.Index(name=databook_indexname).delete(doc_id)
            self.redirect(mainpage_url + '?' +
                          urllib.urlencode({'db': databook_name}))
            return

        # Delete all full-text search documents (for maintenance)
        if write_enabled and article.author.startswith('=all_index_delete'):
            if admin_login:
                search_index = search.Index(name=databook_indexname)
                while True:
                    doc_ids = [
                        doc.doc_id
                        for doc in search_index.get_range(ids_only=True)
                    ]
                    if not doc_ids:
                        break
                    search_index.delete(doc_ids)
            self.redirect(mainpage_url + '?' +
                          urllib.urlencode({'db': databook_name}))
            return

        # Rename the article title (for maintenance)
        rename_flag = 0
        if write_enabled and self.request.get('rename') == '1':
            new_title = self.request.get('newtitle').strip()
            if admin_login and new_title and new_title != article.title and article.bkup_dates:
                # Look up the article (a single entry, by the new title)
                articles_query = Article.query(
                    Article.title == new_title,
                    ancestor=databook_key(databook_name)).order(-Article.date)
                articles_temp = articles_query.fetch(1)
                # The title can be changed only if no article with it exists
                if len(articles_temp) < 1:
                    article.title = new_title
                    rename_flag = 1
                else:
                    rename_flag = 2
            else:
                rename_flag = 2

        # Save a backup
        # (Within 10 minutes of the last one, overwrite it instead of adding a new entry)
        if write_enabled and rename_flag == 0:
            time_diff_minutes = -1
            if article.bkup_lastupdate:
                time_diff = datetime.datetime.now() - article.bkup_lastupdate
                time_diff_minutes = time_diff.days * 24 * 60 + time_diff.seconds / 60
            if time_diff_minutes >= 0 and time_diff_minutes <= backup_time:
                # Overwrite the latest backup
                article.bkup_authors[0] = article.author
                article.bkup_contents[0] = article.content
                article.bkup_sources[0] = article.source
                article.bkup_dates[0] = article.date
            else:
                # Add a backup (up to 10 entries)
                article.bkup_authors.insert(0, article.author)
                article.bkup_contents.insert(0, article.content)
                article.bkup_sources.insert(0, article.source)
                article.bkup_dates.insert(0, article.date)
                if len(article.bkup_dates) > backup_num:
                    article.bkup_authors = article.bkup_authors[:backup_num]
                    article.bkup_contents = article.bkup_contents[:backup_num]
                    article.bkup_sources = article.bkup_sources[:backup_num]
                    article.bkup_dates = article.bkup_dates[:backup_num]
                article.bkup_lastupdate = datetime.datetime.now()

        # Register the full-text search document
        if write_enabled and (rename_flag == 0 or rename_flag == 1):
            date_str = article.date.replace(tzinfo=UTC()).astimezone(
                JapanTZ()).strftime('%Y-%m-%d %H:%M:%S %Z')
            doc_content = article.title + ' ' + article.author + ' ' + article.content + ' ' + date_str
            if article.search_doc_id:
                # Overwrite if the document is already registered
                doc = search.Document(doc_id=article.search_doc_id,
                                      fields=[
                                          search.TextField(name='title',
                                                           value=article.title,
                                                           language='ja'),
                                          search.TextField(name='content',
                                                           value=doc_content,
                                                           language='ja'),
                                          search.DateField(name='date',
                                                           value=article.date)
                                      ])
                put_result = search.Index(name=databook_indexname).put(doc)
            else:
                # Not registered yet: create a new document (and remember its ID)
                doc = search.Document(fields=[
                    search.TextField(
                        name='title', value=article.title, language='ja'),
                    search.TextField(
                        name='content', value=doc_content, language='ja'),
                    search.DateField(name='date', value=article.date)
                ])
                put_result = search.Index(name=databook_indexname).put(doc)
                # Note: doc.doc_id does not provide the ID here; it must be taken from the return value of put()
                # article.search_doc_id = doc.doc_id
                article.search_doc_id = put_result[0].id

        # Save the article to the databook
        if write_enabled:
            if rename_flag == 0:
                article.put()
                message_data = message_data + '(セーブしました)'  # "Saved"
            elif rename_flag == 1:
                article.put()
                message_data = message_data + '(タイトルを変更しました)'  # "Title changed"
            else:
                message_data = message_data + '(タイトルを変更できません(名称が不正もしくは同名が存在する等))'  # "Cannot change the title (invalid name, duplicate title, etc.)"
        else:
            message_data = message_data + '(書き込みが禁止されています)'  # "Writing is disabled"

        # # Return to the main page
        # self.redirect(mainpage_url + '?' + urllib.urlencode({'db': databook_name}))

        # Character-encoding conversion (for display)
        message_data = message_data.decode('utf-8')
        write_disabled_message = write_disabled_message.decode('utf-8')

        # Convert dates to local time (for display)
        article.date = article.date.replace(tzinfo=UTC()).astimezone(JapanTZ())
        for i in range(len(article.bkup_dates)):
            article.bkup_dates[i] = article.bkup_dates[i].replace(
                tzinfo=UTC()).astimezone(JapanTZ())

        # Render the edit-page template with the article data embedded
        template = jinja_environment.get_template(editpage_html)
        self.response.out.write(
            template.render(databook_name=databook_name,
                            article=article,
                            update_url=update_url,
                            runpage_url=runpage_url,
                            mainpage_url=mainpage_url,
                            editpage_url=editpage_url,
                            message_data=message_data,
                            admin_login=admin_login,
                            datechg_flag=datechg_flag,
                            write_disabled_message=write_disabled_message))
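
UTC and JapanTZ above are tzinfo helpers whose definitions are not shown; a minimal sketch of what they are assumed to look like:

import datetime

class UTC(datetime.tzinfo):
    def utcoffset(self, dt): return datetime.timedelta(0)
    def tzname(self, dt): return 'UTC'
    def dst(self, dt): return datetime.timedelta(0)

class JapanTZ(datetime.tzinfo):
    # Japan Standard Time is UTC+9 with no daylight saving time.
    def utcoffset(self, dt): return datetime.timedelta(hours=9)
    def tzname(self, dt): return 'JST'
    def dst(self, dt): return datetime.timedelta(0)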
コード例 #20
0
      logging.warning('News update failed')
  else:
    logging.info('News not updated, private channel/old url')
  
  if not url_title:
    url_title=''.join(url.split('/')[-1:])
  #logging.debug('Title: %s' % (url_title))

  # Create Document (FullTextSearch)
  doc_id=str(urlinstance.key.id())
  try:
    doc = search.Document(doc_id=doc_id,fields=[
      search.TextField(name='channel', value=channel),
      search.TextField(name='user', value=user),
      search.TextField(name='url', value=url),
      search.DateField(name='date',value=date),
      search.TextField(name='title', value=url_title),
      search.TextField(name='comment', value=comment, language='fi'),
      search.TextField(name='tag', value=tags, language='fi'),
      search.NumberField(name='rate', value=0)
      ],language='en')
  except Exception, e:
    logging.error('Error %s' % (e))
  #logging.debug('Document fields updated')
            
  if urlinstance.document_date:
    try:
      taskqueue.add(name=doc_id+'_post',queue_name='document',url='/tasks/update_document', params={'doc_id': doc_id})
    except taskqueue.TombstonedTaskError:
      logging.warning('TombstonedTaskError %s_post' % (doc_id))
    except taskqueue.TaskAlreadyExistsError:
      logging.warning('TaskAlreadyExistsError %s_post' % (doc_id))
コード例 #21
0
def add_to_search_index(object_id, labels, metadata, mapped_category=None,
                        most_similar_category=None, thumb_url=None,
                        preview_url=None):
  """Adds document to the search index."""
  try:
    # Define document search fields - these can be queried using keyword search
    fields = [
        search.TextField(name='image_id', value=object_id),
    ]

    # Define document facet fields
    facets = []

    # Add label descriptions into search and facet fields. Search API allows
    # multiple values for the same field.
    for label in labels:
      fields.append(search.TextField(name='label',
                                     value=label['description']))
      facets.append(search.AtomFacet(name='label_facet',
                                     value=label['description']))

    # Add mapped category and most similar category as facets
    if mapped_category:
      fields.append(search.TextField(name='mapped_category',
                                     value=mapped_category))
      facets.append(search.AtomFacet(name='mapped_category_facet',
                                     value=mapped_category))

    if most_similar_category:
      fields.append(search.TextField(name='most_similar_category',
                                     value=most_similar_category))
      facets.append(search.AtomFacet(name='most_similar_category_facet',
                                     value=most_similar_category))

    # We're not using a database, so store the image URLs in the index.
    # We don't need to search on the image URLs, but we do need them to
    # display images in the user interface.

    # Add thumbnail url
    if thumb_url:
      fields.append(search.TextField(name='thumb_url',
                                     value=thumb_url))

    # Add preview url
    if preview_url:
      fields.append(search.TextField(name='preview_url',
                                     value=preview_url))

    # Add any other object metadata as document search fields
    for k, v in metadata.iteritems():
      fields.append(search.TextField(name=k, value=v))

    logging.info('add fields: %s', str(fields))
    logging.info('add facets: %s', str(facets))

    # Add the document to the search index
    d = search.Document(doc_id=hashing.hash_value(object_id),
                        fields=fields,
                        facets=facets)
    add_result = search.Index(name='imagesearch').put(d)
    doc_id = add_result[0].id

    return doc_id

  except search.Error:
    logging.exception('Something went wrong in add_to_search_index()')
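
The AtomFacet values stored above can drive faceted queries. A minimal sketch using the facet names defined in this example; the function name and query string are assumptions:

from google.appengine.api import search

def search_with_facets(query_string):
    index = search.Index(name='imagesearch')
    results = index.search(search.Query(
        query_string=query_string,
        return_facets=[
            search.FacetRequest('label_facet'),
            search.FacetRequest('mapped_category_facet'),
        ]))
    # results.facets holds FacetResult objects with per-value document counts.
    return results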
コード例 #22
0
    def create_or_update(self,
                         name=None,
                         slug=None,
                         description=None,
                         price=None,
                         schedule=None,
                         neighborhood_name=None,
                         location=None,
                         location_code=None,
                         price_rating=None,
                         api=False,
                         image_url=None,
                         url=None,
                         user=None,
                         organizationName=None,
                         organizationUrl=None,
                         **kwargs):

        self.user = user

        if url:
            # this does not allow file-based passes to have updated listings on Passtiche
            return self.pass_file(url)
        if not slug:
            slug = PassTemplate.get_slug(name) or 'event'
        keyname = slug
        if location_code:
            keyname += "-%s" % location_code
        if self.user:
            keyname += "~%s" % self.user.short_code

        logging.info(kwargs)
        if kwargs.get('new'):
            pass_template = None
        else:
            logging.info('getting pass template %s' % keyname)
            pass_template = PassTemplate.get_by_key_name(keyname)
            if pass_template:
                logging.info('found pass template %s' % keyname)

        if not pass_template:
            logging.info('creating new pass template')
            pass_template = PassTemplate(key_name=keyname,
                                         name=name,
                                         slug=slug)
            from utils import string as str_utils
            code = str_utils.genkey(length=4)
            pass_template.short_code = code

            if self.user and not self.user.is_admin():
                pass_template.owner = self.user
            if not location and location_code:
                from model.activity import Location
                location = Location.get_by_key_name(location_code)
            if location:
                location_name = location.name
            else:
                location_name = ''

            searchFields = [
                search.TextField(name='name', value=name),
                search.TextField(name='code', value=code),
                search.TextField(name='keyname', value=keyname),
                search.TextField(name='loc', value=(location_code or '')),
                search.TextField(name='location_name', value=location_name),
                search.DateField(name='date',
                                 value=datetime.datetime.now().date())
            ]

            if self.user:
                searchFields.append(
                    search.TextField(name='owner', value=self.user.short_code))
            pass_doc = search.Document(fields=searchFields)
            logging.info('adding pass doc to index')
            search.Index(name=_INDEX_NAME).put(pass_doc)

        elif name:
            pass_template.name = name

        if description:
            pass_template.description = description
        if price:
            pass_template.price = int(price)
        if price_rating is not None:
            pass_template.price_rating = price_rating

        if organizationName:
            pass_template.organizationName = organizationName

        if organizationUrl:
            if not organizationUrl.startswith('http://'):
                organizationUrl = 'http://%s' % organizationUrl
            pass_template.organizationUrl = organizationUrl

        if image_url:
            pass_template.image_url = image_url

        if schedule:

            pass_template.schedule = schedule

            pass_template = set_time_format(pass_template)

            # TODO: start time
        if neighborhood_name:
            pass_template.neighborhood_name = neighborhood_name

        if location_code:
            if location:
                loc = location
            else:
                from model.activity import Location
                loc = Location.get_by_key_name(location_code)
            pass_template.location_code = loc.code
            pass_template.location_name = loc.name

        pass_template.get_location(reset=True)
        return pass_template
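
One design note on the indexing above: pass_doc is stored without a doc_id, so re-creating a template with the same keyname adds a second document rather than replacing the first. Reusing the keyname as the doc_id would make the put idempotent; a hedged alternative sketch, not what the code above does:

pass_doc = search.Document(doc_id=keyname, fields=searchFields)
search.Index(name=_INDEX_NAME).put(pass_doc)  # same doc_id: overwrites in place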
コード例 #23
0
ファイル: handlers_.py プロジェクト: peterretief/bp_mol
def CreateVesselData(text, row, col):
    return search.Document(
        fields=[search.TextField(name='text', value=text),
                search.NumberField(name='row', value=row),
                search.NumberField(name='col', value=col),
                search.DateField(name='date', value=datetime.now().date())])
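
A usage sketch for the factory above; the index name 'vessels' and the argument values are illustrative, and from datetime import datetime is assumed to be in scope:

from google.appengine.api import search

index = search.Index(name='vessels')
index.put(CreateVesselData('cell text', 1, 1))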
コード例 #24
0
 def _create_search_document(self, fields=None):
     document = search.Document(
         fields=fields if fields else self.default_fields)
     result = self.index.put(document)[0]
     # Document.doc_id is read-only, so the id generated by the index is
     # stashed on the private attribute after the put.
     document._doc_id = result.id
     return document
コード例 #25
0
    def post(self):

        checkbox_vals = self.request.get_all("selected_papers")

        itr = 0
        params_html = {}

        #----------------------Check If a User Has Signed In-------------------------
        admin_flag = 0
        adding_user = None
        temp = self.request.cookies.get('user_id')
        if temp:
            userid = valid_hash_cookie(temp)
            if userid:
                params_html['userid'] = userid
                user = db.GqlQuery(
                    "SELECT * FROM UserPass_User WHERE user_id = '%s'" %
                    userid)
                user = user.get()
                if user:
                    admin_flag = user.isadmin
                    adding_user = user

        params_html['admin_flag'] = admin_flag
        #----------------------------------------------------------------------------

        search_results = []
        search_results_sorted_price = []
        match_author_id = 0
        query_criteria = ''
        paper_title = self.request.get('q')
        if paper_title:
            query_criteria = "?p=title%3A" + paper_title.replace('_', '%20')

        paper_authors = self.request.get('a')
        if paper_authors:
            match_author_id = 1
            if query_criteria:
                query_criteria = query_criteria + "+and+author%3A" + paper_authors.replace(
                    '_', '&nbsp')
            else:
                query_criteria = "?p=author%3A" + paper_authors.replace(
                    '_', '&nbsp')

        query_criteria = query_criteria + "&of=xm&rg=200"

        url = "http://infoscience.epfl.ch/search" + query_criteria
        confirmRequest = urllib.urlopen(url)
        response_str = confirmRequest.read()

        root = ET.fromstring(response_str)
        papers_list = []
        #self.response.out.write(response_str)
        for record in root:

            #--------------------------------Initialize Variables------------------------------
            paper_authors = []
            publishing_labs = []
            pdf_link = ''
            web_link = ''
            data_link = ''
            code_link = ''
            demo_link = ''
            pdf_public = 0
            publication_status = ''
            github_link = ''

            publication_type = ''
            publication_venue = ''
            publisher = ''
            publication_pages = ''
            publication_vol = ''
            publication_no = ''
            publication_details = ''
            paper_title = ''
            publication_year = ''
            paper_abstract = ''
            paper_keywords = []
            authors_emails = []
            #----------------------------------------------------------------------------------
            for child in record:
                att = child.attrib

                #--------------------------Get the Authors of the Paper------------------------
                if (att['tag'] == '909'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'p'):
                            publishing_labs.append(codings.attrib)
                #------------------------------------------------------------------------------

                #---------------------------Get the Publication Title--------------------------
                if (att['tag'] == '245'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'a'):
                            if isinstance(codings.text, str):
                                paper_title = unicode(codings.text, 'utf-8')
                            else:
                                paper_title = unicode(codings.text)
                #------------------------------------------------------------------------------

                #----------------------------Get the Publication Date--------------------------
                if (att['tag'] == '260'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'c'):
                            publication_year = codings.text
                #------------------------------------------------------------------------------

                #--------------------------Get the Authors of the Paper------------------------
                if (att['tag'] == '700'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'a'):
                            if isinstance(codings.text, str):
                                paper_authors.append(
                                    unicode(swap_first_last_name(codings.text),
                                            'utf-8'))
                            else:
                                paper_authors.append(
                                    unicode(swap_first_last_name(
                                        codings.text)))
                            authors_emails.append('')
                #------------------------------------------------------------------------------
                #--------------Get the Link to the Paper on Infoscience Website----------------
                if (att['tag'] == '024'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'a'):
                            link_val = codings.text
                            temp = re.finditer('oai:infoscience.epfl.ch:(.*)',
                                               link_val)
                            if temp:
                                for item in temp:
                                    web_link = link_val[item.start():item.end(
                                    )]
                                    web_link = "http://infoscience.epfl.ch/record/" + web_link[
                                        24:len(web_link)]
                #self.response.out.write(web_link)
                #------------------------------------------------------------------------------

                #-------Get the Link to the PDF File, The Simulation Code and Data Files-------
                if (att['tag'] == '856'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'u'):
                            link_val = codings.text
                            if ('github' in link_val):
                                github_link = link_val
                            elif ('.pdf' in link_val):
                                pdf_link = link_val

                                if (d['code'] == 'x'):
                                    if (codings.text == 'PUBLIC'):
                                        pdf_public = 1
                            elif ('.zip' in link_val):
                                data_link = link_val

                        #........For Final Version of the Website and Naming Conventions........
                        # if (d['code'] == 'z'):
                        #     val = codings.text
                        #     if (val == '')
                        #.......................................................................
                #------------------------------------------------------------------------------

                #---------------------------Get the Publication Status-------------------------
                if (att['tag'] == '973'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 's'):
                            ky = codings.text
                            publication_status = publication_status_match[ky]

                        if (d['code'] == 'x'):
                            if (codings.text == 'PUBLIC'):
                                pdf_public = 1
                #------------------------------------------------------------------------------

                #---------------------------Get the Publication Type---------------------------
                if (att['tag'] == '980'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'a'):
                            ky = codings.text
                            publication_type = publication_type_match[ky]
                #------------------------------------------------------------------------------

                #----------------------Get the Publication Venue Details-----------------------
                if (publication_type == 'CONF'):
                    if (att['tag'] == '711'):
                        for codings in child:
                            d = codings.attrib
                            if (d['code'] == 'a'):
                                publication_venue = codings.text
                            if (d['code'] == 'c'):
                                publication_venue = publication_venue + '; ' + codings.text
                            if (d['code'] == 'd'):
                                publication_venue = publication_venue + '; ' + codings.text
                #------------------------------------------------------------------------------

                #---------------------------Get Publisher's Details----------------------------
                if (att['tag'] == '773'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'c'):
                            publication_pages = codings.text
                        if (d['code'] == 'n'):
                            publication_no = codings.text
                        if (d['code'] == 'v'):
                            publication_vol = codings.text
                        if (d['code'] == 'p'):
                            publisher = codings.text
                #------------------------------------------------------------------------------

                #----------------------------Get the Paper's Abstract--------------------------
                if (att['tag'] == '520'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'a'):
                            paper_abstract = codings.text
                #------------------------------------------------------------------------------

                #----------------------------Get the Paper's Keywords--------------------------
                if (att['tag'] == '653'):
                    for codings in child:
                        d = codings.attrib
                        if (d['code'] == 'a'):
                            if isinstance(codings.text, str):
                                paper_keywords.append(
                                    unicode(codings.text, 'utf-8'))
                            else:
                                paper_keywords.append(unicode(codings.text))

                #------------------------------------------------------------------------------

            #------------------------------Some Post Processing--------------------------------
            paper_authors_str = '; '.join(paper_authors)
            paper_keywords_str = ', '.join(paper_keywords)

            publication_details = paper_authors_str.replace(
                ',', '') + ', "' + paper_title + '", '
            if publisher:
                publication_details = publication_details + publisher + ', '

            if publication_vol:
                publication_details = publication_details + 'Vol. ' + publication_vol + ', '
            if publication_no:
                publication_details = publication_details + 'No. ' + publication_no + ', '
            if publication_pages:
                publication_details = publication_details + 'pp. ' + publication_pages + ', '

            publication_details = publication_details[0:len(publication_details
                                                            ) - 2]

            #----------------------------------------------------------------------------------

            itr = itr + 1
            if (str(itr) in (checkbox_vals)):
                papers_list.append(
                    list([
                        paper_title, paper_authors_str, paper_abstract,
                        pdf_link, publication_year, publisher, paper_keywords,
                        paper_authors, data_link, github_link,
                        publication_details, publication_status,
                        publication_type, publication_details, demo_link,
                        web_link, authors_emails
                    ]))

                #papers_list.append(list([paper_title,paper_authors_str,paper_abstract,web_link,publication_year,publisher,paper_keywords_str,data_link,github_link,publication_details]))

        #------------------------Add the Selected Papers to the Database------------------------
        params_html['paper_list'] = papers_list
        paper_added_flag = 0
        sucess_flags_list = []
        paper_addition_errors = []
        for paper in papers_list:

            #--------------------Check if the paper does not already exists---------------------
            paper_sucess_flag = ''
            addition_error = ''
            already_exists = 0
            query = db.GqlQuery("SELECT * FROM Papers_DB WHERE title = '%s'" %
                                paper[0])
            query = query.get()
            if query:
                already_exists = 1
                paper_key = query.key()
            #-----------------------------------------------------------------------------------
            temp_flag = 1
            if not already_exists:
                try:
                    p = Papers_DB(title=paper[0],
                                  publication_year=int(paper[4]),
                                  publisher=paper[5],
                                  keywords=paper[6],
                                  authors=paper[7],
                                  abstract=paper[2],
                                  authors_str=paper[1],
                                  publication_status=paper[11],
                                  publication_type=paper[12],
                                  biblio_str=paper[13],
                                  email_authors=paper[16])
                except:
                    #self.response.out.write('salam')
                    temp_flag = 0
                    stacktrace = traceback.format_exc()
                    logging.error("%s", stacktrace)
                    addition_error = str(stacktrace)
                    temp = re.finditer(
                        'BadValueError: Property (.*) is required',
                        str(stacktrace))
                    paper_sucess_flag = ''
                    if temp:
                        for item in temp:
                            addition_error = addition_error[item.start():item.
                                                            end()]
                            addition_error = addition_error[
                                23:len(addition_error)]
            else:
                p = query
                if paper[0]:
                    p.title = paper[0]
                else:
                    temp_flag = 0

                p.publication_year = int(paper[4])
                p.publisher = paper[5]
                p.keywords = paper[6]
                if paper[7]:
                    p.authors = paper[7]
                else:
                    temp_flag = 0

                p.abstract = paper[2]
                p.authors_str = paper[1]
                p.publication_status = paper[11]
                p.publication_type = paper[12]
                p.biblio_str = paper[13]
                p.email_authors = paper[16]

            if temp_flag:
                if (paper[14]):
                    p.demo_link = paper[14]
                if (paper[3]):
                    p.pdf_link = paper[3]
                if (paper[9]):
                    p.code_link = paper[9]
                if (paper[8]):
                    p.data_link = paper[8]
                if (paper[15]):
                    p.web_link = paper[15]

            temp_flag = 1
            if 'p' in locals():
                try:
                    p.put()
                except:
                    temp_flag = 0
                    #logging.error('Error adding the paper')
                    stacktrace = traceback.format_exc()

                    logging.error("%s", stacktrace)
                    addition_error = str(stacktrace)
                    paper_sucess_flag = ''

                    temp = re.finditer(
                        'BadValueError: Property(.*)is required',
                        str(stacktrace))
                    if temp:
                        for item in temp:
                            addition_error = addition_error[item.start():item.
                                                            end()]
                            addition_error = addition_error[
                                23:len(addition_error)]

                if temp_flag:
                    paper_sucess_flag = p.key()

            #--------------------Index the Paper for Future Search Queries----------------------
            if paper_sucess_flag:
                index = search.Index(name='PAPERS_INDEXES',
                                     namespace='PAPER_INDICES_NAMESPACE')
                key_val = p.key()

                key_val = str(key_val).replace('-', '_')
                fields = [
                    search.TextField(name='abstract', value=paper[2]),
                    search.TextField(name='doc_id', value=key_val),
                    #search.DateField(name='publication_date',value=datetime.datetime.now().date()),
                    search.TextField(name='title', value=paper[0]),
                    search.TextField(name='authors', value=paper[1]),
                    search.TextField(name='keywords', value=', '.join(paper[6])),
                    search.AtomField(name='pub_type', value=paper[12]),
                    search.TextField(name='publisher', value=paper[5]),
                    search.AtomField(name='pub_status', value=paper[11]),
                    search.NumberField(name='pub_year', value=int(paper[4])),
                    search.TextField(name='dockey', value=str(p.key()))
                ]

                d = search.Document(doc_id=key_val, fields=fields)
                try:
                    add_result = search.Index(name='PAPERS_INDEXES').put(d)
                    #self.response.out.write('salam')
                except search.Error:
                    self.response.out.write(
                        "Sorry we weren't able to add this!")
            #-----------------------------------------------------------------------------------

            #-------------------Check if the USerID Matches any of the Authors------------------
            if match_author_id:
                if adding_user:
                    authors_links = []
                    for item in p.authors:
                        authorID = item.replace(" ", "")
                        authorID = authorID.replace(",", "")
                        authors_links.append(authorID)

                    if adding_user.author_id not in authors_links:
                        max_sim = 0
                        itr = 0

                        for item in authors_links:
                            if (SequenceMatcher(
                                    None, adding_user.author_id.lower(),
                                    item.lower()).ratio() > max_sim):
                                ind = itr
                                max_sim = SequenceMatcher(
                                    None, adding_user.author_id.lower(),
                                    item.lower()).ratio()
                                #self.response.out.write(str(max_sim))
                            itr = itr + 1
                        if max_sim > 0.85:
                            adding_user.author_id = authors_links[ind]
                            adding_user.put()

            #-----------------------------------------------------------------------------------
            #-----------------Add or Update the Author to the Authors Database------------------
            if paper_sucess_flag:
                authors_list = paper[7]
                for author in authors_list:
                    authorID = author.replace(" ", "")
                    authorID = authorID.replace(",", "")
                    user = db.GqlQuery(
                        "SELECT * FROM Authors_DB WHERE author_id = '%s'" %
                        authorID)
                    user = user.get()
                    if user:
                        #............................Update the Author..............................
                        keys = user.paper_keys
                        if (str(p.key()) not in keys):
                            keys.append(str(p.key()))
                            user.paper_keys = keys

                            titles = user.paper_titles
                            titles.append(p.title)
                            user.paper_titles = titles

                            dates = user.paper_dates
                            dates.append(str(p.publication_year))
                            user.paper_dates = dates

                            authors_str = user.other_authors
                            authors_str.append(p.authors_str)
                            user.other_authors = authors_str

                            user.put()
                            #...........................................................................
                    else:
                        #.....................Add the Author to the Database........................
                        ind = author.find(",")
                        first_name = author[0:ind]
                        last_name = author[ind + 2:len(author)]
                        u = Authors_DB(author_id=authorID,
                                       firstname=first_name,
                                       lastname=last_name,
                                       email_add='',
                                       paper_keys=list([str(p.key())]),
                                       paper_titles=list([p.title]),
                                       paper_dates=list(
                                           [str(p.publication_year)]),
                                       other_authors=list([p.authors_str]))
                        u.put()
                        #...........................................................................

                #-----------------------------------------------------------------------------------

            paper_added_flag = 1

            sucess_flags_list.append(paper_sucess_flag)
            paper_addition_errors.append(addition_error)

        params_html['added_flag'] = paper_added_flag
        params_html['added_paper_flag'] = sucess_flags_list
        params_html['paper_list'] = zip(papers_list, sucess_flags_list,
                                        paper_addition_errors)

        self.response.out.write(
            template.render('./html/display_newpapers.html', params_html))
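
One hardening note on the Papers_DB lookups above: interpolating the title straight into the GQL string breaks on titles containing quotes. db.GqlQuery supports positional parameter binding, as in this sketch:

query = db.GqlQuery('SELECT * FROM Papers_DB WHERE title = :1', paper[0])
existing = query.get()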
コード例 #26
0
 def get_update_document(self, fields):
     if not self.document_id:
         # must be a new entry
         return search.Document(fields=fields)
     return search.Document(fields=fields, doc_id=self.document_id)
コード例 #27
0
ファイル: task_scheduler.py プロジェクト: eakuefner/luci-py
def schedule_request(request):
  """Creates and stores all the entities to schedule a new task request.

  The number of entities created is 3: TaskRequest, TaskResultSummary and
  TaskToRun.

  The TaskRequest is saved first as a DB transaction, then TaskResultSummary and
  TaskToRun are saved as a single DB RPC. The Search index is also updated
  in-between.

  Arguments:
  - request: the TaskRequest entity saved in the DB.

  Returns:
    TaskResultSummary. TaskToRun is not returned.
  """
  dupe_future = None
  if request.properties.idempotent:
    # Find a previously run task that is also idempotent and completed. Start a
    # query to fetch items that can be used to dedupe the task. See the comment
    # for this property for more details.
    #
    # Do not use "cls.created_ts > oldest" here because this would require a
    # composite index. It's unnecessary because TaskRequest.key is mostly
    # equivalent to decreasing TaskRequest.created_ts, ordering by key works as
    # well and doesn't require a composite index.
    cls = task_result.TaskResultSummary
    h = request.properties.properties_hash
    dupe_future = cls.query(cls.properties_hash==h).order(cls.key).get_async()

  # At this point, the request is now in the DB but not yet in a mode where it
  # can be triggered or visible. Index it right away so it is searchable. If any
  # of remaining calls in this function fail, the TaskRequest and Search
  # Document will simply point to an incomplete task, which will be ignored.
  #
  # Creates the entities TaskToRun and TaskResultSummary but do not save them
  # yet. TaskRunResult will be created once a bot starts it.
  task = task_to_run.new_task_to_run(request)
  result_summary = task_result.new_result_summary(request)

  # Do not specify a doc_id, as they are guaranteed to be monotonically
  # increasing and searches are done in reverse order, which fits exactly the
  # created_ts ordering. This is useful because DateField is precise to the date
  # (!) and NumberField is signed 32 bits so the best it could do with EPOCH is
  # second resolution up to year 2038.
  index = search.Index(name='requests')
  packed = task_pack.pack_result_summary_key(result_summary.key)
  doc = search.Document(
      fields=[
        search.TextField(name='name', value=request.name),
        search.AtomField(name='id', value=packed),
      ])
  # Even if it fails here, we're still fine, as the task is not "alive" yet.
  search_future = index.put_async([doc])

  now = utils.utcnow()

  if dupe_future:
    # Reuse the results!
    dupe_summary = dupe_future.get_result()
    # Refuse tasks older than X days. This is due to the isolate server dropping
    # files. https://code.google.com/p/swarming/issues/detail?id=197
    oldest = now - datetime.timedelta(
        seconds=config.settings().reusable_task_age_secs)
    if dupe_summary and dupe_summary.created_ts > oldest:
      # If there's a bug, commenting out this block is sufficient to disable
      # the deduplication functionality.
      # Setting task.queue_number to None removes the task from scheduling.
      task.queue_number = None
      _copy_entity(dupe_summary, result_summary, ('created_ts', 'name', 'user'))
      result_summary.properties_hash = None
      result_summary.try_number = 0
      result_summary.cost_saved_usd = result_summary.cost_usd
      # Zap costs_usd only after cost_usd has been copied into cost_saved_usd.
      result_summary.costs_usd = []
      result_summary.deduped_from = task_pack.pack_run_result_key(
          dupe_summary.run_result_key)

  # Get parent task details if applicable.
  parent_task_keys = None
  if request.parent_task_id:
    parent_run_key = task_pack.unpack_run_result_key(request.parent_task_id)
    parent_task_keys = [
      parent_run_key,
      task_pack.run_result_key_to_result_summary_key(parent_run_key),
    ]

  result_summary.modified_ts = now

  # Storing these entities makes this task live. It is important at this point
  # that the HTTP handler returns as fast as possible, otherwise the task will
  # be run but the client will not know about it.
  def run():
    ndb.put_multi([result_summary, task])

  def run_parent():
    # This one is slower.
    items = ndb.get_multi(parent_task_keys)
    k = result_summary.task_id
    for item in items:
      item.children_task_ids.append(k)
      item.modified_ts = now
    ndb.put_multi(items)

  # Raising here propagates to the caller and aborts the call.
  futures = [datastore_utils.transaction_async(run)]
  if parent_task_keys:
    futures.append(datastore_utils.transaction_async(run_parent))

  try:
    search_future.get_result()
  except search.Error:
    # Do not abort the task; for now, search indexing is best effort.
    logging.exception('Put failed')

  for future in futures:
    # Check for failures; a failed transaction raises here, aborting the call.
    future.get_result()

  stats.add_task_entry(
      'task_enqueued', result_summary.key,
      dimensions=request.properties.dimensions,
      user=request.user)
  return result_summary
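Since the documents above are stored without an explicit doc_id, the auto-assigned ids increase monotonically and, per the comment in the code, searches return results in reverse doc_id order, i.e. newest tasks first. A minimal query sketch under that assumption, reusing the index and field names from schedule_request() (the function name is illustrative):

from google.appengine.api import search

def search_requests(text, limit=50):
    # Full-text search over task names in the 'requests' index; the newest
    # tasks come back first because results arrive in decreasing doc_id order.
    index = search.Index(name='requests')
    query = search.Query(
        query_string='name:%s' % text,
        options=search.QueryOptions(limit=limit))
    # The 'id' atom field holds the packed TaskResultSummary key.
    return [doc.field('id').value for doc in index.search(query)]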
Code Example #28
    def update_search_index(self):
        """
        Updates the address search index with the values of this address.
        """

        # Gather information for the index
        fields = []

        # Append default fields
        for field_def in [
                Bunch(val=self.ct,
                      name=u"creation_timestamp",
                      ftype=search.DateField,
                      repluml=False,
                      char1=False),
                Bunch(val=self.cu,
                      name=u"creation_user",
                      ftype=search.TextField,
                      repluml=False,
                      char1=False),
                Bunch(val=self.et,
                      name=u"edit_timestamp",
                      ftype=search.DateField,
                      repluml=False,
                      char1=False),
                Bunch(val=self.eu,
                      name=u"edit_user",
                      ftype=search.TextField,
                      repluml=False,
                      char1=False),
                Bunch(val=self.kind,
                      name=u"kind",
                      ftype=search.AtomField,
                      repluml=False,
                      char1=False),
                Bunch(val=self.organization,
                      name=u"organization",
                      ftype=search.TextField,
                      repluml=True,
                      char1=False),
                Bunch(val=self.position,
                      name=u"position",
                      ftype=search.TextField,
                      repluml=True,
                      char1=True),
                Bunch(val=self.salutation,
                      name=u"salutation",
                      ftype=search.TextField,
                      repluml=True,
                      char1=True),
                Bunch(val=self.first_name,
                      name=u"first_name",
                      ftype=search.TextField,
                      repluml=True,
                      char1=True),
                Bunch(val=self.last_name,
                      name=u"last_name",
                      ftype=search.TextField,
                      repluml=True,
                      char1=True),
                Bunch(val=self.nickname,
                      name=u"nickname",
                      ftype=search.TextField,
                      repluml=True,
                      char1=True),
                Bunch(val=self.street,
                      name=u"street",
                      ftype=search.TextField,
                      repluml=True,
                      char1=True),
                Bunch(val=self.postcode,
                      name=u"postcode",
                      ftype=search.TextField,
                      repluml=False,
                      char1=True),
                Bunch(val=self.city,
                      name=u"city",
                      ftype=search.TextField,
                      repluml=True,
                      char1=True),
                Bunch(val=self.district,
                      name=u"district",
                      ftype=search.TextField,
                      repluml=True,
                      char1=False),
                Bunch(val=self.land,
                      name=u"land",
                      ftype=search.TextField,
                      repluml=True,
                      char1=False),
                Bunch(val=self.country,
                      name=u"country",
                      ftype=search.TextField,
                      repluml=True,
                      char1=False),
                Bunch(val=self.gender,
                      name=u"gender",
                      ftype=search.AtomField,
                      repluml=False,
                      char1=False),
                Bunch(val=self.category_items,
                      name=u"category",
                      ftype=search.AtomField,
                      repluml=False,
                      char1=False),
                Bunch(val=self.tag_items,
                      name=u"tag",
                      ftype=search.AtomField,
                      repluml=False,
                      char1=False),
                Bunch(val=self.business_items,
                      name=u"business",
                      ftype=search.AtomField,
                      repluml=False,
                      char1=False),
        ]:
            values = field_def.val

            if not isinstance(values, (list, tuple)):
                values = [values]

            for value in values:
                if value is not None:
                    # Append default value
                    fields.append(
                        field_def.ftype(name=field_def.name, value=value))
                    # Append value without umlauts
                    if field_def.repluml and common.format_.has_umlauts(value):
                        fields.append(
                            field_def.ftype(
                                name=field_def.name,
                                value=common.format_.replace_umlauts(value)))
                    # Append first character
                    if field_def.char1 and len(value) > 0:
                        fields.append(
                            search.AtomField(name=field_def.name + u"_char1",
                                             value=value[0].lower()))

        # Fields that live in their own models
        for phone_item in self.phone_items:
            if phone_item.number is not None:
                fields.append(
                    search.TextField(name=u"phone", value=phone_item.number))

        for email_item in self.email_items:
            if email_item.email is not None:
                fields.append(
                    search.TextField(name=u"email", value=email_item.email))

        for url_item in self.url_items:
            if url_item.url is not None:
                fields.append(search.TextField(name=u"url",
                                               value=url_item.url))

        for free_defined_item in self.free_defined_items:

            name = common.format_.safe_ascii(
                free_defined_item.label.lower().replace(" ",
                                                        "_").replace("-", "_"))
            # These fields will not be indexed
            if name in cherrypy.config[
                    "search_index.address.free_defined_fields.exceptions"]:
                continue

            value = free_defined_item.text

            if value:  # skip missing or empty values

                if free_defined_item.value_type == u"unicode":
                    fields.append(search.TextField(name=name, value=value))
                    if common.format_.has_umlauts(free_defined_item.text):
                        fields.append(
                            search.TextField(
                                name=name,
                                value=common.format_.replace_umlauts(value),
                            ))
                elif free_defined_item.value_type == u"int":
                    fields.append(
                        search.NumberField(name=name, value=int(value)))
                elif free_defined_item.value_type == u"float":
                    fields.append(
                        search.NumberField(name=name, value=float(value)))
                elif free_defined_item.value_type == u"date":
                    fields.append(
                        search.DateField(
                            name=name,
                            value=common.format_.string_to_date(value)))

        for anniversary_item in self.anniversary_items:

            name = common.format_.safe_ascii(
                anniversary_item.label.lower().replace(" ",
                                                       "_").replace("-", "_"))
            value = u""

            if anniversary_item.year and anniversary_item.month and anniversary_item.day:
                value = datetime.date(int(anniversary_item.year),
                                      int(anniversary_item.month),
                                      int(anniversary_item.day))
                fields.append(search.DateField(name=name, value=value))
            else:
                if anniversary_item.year:
                    value += unicode(anniversary_item.year) + "-"
                if anniversary_item.month:
                    value += unicode(anniversary_item.month).rjust(2,
                                                                   "0") + "-"
                if anniversary_item.day:
                    value += unicode(anniversary_item.day).rjust(2, "0")
                value = value.rstrip("-")

                fields.append(search.TextField(name=name, value=value))

        # for note_item in self.note_items:
        #     if common.format_.has_umlauts(note_item.text):
        #         fields.append(search.TextField(
        #             name = u"note", value = common.format_.replace_umlauts(note_item.text)
        #         ))
        #     assert isinstance(note_item, NoteItem)
        #     fields.append(search.TextField(name = u"note", value = note_item.text))

        # for journal_item in self.journal_items:
        #     if common.format_.has_umlauts(journal_item.text):
        #         fields.append(search.TextField(
        #             name = u"journal", value = common.format_.replace_umlauts(journal_item.text)
        #         ))
        #     assert isinstance(journal_item, JournalItem)
        #     fields.append(search.TextField(name = u"journal", value = journal_item.text))

        # for agreement_item in self.agreement_items:
        #     if common.format_.has_umlauts(agreement_item.text):
        #         fields.append(search.TextField(
        #             name = u"agreement", value = common.format_.replace_umlauts(agreement_item.text)
        #         ))
        #     assert isinstance(agreement_item, AgreementItem)
        #     fields.append(search.TextField(name = u"agreement", value = agreement_item.text))

        # Document
        document = search.Document(doc_id=self.key.urlsafe(),
                                   fields=fields,
                                   language=cherrypy.config["LANGUAGE"])

        # Add/update index
        index = search.Index(name="Address")
        index.put(document)
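The *_char1 atom fields written above enable cheap first-letter filtering, e.g. for an alphabetical register. A minimal query sketch against the 'Address' index; because the doc_id is the entity's urlsafe key, matching datastore keys can be reconstructed directly (the function name is illustrative):

from google.appengine.api import search
from google.appengine.ext import ndb

def addresses_by_first_letter(letter, limit=100):
    # Filter on the last_name_char1 atom written by update_search_index().
    index = search.Index(name='Address')
    query = search.Query(
        query_string=u'last_name_char1:%s' % letter.lower(),
        options=search.QueryOptions(limit=limit, ids_only=True))
    return [ndb.Key(urlsafe=doc.doc_id) for doc in index.search(query)]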
Code Example #29
    def post(self):

        json_string = self.request.body
        dict_object = json.loads(json_string)

        stream_name = dict_object['name']
        stream_cover_url = dict_object['cover_url']
        tags = dict_object['tags']
        emails = dict_object['emails']
        email_message = dict_object['email_message']
        owner = dict_object['owner']

        same_name = Stream.query(Stream.name == stream_name).fetch()

        if not same_name:

            # Store the stream entity in NDB
            stream = Stream(name=stream_name,
                            cover_url=stream_cover_url,
                            tags=tags,
                            photos=[],
                            owner=owner,
                            views=0,
                            views_list=[])
            stream_key = stream.put()
            stream_id = str(stream_key.id())

            # Search indexing. The geopoint is random, apparently a
            # placeholder for real location data.
            latitude = random.uniform(-90, 90)
            longitude = random.uniform(-180, 180)
            geopoint = search.GeoPoint(latitude, longitude)
            search_tags = ' '.join(tags)
            tokenized_name = self.tokenize_autocomplete(stream_name)
            tokenized_tags = self.tokenize_autocomplete(search_tags)
            tokenized_name = ','.join(tokenized_name)
            tokenized_tags = ','.join(tokenized_tags)
            search_index = search.Document(
                doc_id=stream_id,
                fields=[
                    search.TextField(name='name', value=stream_name),
                    search.TextField(name='tokenized_name',
                                     value=tokenized_name),
                    search.TextField(name='cover_url', value=stream_cover_url),
                    search.TextField(name='tokenized_tags',
                                     value=tokenized_tags),
                    search.TextField(name='tags', value=search_tags),
                    search.TextField(name="stream_id", value=stream_id),
                    search.DateField(name='date',
                                     value=datetime.datetime.now()),
                    search.GeoField(name='stream_location', value=geopoint)
                ])
            result = search.Index(name='stream').put(search_index)

            stream_url = '/view?id={0}'.format(stream_key.id())
            res = {
                "msg": "Stream Created",
                "success": True,
                "stream_url": stream_url
            }
            self.response.out.write(json.dumps(res))
            # self.response.location = '/view?id={0}'.format(stream_key.id())
        else:
            res = {
                "msg":
                "You tried to create a stream whose name is the same as an existing stream. The operation did not complete.",
                "success": False
            }
            self.response.out.write(json.dumps(res))
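The handler above relies on a tokenize_autocomplete() helper that is not shown. A plausible sketch of what it does, based on how tokenized_name and tokenized_tags are used: emit every prefix of every word, so that partial queries match the stored tokens.

def tokenize_autocomplete(phrase):
    # 'cat toy' -> ['c', 'ca', 'cat', 't', 'to', 'toy'], so a search for
    # 'ca' matches the stored tokens.
    tokens = []
    for word in phrase.lower().split():
        for i in range(1, len(word) + 1):
            tokens.append(word[:i])
    return tokens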
Code Example #30
def update_index(supplier):
    name = ','.join(util.tokenize_autocomplete(supplier.name))
    document = search_api.Document(
        doc_id=str(supplier.key.id()),
        fields=[search_api.TextField(name='name', value=name)])
    get_autocomplete_index().put(document)
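get_autocomplete_index() is likewise not shown; presumably it is a thin wrapper returning the index that update_index() writes to. A sketch under that assumption (the index name 'autocomplete' is a guess):

from google.appengine.api import search as search_api

def get_autocomplete_index():
    # Assumed helper: the real index name is not visible in this example.
    return search_api.Index(name='autocomplete')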