예제 #1
0
 def get(self, datasetID):
     """Create a new crawl for the given dataset and queue it to start right away.

     Redirects back to the dataset's detail page afterwards.
     """
     ds = Dataset.get_by_id(long(datasetID))
     new_crawl = Crawl(dataset=ds, status='QUEUED')
     new_crawl.put()
     # "Immediately" here means a 5-second delay before the crawl task fires.
     new_crawl.queue(5)
     return webapp2.redirect('/datasets/' + datasetID)
예제 #2
0
 def get(self, datasetID):
     """Delete a dataset along with all of its crawls and dump files.

     Dependent records are removed first, then the dataset itself;
     redirects to the dataset listing when done.
     """
     ds = Dataset.get_by_id(long(datasetID))

     for child in Crawl.all().filter('dataset =', ds).run():
         child.delete()

     for child in Dumpfile.all().filter('dataset =', ds).run():
         child.delete()

     ds.delete()
     logging.info('Deleted dataset ' + datasetID)
     return webapp2.redirect('/datasets')
예제 #3
0
 def get(self, datasetID):
     """Queue a crawl for the dataset after a delay given as '<hours>:<minutes>'.

     Reads the 'start' request parameter. On a well-formed value the crawl
     is created and queued; on a malformed value (wrong number of parts OR
     non-numeric parts) the user is redirected back with ?error=true.
     """
     startIn = self.request.get('start').split(':')
     if len(startIn) == 2:
         logging.info('Queuing harvest in ' + startIn[0] + ' hours ' + startIn[1] + ' minutes')
         try:
             # BUG FIX: int() raises ValueError on non-numeric input such as
             # 'ab:cd'; treat that the same as any other malformed value
             # instead of letting the request crash with a 500.
             seconds = int(startIn[0]) * 3600 + int(startIn[1]) * 60
         except ValueError:
             logging.info('Invalid crawl time: ' + self.request.get('start'))
             return webapp2.redirect('/datasets/' + datasetID + '?error=true')

         dataset = Dataset.get_by_id(long(datasetID))

         # TODO store 'interval' param in dataset object (if any)

         crawl = Crawl(dataset=dataset, status='QUEUED')
         crawl.put()
         crawl.queue(seconds)
         return webapp2.redirect('/datasets/' + datasetID)
     else:
         # TODO decent error handling
         logging.info('Invalid crawl time: ' + self.request.get('start'))
         return webapp2.redirect('/datasets/' + datasetID + '?error=true')
예제 #4
0
 def get(self, datasetID):
     """Serialize the dataset to JSON and write it as the response body."""
     payload = Dataset.get_by_id(long(datasetID)).toJSON()
     self.response.write(json.dumps(payload))
예제 #5
0
 def get(self, datasetID):
     """Render the detail page for a single dataset."""
     ds = Dataset.get_by_id(long(datasetID))
     context = {'dataset': ds}
     self.render_response('datasets/datasets_show.html', **context)