Example #1
0
 def post(self):
     """Handle a POST that re-runs a finished cloud job to analyze its cost.

     Reads ``req_type``, ``job_type``, ``id`` and ``instance_type`` from the
     request; when ``req_type`` is ``'analyze'`` it resubmits the job (using
     its CloudTracker provenance data) on the requested EC2 instance type
     with ``cost_replay`` enabled, and reports status as JSON.

     NOTE(review): this snippet appears truncated -- the ``result`` dict
     built in the final try/except is never written to the response here;
     confirm against the full source.
     """
     self.response.headers['Content-Type'] = 'application/json'

     req_type = self.request.get('req_type')
     job_type = self.request.get('job_type')
     # NOTE(review): 'id' shadows the builtin; it is the job identifier.
     id = self.request.get('id')
     instance_type = self.request.get('instance_type')

     if req_type == 'analyze':
         logging.info('Analyzing the cost...')

         logging.info('rerun cost analysis in '+instance_type)
         credentials =  self.user_data.getCredentials()
         access_key = credentials['EC2_ACCESS_KEY']
         secret_key = credentials['EC2_SECRET_KEY']
         backend_services = backendservice.backendservices(self.user_data)

         # Cost replay needs valid AWS credentials and at least one running
         # compute node of the requested instance type.
         if not self.user_data.valid_credentials or not backend_services.isOneOrMoreComputeNodesRunning(instance_type):
             logging.info('You must have at least one active '+instance_type+' compute node to run in the cloud.')
             self.response.write(json.dumps({
                 'status': False,
                 'msg': 'You must have at least one active '+instance_type+' compute node to run in the cloud.'
             }))
             return

         result = {}
         try:
                 uuid, _ = self.get_uuid_name(id, job_type)
                 logging.info('start to rerun the job {0} for cost analysis'.format(str(uuid)))
                 # Set up CloudTracker with user credentials and specified UUID to rerun the job
                 ct = CloudTracker(access_key, secret_key, str(uuid), self.user_data.getBucketName())
                 # if_tracking() is true while the job is still being tracked,
                 # i.e. no complete provenance data exists yet.
                 has_prov = not ct.if_tracking()
                 # If there is no provenance data for this job, report an error to the user
                 if not has_prov:
                     result = {'status':"fail",'msg':"The job with this ID does not exist or cannot be reproduced."}
                     self.response.write(json.dumps(result))
                     return

                 params = ct.get_input()

                 params['cost_analysis_uuid'] = uuid

                 # Resubmit the recorded job input on the requested instance
                 # type with cost_replay enabled.
                 cloud_result = backend_services.submit_cloud_task(params, agent_type = AgentTypes.EC2, instance_type = instance_type, cost_replay = True)

                 if not cloud_result["success"]:
                     e = cloud_result["exception"]
                     result = {
                               'status': False,
                               'msg': 'Cloud execution failed: '+str(e)
                              }
                     # NOTE(review): returns a dict from a request handler
                     # without writing the JSON response -- looks like a bug;
                     # confirm against the sibling handlers.
                     return result

                 result = {'status':True,'msg':'Cost analysis submitted successfully.'}


         except Exception,e:
                 result = {'status':False,'msg':'Cloud execution failed: '+str(e)}
Example #2
0
    def post(self):
        """Handle job-output maintenance requests posted from the UI.

        Supported ``req_type`` values:

        * ``'delOutput'`` -- mark the stored cloud output of the job with the
          given ``uuid`` (stochkit / sensitivity / spatial) as deleted, remove
          the task output in the cloud and any local copy of it.
        * ``'rerun'``     -- resubmit the job with the given ``uuid`` to the
          cloud using its CloudTracker provenance data.

        Always answers with a JSON document ``{'status': bool, 'msg': str}``.
        """
        self.response.content_type = 'application/json'
        req_type = self.request.get('req_type')

        # Export the user's AWS credentials for libraries that read them from
        # the environment, and keep local copies for CloudTracker.
        credentials = self.user_data.getCredentials()
        os.environ["AWS_ACCESS_KEY_ID"] = credentials['EC2_ACCESS_KEY']
        os.environ["AWS_SECRET_ACCESS_KEY"] = credentials['EC2_SECRET_KEY']
        access_key = credentials['EC2_ACCESS_KEY']
        secret_key = credentials['EC2_SECRET_KEY']

        if req_type == 'delOutput':
            uuid = self.request.get('uuid')
            logging.debug('delOutput: uuid={0}'.format(uuid))

            try:
                job_type = self.request.get('job_type')

                # Look up the job wrapper for this user/uuid and mark its
                # output as no longer stored.
                job = None
                if job_type == 'stochkit':
                    job = db.GqlQuery("SELECT * FROM StochKitJobWrapper WHERE user_id = :1 AND cloudDatabaseID = :2", self.user.user_id(), uuid).get()
                    job.output_stored = 'False'
                    job.put()
                elif job_type == 'sensitivity':
                    job = sensitivity.SensitivityJobWrapper.all().filter('user_id =', self.user.user_id()).filter('cloudDatabaseID =', uuid).get()
                    job.output_stored = 'False'
                    job.outData = None
                    job.put()
                elif job_type == 'spatial':
                    job = spatial.SpatialJobWrapper.all().filter('user_id =', self.user.user_id()).filter('cloudDatabaseID =', uuid).get()
                    job.output_stored = 'False'
                    job.outData = None
                    job.put()

                # BUG FIX: an unknown job_type used to fall through and raise
                # an opaque NameError on 'job' below; fail with a clear
                # message instead (still reported via the except handler).
                if job is None:
                    raise ValueError('unknown job_type: {0}'.format(job_type))

                service = backendservices(self.user_data)
                service.deleteTaskOutput(job)

                # delete the local output if any
                output_path = os.path.join(os.path.dirname(__file__), '../output/')
                if os.path.exists(str(output_path)+uuid):
                    shutil.rmtree(str(output_path)+uuid)

                result = {'status':True,'msg':'Output deleted successfully.'}
            except Exception as e:
                logging.error(e)
                result = {'status':False,'msg':'Fail to delete output in the cloud: '+str(e)}

            self.response.write(json.dumps(result))
            return

        elif req_type == 'rerun':

            service = backendservices(self.user_data)

            job_type = self.request.get('job_type')
            uuid = self.request.get('uuid')
            logging.debug('rerun: uuid={0}'.format(uuid))

            # BUG FIX: the format string had no placeholder, so the uuid was
            # never actually logged.
            logging.info('job uuid: {0}'.format(uuid))

            if not self.user_data.valid_credentials or not service.isOneOrMoreComputeNodesRunning():
                # BUG FIX: the original message concatenated an undefined
                # 'instance_type' variable, which raised a NameError here.
                self.response.write(json.dumps({
                    'status': False,
                    'msg': 'There is no compute node running. *Launch one node? '
                }))
                return

            if job_type == 'stochkit':

                job = db.GqlQuery("SELECT * FROM StochKitJobWrapper WHERE user_id = :1 AND cloudDatabaseID = :2", self.user.user_id(), uuid).get()

                try:
                    logging.info('start to rerun the job {0}'.format(str(uuid)))
                    # Set up CloudTracker with user credentials and specified UUID to rerun the job
                    ct = CloudTracker(access_key, secret_key, str(uuid), self.user_data.getBucketName())
                    # if_tracking() is true while the job is still being
                    # tracked, i.e. no complete provenance data exists yet.
                    has_prov = not ct.if_tracking()
                    # If there is no provenance data for this job, report an error to the user
                    if not has_prov:
                        result = {'status':False,'msg':"The job with this ID does not exist or cannot be reproduced."}
                        self.response.write(json.dumps(result))
                        return

                    params = ct.get_input()
                    logging.info("OUT_PUT SIZE: {0}".format(params['output_size']))

                    # Renamed from 'time' so the stdlib module is not shadowed.
                    start_time = datetime.datetime.now()
                    params['rerun_uuid'] = uuid
                    cloud_result = service.submit_cloud_task(params=params)

                    if not cloud_result["success"]:
                        e = cloud_result["exception"]
                        result = {
                                  'status': False,
                                  'msg': 'Cloud execution failed: '+str(e)
                                 }
                        # BUG FIX: used to 'return result' without writing it,
                        # so the client received an empty response body.
                        self.response.write(json.dumps(result))
                        return
                    # The celery_pid is the Celery Task ID.
                    job.celeryPID = cloud_result["celery_pid"]
                    job.status = 'Running'
                    job.outData = None
                    job.output_stored = 'True'

                    job.startDate = start_time.strftime("%Y-%m-%d-%H-%M-%S")

                    job.put()
                    result = {'status':True,'msg':'Job rerun submitted successfully.'}

                except Exception as e:
                    logging.error(e)
                    result = {'status':False,'msg':'Cloud execution failed: '+str(e)}

                self.response.write(json.dumps(result))
                return

            elif job_type == 'sensitivity':
                job = sensitivity.SensitivityJobWrapper.all().filter('user_id =', self.user.user_id()).filter('cloudDatabaseID =', uuid).get()

                try:
                    ct = CloudTracker(access_key, secret_key, str(uuid), self.user_data.getBucketName())
                    has_prov = not ct.if_tracking()
                    # If there is no provenance data for this job, report an error to the user
                    if not has_prov:
                        result = {'status':False,'msg':"The job with this ID does not exist or cannot be reproduced."}
                        self.response.write(json.dumps(result))
                        return

                    params = ct.get_input()

                    # Renamed from 'time' so the stdlib module is not shadowed.
                    start_time = datetime.datetime.now()

                    # execute task in cloud
                    params['rerun_uuid'] = uuid
                    cloud_result = service.submit_cloud_task(params=params)

                    if not cloud_result["success"]:
                        e = cloud_result["exception"]
                        result = {
                                  'status': False,
                                  'msg': 'Cloud execution failed: '+str(e)
                                 }
                        # BUG FIX: used to 'return result' without writing it,
                        # so the client received an empty response body.
                        self.response.write(json.dumps(result))
                        return

                    # The celery_pid is the Celery Task ID.
                    job.status = "Running"
                    job.celeryPID = cloud_result["celery_pid"]
                    job.startTime = start_time.strftime("%Y-%m-%d-%H-%M-%S")
                    job.output_stored = 'True'
                    job.put()
                    result = {'status':True,'msg':'Job rerun submitted successfully.'}

                except Exception as e:
                    logging.error(e)
                    result = {'status':False,'msg':'Cloud execution failed: '+str(e)}

                self.response.write(json.dumps(result))
                return
Example #3
0
    def post(self):
        """Handle job-output maintenance requests posted from the UI.

        Supported ``req_type`` values:

        * ``'delOutput'`` -- mark the stored cloud output of the job with the
          given ``uuid`` (stochkit / sensitivity / spatial) as deleted, remove
          the task output in the cloud and any local copy of it.
        * ``'rerun'``     -- resubmit the job with the given ``uuid`` to the
          cloud using its CloudTracker provenance data.

        Always answers with a JSON document ``{'status': bool, 'msg': str}``.
        """
        self.response.content_type = 'application/json'
        req_type = self.request.get('req_type')

        # Export the user's AWS credentials for libraries that read them from
        # the environment, and keep local copies for CloudTracker.
        credentials = self.user_data.getCredentials()
        os.environ["AWS_ACCESS_KEY_ID"] = credentials['EC2_ACCESS_KEY']
        os.environ["AWS_SECRET_ACCESS_KEY"] = credentials['EC2_SECRET_KEY']
        access_key = credentials['EC2_ACCESS_KEY']
        secret_key = credentials['EC2_SECRET_KEY']

        if req_type == 'delOutput':
            uuid = self.request.get('uuid')
            logging.debug('delOutput: uuid={0}'.format(uuid))

            try:
                job_type = self.request.get('job_type')

                # Look up the job wrapper for this user/uuid and mark its
                # output as no longer stored.
                job = None
                if job_type == 'stochkit':
                    job = db.GqlQuery(
                        "SELECT * FROM StochKitJobWrapper WHERE user_id = :1 AND cloudDatabaseID = :2",
                        self.user.user_id(), uuid).get()
                    job.output_stored = 'False'
                    job.put()
                elif job_type == 'sensitivity':
                    job = sensitivity.SensitivityJobWrapper.all().filter(
                        'user_id =', self.user.user_id()).filter(
                            'cloudDatabaseID =', uuid).get()
                    job.output_stored = 'False'
                    job.outData = None
                    job.put()
                elif job_type == 'spatial':
                    job = spatial.SpatialJobWrapper.all().filter(
                        'user_id =', self.user.user_id()).filter(
                            'cloudDatabaseID =', uuid).get()
                    job.output_stored = 'False'
                    job.outData = None
                    job.put()

                # BUG FIX: an unknown job_type used to fall through and raise
                # an opaque NameError on 'job' below; fail with a clear
                # message instead (still reported via the except handler).
                if job is None:
                    raise ValueError('unknown job_type: {0}'.format(job_type))

                service = backendservices(self.user_data)
                service.deleteTaskOutput(job)

                # delete the local output if any
                output_path = os.path.join(os.path.dirname(__file__),
                                           '../output/')
                if os.path.exists(str(output_path) + uuid):
                    shutil.rmtree(str(output_path) + uuid)

                result = {
                    'status': True,
                    'msg': 'Output deleted successfully.'
                }
            except Exception as e:
                logging.error(e)
                result = {
                    'status': False,
                    'msg': 'Fail to delete output in the cloud: ' + str(e)
                }

            self.response.write(json.dumps(result))
            return

        elif req_type == 'rerun':

            service = backendservices(self.user_data)

            job_type = self.request.get('job_type')
            uuid = self.request.get('uuid')
            logging.debug('rerun: uuid={0}'.format(uuid))

            # BUG FIX: the format string had no placeholder, so the uuid was
            # never actually logged.
            logging.info('job uuid: {0}'.format(uuid))

            if not self.user_data.valid_credentials or not service.isOneOrMoreComputeNodesRunning():
                # BUG FIX: the original message concatenated an undefined
                # 'instance_type' variable, which raised a NameError here.
                self.response.write(json.dumps({
                    'status': False,
                    'msg': 'There is no compute node running. *Launch one node? '
                }))
                return

            if job_type == 'stochkit':

                job = db.GqlQuery(
                    "SELECT * FROM StochKitJobWrapper WHERE user_id = :1 AND cloudDatabaseID = :2",
                    self.user.user_id(), uuid).get()

                try:
                    logging.info('start to rerun the job {0}'.format(str(uuid)))
                    # Set up CloudTracker with user credentials and specified UUID to rerun the job
                    ct = CloudTracker(access_key, secret_key, str(uuid),
                                      self.user_data.getBucketName())
                    # if_tracking() is true while the job is still being
                    # tracked, i.e. no complete provenance data exists yet.
                    has_prov = not ct.if_tracking()
                    # If there is no provenance data for this job, report an error to the user
                    if not has_prov:
                        result = {
                            'status': False,
                            'msg': "The job with this ID does not exist or cannot be reproduced."
                        }
                        self.response.write(json.dumps(result))
                        return

                    params = ct.get_input()
                    logging.info("OUT_PUT SIZE: {0}".format(params['output_size']))

                    # Renamed from 'time' so the stdlib module is not shadowed.
                    start_time = datetime.datetime.now()
                    params['rerun_uuid'] = uuid
                    cloud_result = service.submit_cloud_task(params=params)

                    if not cloud_result["success"]:
                        e = cloud_result["exception"]
                        result = {
                            'status': False,
                            'msg': 'Cloud execution failed: ' + str(e)
                        }
                        # BUG FIX: used to 'return result' without writing it,
                        # so the client received an empty response body.
                        self.response.write(json.dumps(result))
                        return
                    # The celery_pid is the Celery Task ID.
                    job.celeryPID = cloud_result["celery_pid"]
                    job.status = 'Running'
                    job.outData = None
                    job.output_stored = 'True'

                    job.startDate = start_time.strftime("%Y-%m-%d-%H-%M-%S")

                    job.put()
                    result = {
                        'status': True,
                        'msg': 'Job rerun submitted successfully.'
                    }

                except Exception as e:
                    logging.error(e)
                    result = {
                        'status': False,
                        'msg': 'Cloud execution failed: ' + str(e)
                    }

                self.response.write(json.dumps(result))
                return

            elif job_type == 'sensitivity':
                job = sensitivity.SensitivityJobWrapper.all().filter(
                    'user_id =', self.user.user_id()).filter(
                        'cloudDatabaseID =', uuid).get()

                try:
                    ct = CloudTracker(access_key, secret_key, str(uuid),
                                      self.user_data.getBucketName())
                    has_prov = not ct.if_tracking()
                    # If there is no provenance data for this job, report an error to the user
                    if not has_prov:
                        result = {
                            'status': False,
                            'msg': "The job with this ID does not exist or cannot be reproduced."
                        }
                        self.response.write(json.dumps(result))
                        return

                    params = ct.get_input()

                    # Renamed from 'time' so the stdlib module is not shadowed.
                    start_time = datetime.datetime.now()

                    # execute task in cloud
                    params['rerun_uuid'] = uuid
                    cloud_result = service.submit_cloud_task(params=params)

                    if not cloud_result["success"]:
                        e = cloud_result["exception"]
                        result = {
                            'status': False,
                            'msg': 'Cloud execution failed: ' + str(e)
                        }
                        # BUG FIX: used to 'return result' without writing it,
                        # so the client received an empty response body.
                        self.response.write(json.dumps(result))
                        return

                    # The celery_pid is the Celery Task ID.
                    job.status = "Running"
                    job.celeryPID = cloud_result["celery_pid"]
                    job.startTime = start_time.strftime("%Y-%m-%d-%H-%M-%S")
                    job.output_stored = 'True'
                    job.put()
                    result = {
                        'status': True,
                        'msg': 'Job rerun submitted successfully.'
                    }

                except Exception as e:
                    logging.error(e)
                    result = {
                        'status': False,
                        'msg': 'Cloud execution failed: ' + str(e)
                    }

                self.response.write(json.dumps(result))
                return