def check_usbmon(self):
    try:
        job.run('grep usbmon /proc/modules')
    except job.Error:
        print('Kernel module not loaded, attempting to load usbmon')
        result = job.run('modprobe usbmon', ignore_status=True)
        if result.exit_status != 0:
            print(result.stderr)
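The snippet above relies on an Autotest-style `job` helper whose `run()` raises `job.Error` on a non-zero exit unless `ignore_status=True` is passed, in which case it returns a result object carrying `exit_status` and `stderr`. As a rough sketch of that contract (the `Error` and `RunResult` names below are illustrative, not taken from any real library), a minimal stand-in built on `subprocess` might look like this:

import subprocess

class Error(Exception):
    """Raised when a command exits non-zero and ignore_status is False."""

class RunResult:
    def __init__(self, exit_status, stdout, stderr):
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr

def run(cmd, ignore_status=False):
    # Run the shell command, capture output, and mimic the job.run contract
    # assumed by check_usbmon above.
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    result = RunResult(proc.returncode, proc.stdout, proc.stderr)
    if proc.returncode != 0 and not ignore_status:
        raise Error(result.stderr)
    return result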
Example 2
def run_local():
    job_args = []

    job_args.append("--runner=DirectRunner")
    job_args.append("--job_name=" + _config("job_name"))

    import job
    job.run(config, job_args, argv, True)
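The `--runner` and `--job_name` flags suggest that the imported `job` module builds an Apache Beam pipeline; that is an inference, not something the snippet shows. If so, the assembled `job_args` would typically be handed to Beam's standard options API along these lines:

import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions

def run_pipeline(job_args):
    # Turn the flag strings into pipeline options; the pipeline body itself
    # is omitted in this sketch.
    options = PipelineOptions(job_args)
    with beam.Pipeline(options=options) as pipeline:
        pass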
Example 3
def run_gcp():
    gcp_path = _config("gcp_bucket")
    
    job_args = []
    
    job_args.append("--runner=DataflowRunner")
    job_args.append("--project=" + _config("gcp_project"))
    job_args.append("--job_name=" + _config("job_name"))
    job_args.append("--region=" + _config("gcp_region"))
    job_args.append("--temp_location=" + "{}/temp".format(gcp_path))
    job_args.append("--staging_location=" + "{}/staging".format(gcp_path))
    job_args.append("--setup_file=./setup.py")

    import job
    job.run(config, job_args, argv, True)
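`_config` is not defined in these snippets. A hypothetical stand-in that reads settings from environment variables (the variable-name mapping below is invented for illustration) would make the example self-contained:

import os

def _config(key):
    # Hypothetical lookup: environment variables such as GCP_PROJECT or
    # GCP_BUCKET, keyed by the lower-case names used above.
    return os.environ[key.upper()]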
Example 4
    def update(self,station,dt):
        Actor.update(self,station,dt)
                
        _eff = station.satisfy_reaction(self.respiration,dt)                       

        #amenity calculations
        #habitation
        #self.amenity_prefs['Habitation']
        #food
        #drinks
        #amenities
        
        job_time = dt                
        
        if _eff < 0.4:
            pass #TODO SUFFOCATING!  Bad things happen
        _eff = station.satisfy_reaction(self.hydration,dt)
        if _eff < 0.5:            
            pass #TODO dehydrated!  Slightly less bad things happen                        
        _eff = station.satisfy_reaction(self.excretion,dt)

        #ingestion
        #try eating fancy food first, when it's implemented
        gimme_eat = {'Name':'Ingestion', 'Inputs':{'Meals':0.62}, 'Outputs':{} }
        _eff = station.satisfy_reaction(gimme_eat,dt)
        if _eff < 1.0:
            #try making more food
            job_time -= dt*(1 - _eff)/8 / self.skill['CookJob']
            if station.get_item('CookJob') < 1.0: station.add_item('CookJob',1.0)
            job.run('CookJob', self, station, dt*(1 - _eff)/8 / self.skill['CookJob'] ) #1 cook feeds 16 people, kinda arbitrary, isn't it?
            gimme_eat = {'Name':'Ingestion', 'Inputs':{'Meals':0.62*(1 - _eff)}, 'Outputs':{} }
            _eff2 = station.satisfy_reaction(gimme_eat,dt)
            _eff = _eff + _eff2*(1-_eff)
        if _eff < 0.5:            
            print('Malnourished!', _eff) #TODO malnourished!
        


        momentary_happiness = 1.0
        for c in self.comfort_needs:
            momentary_happiness *= max(0.5,station.satisfy_reaction(c,dt))
        
        cur_job = job.get_job_from_priority_skillset_prefs(station,self.skill,self.pref)
        job.run(cur_job, self, station, job_time ) 
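One step above that rewards a second look is `_eff = _eff + _eff2*(1-_eff)`: after cooking, the second ingestion attempt only covers the fraction of the need that the first pass left unmet, so the two efficiencies combine like independent coverage rather than adding directly. A small standalone illustration of that arithmetic:

def combine_efficiency(first_pass, second_pass):
    # The second pass applies only to the (1 - first_pass) fraction still
    # unmet, so total coverage never exceeds 1.0.
    return first_pass + second_pass * (1.0 - first_pass)

# Example: 60% of meals available at first, cooking covers half the shortfall.
assert abs(combine_efficiency(0.6, 0.5) - 0.8) < 1e-9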
Example 5
def run(pipeline_config):
    '''Run the pipeline specified by the PipelineConfig.'''
    print "Starting pipeline '%s'" % pipeline_config.name

    overwrite = pipeline_config.overwrite
    ran_job = False
    with utils.WorkingDir(pipeline_config.working_dir):
        for job_config in pipeline_config.jobs:
            if ran_job and not overwrite:
                print "A config change was detected, running all remaining jobs"
                overwrite = True

            ran_job = job.run(job_config, overwrite=overwrite)

    print "Pipeline '%s' complete" % pipeline_config.name
Example 6
    def _build(self, job):
        """
        Build a job.
        """
        self.running_jobs[job.id] = job

        job.set_status('running')
        self._write_job_status(job)
        logging.info("{}: starting".format(job))

        for k, v in job.env.items():
            logging.debug('{}: env: {}={}'.format(job, k, v))
        logging.debug("{}: executing.".format(job))

        result = job.run()
        if result == -1:
            # Build was aborted. Remove the project specific job status, but
            # keep the _all job status so we can inspect the output if
            # required.
            job.set_status('aborted')
            logging.info("{}: result: intentionally aborted. Exit code = {}".format(job, job.exit_code))

            self._write_job_status(job, aborted=True)
        else:
            job.set_status('done')
            if job.exit_code == 0:
                logging.info("{}: result: success. Exit code = {}".format(job, job.exit_code))
            else:
                logging.warning("{}: result: failed. Exit code = {}".format(job, job.exit_code))

            self._write_job_status(job)

            prev_job = self.get_job_status(job.prev_id)
            if (prev_job is not None and job.exit_code != prev_job.exit_code):
                # Job result has changed since last time. Call the `job_changed_handler`.
                if self.job_changed_handler is not None:
                    logging.debug("{}: status changed. Calling the 'job changed' handler".format(job.id[:8]))
                    self.job_changed_handler(job, prev_job)
                else:
                    logging.debug("{}: status changed, but no 'job changed' handler defined.".format(job.id[:8]))

        del self.running_jobs[job.id]
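The `job_changed_handler` callback receives the current and previous job objects whenever the exit code differs from the last run. A hedged sketch of such a handler, assuming only the `id` and `exit_code` attributes the method above already uses:

import logging

def notify_on_change(job, prev_job):
    # Illustrative handler: report transitions such as success -> failure.
    old = "success" if prev_job.exit_code == 0 else "failure"
    new = "success" if job.exit_code == 0 else "failure"
    logging.info("%s: result changed from %s to %s", job.id[:8], old, new)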
Example 7
from job import run
import logging
import config

logging.basicConfig(level=logging.INFO)

if __name__ == "__main__":
    run(config)
Example 8
def job_pubsub(event, context):
    job.run(event, context)
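This matches the background Cloud Functions signature for Pub/Sub triggers, where `event` carries the message (with its payload base64-encoded under `data`) and `context` carries event metadata. A local smoke test might call it with a fake event like the following (the payload contents and `FakeContext` fields are invented for illustration, and the `job` module must be importable for the call to succeed):

import base64

class FakeContext:
    event_id = "1234567890"
    timestamp = "2024-01-01T00:00:00.000Z"

fake_event = {
    "data": base64.b64encode(b'{"job_name": "nightly"}').decode("utf-8"),
}

job_pubsub(fake_event, FakeContext())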