def associated(self):
    """Return the NUMBER of jobs associated with this bucket.

    NOTE(review): the original docstring said "return any jobs", but the
    code has always returned ``len(jobs)`` — a count, not the entities.
    Callers therefore receive an int; the docstring is corrected here and
    the counting behavior is kept for backward compatibility.

    Returns:
        int: how many Job entities reference this bucket.
    """
    # Todo: fix search for jobs — this scans every Job entity, which is
    # O(total jobs); a filtered datastore query on the bucket reference
    # would avoid the full scan.
    from job import Job  # local import, presumably to avoid a circular import — TODO confirm
    return sum(1 for job in Job.all() if job.db.bucket == self)
def put(self):
    """Handle an HTTP PUT carrying exactly one job and update it in the datastore.

    Expects a JSON body shaped like ``{"jobs": [ {..., "jobId": ..., ...} ]}``.
    Payloads containing more than one job are rejected with status 204.
    For the single incoming job, the handler looks up the existing Job
    entity by jobId; the job is skipped (not written) when the stored
    copy is already finished or belongs to a different iteration.
    Otherwise the caller's IP is recorded and the entity is (re)written.
    When a written job is finished, the GetAllJobs memcache entry is
    invalidated so subsequent reads see fresh data.

    NOTE(review): this block was recovered from a whitespace-mangled
    source; the indentation below is a reconstruction and the pairing of
    the ``else: self.error(204)`` (here attached to ``if result is not
    None``) should be verified against the original file.
    """
    logging.info('put single job received')
    data_string = self.request.body
    logging.info(data_string)
    decoded = json.loads(data_string)
    # has_key is Python-2-only (removed in Py3); `'jobs' in decoded`
    # would be the portable spelling.
    if decoded.has_key('jobs'):
        count_jobs = len(decoded['jobs'])
        if count_jobs > 1:
            # Only single-job PUTs are supported; 204 doubles as the
            # "abort" status throughout this handler.
            logging.info("more than 1 job, abort")
            self.error(204)
            return
        jobs = []
        for job in decoded['jobs']:
            # Build the candidate entity keyed by jobId under the current
            # iteration. `currentIteration` is defined elsewhere in this
            # file/module — not visible here; TODO confirm its scope.
            temp = Job(key_name=str(job['jobId']), parent=currentIteration)
            temp.set(job)
            # Lookup Job in DB and see if already running
            # if not running overwrite and send 200 else 500
            q = Job.all()
            q.filter("jobId =", temp.jobId)
            result = q.get()
            if result is not None:
                # Disabled same-VM check kept for reference:
                # if result.vmIp != self.request.remote_addr:
                #     logging.info('job already running from other vm, abort')
                #     self.error(500)
                #     return
                if result.finished:
                    continue  # skip job: already completed, never overwrite
                if result.iteration != temp.iteration:
                    continue  # skip job: stale/mismatched iteration
                # Record which VM is reporting this job, then queue it
                # for the batched datastore write below.
                temp.vmIp = self.request.remote_addr
                jobs.append(temp)
            else:
                # Job not found in the datastore. Note: error() does not
                # return here, so the loop (and the writes below) still run.
                self.error(204)
        for job in jobs:
            job.put()
            logging.info('put job['+str(job.jobId)+'] into datastore')
            if job.finished:
                # A finished job invalidates the cached all-jobs listing.
                memcache.delete(GetAllJobs.cachekey)
                logging.info('memcache deleted!!!!')