def cancelJob(webin, pp):
    namep = pp.name
    owner = namep[0]
    idx = namep[1]
    topic = "/job/" + owner + "/" + idx
    theJob = models.getJob(topic, webin.pageStore)
    if theJob is None:
        vprint("CANCELED could not find ", topic, " to cancel")
        return failResponse("noSuchJob")
    vprint("CANCEL ", "[" + topic + "]", theJob, theJob.kind, theJob.status)
    st = theJob.status
    theJob.status = "canceled"
    saveJob(theJob)
    if st == "done":
        whenCanceled(theJob)
    # There is a bare chance of a race condition: the job is just finishing
    # when the canceled status is written, so pause briefly and re-check.
    time.sleep(0.1)
    jb = models.getJob(topic, webin.pageStore)
    if jb.status == "done":
        print "CANCEL very unusual: race condition for ", topic
        whenCanceled(jb)
        # Re-assert the canceled status over the racing "done".
        theJob.status = "canceled"
        saveJob(theJob)
    return okResponse()
def emitJob(webin, pp):
    namep = pp.name
    owner = namep[0]
    idx = namep[1]
    theJob = models.getJob("/job/" + owner + "/" + idx, webin.pageStore)
    if not theJob:
        return failResponse("noSuchJob")
    # The pageStore handle is not serializable; drop it before emitting.
    if getattr(theJob, "pageStore", None):
        del theJob.pageStore
    return okResponse(theJob.__dict__)
def checkJobCanceled(job):
    if testMode:
        return False
    tp = job.topic
    # Re-read the job from the store; another request may have canceled it.
    sjob = models.getJob(tp, job.pageStore)
    rs = sjob.status == "canceled"
    vprint("CANCELED checked", tp, rs)
    if rs:
        job.status = "canceled"
        whenCanceled(job)
    return rs
def runJob(webin):
    cks = webin.checkSessionResponse()
    if cks:
        return cks
    session = webin.session
    user = session.user.split("/")[-1]
    qs = webin.queryString
    qsp = urlparse.parse_qs(qs)
    jobId = qsp["id"][0]
    vprint("runJob", jobId)
    jobTopic = "/job/" + user + "/" + jobId
    theJob = models.getJob(jobTopic, webin.pageStore)
    theJob.topic = jobTopic
    k = theJob.kind
    vprint("RUNNING JOB ", jobId, "KIND ", k)
    if theJob.status == "canceled":
        vprint("JOB CANCELED BEFORE IT STARTED")
        return okResponse("canceled")
    rs = None
    if k == "upload":
        rs = upload(theJob, webin)
    elif k == "retrieve":
        rs = retrieveImage(theJob)
    # The remaining kinds no longer occur, due to the separate importer.
    elif k == "add_image_to_db":
        rs = addImageToDb(theJob)
    elif k == "build_tiling":
        rs = buildTilingInThread(theJob)
    elif k == "resize_image":
        rs = resizeImageInThread(theJob)
    elif k == "to_s3":
        rs = toS3InThread(theJob)
    if isinstance(rs, str):
        vprint("FAIL FROM RUN JOB", rs)
        return failResponse(rs)
    if jobTopic in jobsByTopic:
        del jobsByTopic[jobTopic]
    return okResponse()
models.deleteImage("/image/cg/P1000515")
models.deleteImage("/image/cg/P1000515l")
models.deleteImage("/image/cg/P1000515k")
models.deleteImage("/image/cg/P1000515m")

jb = models.Job()
jb.total = 1001
jb.kind = "compute_tiling"
jb.owner = "cg"
jb.subject = "/image/cg/the_test"
buildTiling(jb)

jb = models.getJob("/job/cg/4")
jb.status = "started"
jb.so_far = 45
jb.save()

models.addImageToDb("cg", "sthellens")
models.loadImageD("/image/cg/sthellens")
dyn.getDict("Album", "/album/ccc/bathing_1/8")
ims = models.allImages()
def grabBuffer(job):
    vprint("GRABBuffer")
    global bufferCount, lastBuf, boundary, outFile, outlength, outPath
    bufferCount = bufferCount + 1
    Logr.log("upload", " grabbing buffer " + str(bufferCount))
    vprint("GRAB START READ")
    buf = instream.read(buffsize)  # blocking read; may take time
    vprint("GRAB END READ")
    # The end boundary may straddle two buffers, so search the
    # concatenation of the previous buffer and this one.
    last2bufs = lastBuf + buf
    bndp = last2bufs.find(boundary)
    if bndp >= 0:
        # Found the end boundary: this is the last buffer of the upload.
        vprint("GRABBED LAST BUFFER")
        if checkJobCanceled(job):
            vprint("UPLOAD CANCELED")
            return False
        lastbuf = last2bufs[0:bndp - 2]  # drop the CRLF preceding the boundary
        outFile.write(lastbuf)
        job.status = "done"
        saveJob(job)
        outFile.close()
        sb = job.subject
        sbo = json.loads(sb)
        imname = sbo["image_name"]
        try:
            im = addImageToDb(job)
        except Exception as ex:
            # addImageToDb signals an oversized image via (msg, szx, szy).
            msg, szx, szy = ex.args
            area = szx * szy
            areast = misc.bytesstring(area, pixels=1)
            errmsg = ("Apologies: ImageDiver currently supports images up to "
                      "0.5 gigapixels, and this has size " + areast +
                      ". Import canceled.")
            vprint("TOO BIG", errmsg, szx, szy)
            job.error = errmsg
            job.status = "error"
            saveJob(job)
            return False
        # Record the image dimensions on the job's subject.
        sbs = job.subject
        sb = json.loads(sbs)
        imd = im.dimensions
        sb["dimensions"] = imd
        sbs = json.dumps(sb)
        job.subject = sbs
        saveJob(job)
        if job.total > backendThreshold:
            allocateImportJob(job.owner, imname, pageStore=job.pageStore)
        return False
    if len(buf) < buffsize:
        # A short read with no boundary means the stream ended prematurely.
        Logr.log("upload", "internal error: no end boundary found in upload")
        vprint("GRAB exception")
        raise Exception("internal error: no end boundary found in upload")
    if checkJobCanceled(job):
        vprint("UPLOAD CANCELED")
        return False
    Logr.log("update", "BUF [" + lastBuf + "]")
    vprint("GRAB about to write")
    outFile.write(lastBuf)
    outlength = outlength + len(lastBuf)
    job.so_far = outlength
    saveJob(job)
    job = models.getJob(job.topic)
    if job.status == "canceled":
        vprint("GRAB canceled")
        Logr.log("upload", "canceling upload")
        outFile.close()
        os.remove(outPath)
        return False
    Logr.log("upload", " uploaded " + str(outlength))
    lastBuf = buf
    vprint("GRABBUF DONE")
    return True
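# A minimal driver sketch (an assumption about the calling convention, not
# shown in this file): the upload handler would call grabBuffer in a loop
# until it returns a falsy value, meaning the upload finished, errored, or
# was canceled.
#
#   while grabBuffer(job):
#       pass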