def setup_module():
    """Initial setup needed before tests can run.

    Creates the output directory, wires up a local report datasource and
    stamps the run with the current unix time as run id.
    """
    if not os.path.exists(OUTDIR):
        try:
            os.mkdir(OUTDIR)
        except OSError:
            # Lost a creation race with a sibling process: the directory may
            # now exist.  Re-raise only if it genuinely could not be created.
            if not os.path.isdir(OUTDIR):
                raise
    ds = report.create_local_datasource(OUTPUT_DS)
    report.set_datasource(ds)
    report.set_run_id(int(time.time()))  # a time stamp
def run_check(p_number, testname, db_name, add_args, runid, use_local, schema,
              use_ref_data, lock):
    """Main checker routine which should be defined for all processes.

    Repeatedly claims unprocessed tiles (status=0) from the sqlite job table
    named after *testname*, runs the corresponding qc test on each tile and
    writes status/result back to the table until no tiles are left.

    Parameters
    ----------
    p_number : int
        Worker/process number (used in log names and the prc_id column).
    testname : str
        Name of the qc test; also the name of the sqlite job table.
        NOTE(review): interpolated directly into SQL - assumed to come from a
        trusted, internal list of test names, never from user input.
    db_name : str
        Path to the sqlite process database.
    add_args : list
        Extra command-line style arguments forwarded to the test function.
    runid : int or None
        Report run id; forwarded to the report module when given.
    use_local : bool
        If set, report results to a local datasource (global attr in report).
    schema : str or None
        Database schema for reporting (only used when not use_local).
    use_ref_data : bool
        Whether the test needs the reference-data path as an argument.
    lock : multiprocessing.Lock
        Guards the claim-a-tile critical section shared between workers.
    """
    logger = multiprocessing.log_to_stderr()
    test_func = qc.get_test(testname)
    # Set up some globals in various modules... per process.
    if runid is not None:
        report.set_run_id(runid)
    if use_local:
        # rather than sending args to scripts, which might not have implemented
        # handling that particular argument, set a global attr in report.
        report.set_use_local(True)
    elif schema is not None:
        report.set_schema(schema)
    # LOAD THE DATABASE
    con = sqlite.connect(db_name)
    if con is None:
        # Defensive: sqlite normally raises rather than returning None.
        logger.error(
            "[qc_wrap]: Process: {0:d}, unable to fetch process db".format(
                p_number))
        return
    cur = con.cursor()
    # HH_MM_SS -> HH_MM_SS-safe filename component.
    timestamp = (time.asctime().split()[-2]).replace(':', '_')
    logname = testname + '_' + timestamp + '_' + str(p_number) + '.log'
    logname = os.path.join(LOGDIR, logname)
    logfile = open(logname, 'w')
    # Route this worker's stdout/stderr into its private log file.
    stdout = osutils.redirect_stdout(logfile)
    stderr = osutils.redirect_stderr(logfile)
    filler = '*-*' * 23
    print(filler)
    print(
        '[qc_wrap]: Running {test} routine at {time}, process: {proc}, run id: {rid}'
        .format(test=testname, time=time.asctime(), proc=p_number, rid=runid))
    print(filler)
    done = 0
    cur.execute('select count() from ' + testname + ' where status=0')
    n_left = cur.fetchone()[0]
    while n_left > 0:
        print(filler)
        print("[qc_wrap]: Number of tiles left: {0:d}".format(n_left))
        print(filler)
        # Critical section: claim one unprocessed tile atomically.
        lock.acquire()
        cur.execute("select id,las_path,ref_path from " + testname +
                    " where status=0")
        data = cur.fetchone()
        if data is None:
            print("[qc_wrap]: odd - seems to be no more tiles left...")
            lock.release()
            break
        fid, lasname, vname = data
        cur.execute(
            "update " + testname +
            " set status=?,prc_id=?,exe_start=? where id=?",
            (STATUS_PROCESSING, p_number, time.asctime(), fid))
        try:
            con.commit()
        except Exception as err_msg:
            # BUG FIX: err_msg is an Exception object; concatenating it to a
            # str raised TypeError inside the handler.  Use str(err_msg).
            stderr.write(
                "[qc_wrap]: Unable to update tile to finish status...\n" +
                str(err_msg) + "\n")
            break
        finally:
            lock.release()
        # end critical section
        print("[qc_wrap]: Doing lasfile {0:s}...".format(lasname))
        send_args = [testname, lasname]
        if use_ref_data:
            send_args.append(vname)
        send_args += add_args
        try:
            return_code = test_func(send_args)
        except Exception as err_msg:
            return_code = -1
            msg = str(err_msg)
            status = STATUS_ERROR
            stderr.write("[qc_wrap]: Exception caught:\n" + msg + "\n")
            stderr.write("[qc_wrap]: Traceback:\n" + traceback.format_exc() +
                         "\n")
        else:
            # set new status
            msg = "ok"
            status = STATUS_OK
            try:
                return_code = int(return_code)
            except (NameError, ValueError, TypeError):
                return_code = 0
        cur.execute(
            "update " + testname +
            " set status=?,exe_end=?,rcode=?,msg=? where id=?",
            (status, time.asctime(), return_code, msg, fid))
        done += 1
        try:
            con.commit()
        except Exception as err_msg:
            # BUG FIX: same str() conversion as above.
            stderr.write(
                "[qc_wrap]: Unable to update tile to finish status...\n" +
                str(err_msg) + "\n")
        # go on to next one...
        cur.execute('select count() from ' + testname + ' where status=0')
        n_left = cur.fetchone()[0]
    print("[qc_wrap]: Checked %d tiles, finished at %s" %
          (done, time.asctime()))
    cur.close()
    con.close()
    # avoid writing to a closed fp...
    stdout.close()
    stderr.close()
    logfile.close()
cur.execute("update proc_jobs set status=%s,msg=%s where ogc_fid=%s",(STATUS_ERROR,"Definition did not exist.",id)) con.commit() continue testname,schema,runid,targs=data logger.info("Was told to do job with id %s, test %s, on data (%s,%s)" %(job_id,testname,path,ref_path)) #now just run the script.... hmm - perhaps import with importlib and run it?? stdout.write(sl+"[proc_client] Doing definition %s from %s, test: %s\n"%(job_id,db_cstr,testname)) args={"__name__":"qc_wrap","path":path} try: targs=json.loads(targs) #convert to a python list test_func=qc.get_test(testname) use_ref_data=qc.tests[testname][0] use_reporting=qc.tests[testname][1] #both of these can be None - but that's ok. if use_reporting: report.set_run_id(runid) report.set_schema(schema) send_args=[testname,path] if use_ref_data: assert(len(ref_path)>0) send_args.append(ref_path) send_args+=targs rc=test_func(send_args) except Exception,e: stderr.write("[proc_client]: Exception caught:\n"+str(e)+"\n") stderr.write("[proc_client]: Traceback:\n"+traceback.format_exc()+"\n") logger.error("Caught: \n"+str(e)) msg=str(e)[:128] #truncate msg for now - or use larger field width. cur.execute("update proc_jobs set status=%s,msg=%s where ogc_fid=%s",(STATUS_ERROR,msg,id)) con.commit()
stdout, stderr, test_data["args"], call_as_main=False) # Run some tests on the demo data... print(sl) print("Running tests on demo data...") try: ds = report.create_local_datasource(OUTPUT_DS) except Exception, e: print("Unable to create a test-suite datasource:\n" + str(e)) n_serious += 1 else: report.set_datasource(ds) report.set_run_id(int(time.time())) # a time stamp for test, test_data in TESTS: if pargs.test is None or pargs.test == test: print(sl) if test in loaded_tests: n_serious += run_test(test, loaded_tests[test], test_data["files"], stdout, stderr, test_data["args"]) else: print(test + " was not loaded...") print(sl + "\n") print("Minor errors : {0:d}".format(n_minor)) print("Serious errors: {0:d}".format(n_serious)) if n_serious == 0: print("Yipiieee!!")
def run_check(p_number, testname, db_name, add_args, runid, use_local, schema, use_ref_data, lock): ''' Main checker rutine which should be defined for all processes. ''' logger = multiprocessing.log_to_stderr() test_func = qc.get_test(testname) #Set up some globals in various modules... per process. if runid is not None: report.set_run_id(runid) if use_local: # rather than sending args to scripts, which might not have implemented # handling that particular argument, set a global attr in report. report.set_use_local(True) elif schema is not None: report.set_schema(schema) #LOAD THE DATABASE con = sqlite.connect(db_name) if con is None: logger.error( "[qc_wrap]: Process: {0:d}, unable to fetch process db".format( p_number)) return cur = con.cursor() timestamp = (time.asctime().split()[-2]).replace(':', '_') logname = testname + '_' + timestamp + '_' + str(p_number) + '.log' logname = os.path.join(LOGDIR, logname) logfile = open(logname, 'w') stdout = osutils.redirect_stdout(logfile) stderr = osutils.redirect_stderr(logfile) filler = '*-*' * 23 print(filler) print( '[qc_wrap]: Running {test} routine at {time}, process: {proc}, run id: {rid}' .format(test=testname, time=time.asctime(), proc=p_number, rid=runid)) print(filler) done = 0 cur.execute('select count() from ' + testname + ' where status=0') n_left = cur.fetchone()[0] while n_left > 0: print(filler) print("[qc_wrap]: Number of tiles left: {0:d}".format(n_left)) print(filler) #Critical section# lock.acquire() cur.execute("select id,las_path,ref_path from " + testname + " where status=0") data = cur.fetchone() if data is None: print("[qc_wrap]: odd - seems to be no more tiles left...") lock.release() break fid, lasname, vname = data cur.execute( "update " + testname + " set status=?,prc_id=?,exe_start=? 
where id=?", (STATUS_PROCESSING, p_number, time.asctime(), fid)) try: con.commit() except Exception, err_msg: stderr.write( "[qc_wrap]: Unable to update tile to finish status...\n" + err_msg + "\n") break finally: