def get_superpixels_callback(result):
    """Record superpixel completion in the database after a superpixel job ends.

    Parameters
    ----------
    result : tuple
        ``(return_value, job_id)`` as produced by ``run_script``.

    Reads the job's RETVAL line from its per-job output table and, if present,
    stamps ``superpixel_time``/``superpixel_modelid`` on each output image row.
    """
    # Update the job status in the database first:
    update_completed_job_status(result)
    retval, jobid = result

    engine = sqlalchemy.create_engine(get_database_uri())
    try:
        # One connection for all statements; the context manager returns it to
        # the pool (the original opened a fresh, never-closed connection per call).
        with engine.connect() as conn:
            dbretval = conn.execute(
                f"select procout from jobid_{jobid} where procout like 'RETVAL:%'"
            ).first()
            if dbretval is None:
                # No RETVAL row: superpixel didn't get to the end, leave everything as is.
                return
            retvaldict = json.loads(dbretval[0].replace("RETVAL: ", ""))
            if "model" in retvaldict:
                # DL approach: model id is taken from a fixed position in the model path.
                # NOTE(review): assumes the path layout puts the id at component 4 — confirm.
                modelid = retvaldict["model"].split("/")[4]
            else:
                # Non-DL approach has no model; use -1 as the sentinel id.
                modelid = -1
            for img in retvaldict["output_file"]:
                conn.execute(
                    f"update image set superpixel_time = datetime(), superpixel_modelid = :modelid where path= :img",
                    img=img, modelid=modelid)
    finally:
        # Dispose even on error paths (the original leaked the engine on exceptions).
        engine.dispose()
def make_embed_callback(result):
    """Record the embed iteration on the project after an embed job finishes.

    Parameters
    ----------
    result : tuple
        ``(return_value, job_id)``; only the job id is used here.
    """
    # Update the job status in the database first:
    update_completed_job_status(result)
    jobid = result[1]

    engine = sqlalchemy.create_engine(get_database_uri())
    try:
        # Single pooled connection, returned on exit (the original opened
        # unclosed connections and leaked the engine on exceptions).
        with engine.connect() as conn:
            dbretval = conn.execute(
                f"select procout from jobid_{jobid} where procout like 'RETVAL:%'"
            ).first()
            if dbretval is None:
                # No RETVAL row: the job didn't get to the end, leave everything as is.
                return
            retvaldict = json.loads(dbretval[0].replace("RETVAL: ", ""))
            projname = retvaldict["project_name"]
            modelid = retvaldict["modelid"]
            conn.execute(
                f"update project set embed_iteration = :modelid where name = :projname",
                projname=projname, modelid=modelid)
    finally:
        engine.dispose()
def make_patches_callback(result):
    """Record patch-extraction completion times after a make_patches job ends.

    Parameters
    ----------
    result : tuple
        ``(return_value, job_id)`` as produced by ``run_script``.

    Stamps ``make_patches_time`` on each processed image; if the job exited
    cleanly (retval == 0) also stamps the owning project.
    """
    # Update the job status in the database first:
    update_completed_job_status(result)
    retval, jobid = result

    engine = sqlalchemy.create_engine(get_database_uri())
    try:
        # One connection reused for all statements (the original opened a new,
        # never-closed connection per statement and leaked the engine on error).
        with engine.connect() as conn:
            dbretval = conn.execute(
                f"select procout from jobid_{jobid} where procout like 'RETVAL:%'"
            ).first()
            if dbretval is None:
                # No RETVAL row: make_patches didn't get to the end, leave everything as is.
                return
            retvaldict = json.loads(dbretval[0].replace("RETVAL: ", ""))
            for img in retvaldict["image_list"]:
                conn.execute(
                    f"update image set make_patches_time = datetime() where path= :img",
                    img=img)
            # If the job was successful, mark the project-level time as well:
            if retval == 0:
                jobs_logger.info('Marking make_patches time in database:')
                projid = conn.execute(
                    f"select projId from job where id = :jobid",
                    jobid=jobid).first()[0]
                conn.execute(
                    f"update project set make_patches_time = datetime() where id = :projid",
                    projid=projid)
    finally:
        engine.dispose()
def train_autoencoder_callback(result):
    """Record autoencoder-training completion after a train_ae job ends.

    Parameters
    ----------
    result : tuple
        ``(return_value, job_id)`` as produced by ``run_script``.

    On success (retval == 0), stamps ``train_ae_time`` on the owning project and
    bumps a negative ``iteration`` up to 0.
    """
    # Update the job status in the database first:
    update_completed_job_status(result)
    retval, jobid = result
    # Guard clause: nothing more to record for a failed job.
    if retval != 0:
        return

    jobs_logger.info('Marking training ae time in database:')
    engine = sqlalchemy.create_engine(get_database_uri())
    try:
        # One connection for both statements, returned on exit (the original
        # opened two unclosed connections and leaked the engine on exceptions).
        with engine.connect() as conn:
            projid = conn.execute(
                f"select projId from job where id = :jobid",
                jobid=jobid).first()[0]
            conn.execute(
                f"update project set train_ae_time = datetime(), iteration = CASE WHEN iteration<0 then 0 else iteration end where id = :projid",
                projid=projid)
    finally:
        engine.dispose()
def run_script(external_command, job_id):
    """Run an external command, streaming its stdout into the job's DB table.

    Parameters
    ----------
    external_command : list
        Argument vector handed to ``subprocess.Popen``.
    job_id : int
        Row id of the job; also names the per-job output table ``jobid_<id>``.

    Returns
    -------
    tuple
        ``(return_value, job_id)`` — the process exit code and the job id.
    """
    # The worker thread's id, used only for log correlation:
    command_id = threading.get_ident()
    set_job_status(job_id, "RUNNING")  # say the job is running

    engine = sqlalchemy.create_engine(get_database_uri())
    try:
        jobs_logger.info(f'Running command {command_id}: {external_command}')
        # from https://stackoverflow.com/a/18422264
        process = subprocess.Popen(external_command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        # One connection for the whole run; the original opened a brand-new,
        # never-closed connection for EVERY stdout line, exhausting the pool.
        with engine.connect() as conn:
            # NOTE(review): with both pipes open, a stderr-heavy child could fill
            # its stderr buffer and stall before stdout closes — confirm the
            # invoked scripts cannot produce large stderr before exiting.
            for line in iter(process.stdout.readline, b''):
                output = line.strip().decode('utf-8')
                jobs_logger.debug(output)
                conn.execute(
                    f"insert into jobid_{job_id} (timestamp,procout) values (datetime('now'), :output);",
                    output=output)
            # https://stackoverflow.com/a/39477756
            jobs_logger.info("Waiting for the job and then getting stdout and stderr:")
            stdout, stderr = process.communicate()
            jobs_logger.info("Closing stdout:")
            process.stdout.close()
            # Check if there were any errors:
            stderr = stderr.strip().decode('utf-8')
            if stderr != "":
                jobs_logger.error(f'stderr = {stderr}')
            jobs_logger.info("Polling for completion:")
            return_value = process.poll()
            jobs_logger.info(f'Return value = {return_value}')
            conn.execute(
                f"insert into jobid_{job_id} (timestamp,procout) values (datetime('now'), :retval);",
                retval=f"Return value: {return_value}")
    finally:
        # Dispose even when the subprocess or DB raises (the original leaked here).
        engine.dispose()
    return return_value, job_id  # <-- output the command's output
def check_existing_project(data):
    # Flask-Restless style preprocessor: reject creation of a project whose
    # name is already taken. `data` is the deserialized request payload; the
    # 400 ProcessingException is turned into an HTTP error response upstream.
    project_name = data['name']
    proj = Project.query.filter_by(name=project_name).first()
    if proj is not None:
        raise ProcessingException(
            description=f'Project {project_name} already exists.',
            code=400)


# Create the Flask-Restless API manager and configure the application.
app = Flask(__name__)
app.debug = True
app.logger_name = 'flask'
# Register the API and HTML blueprints defined elsewhere in this project:
app.register_blueprint(api)
app.register_blueprint(html)
# Disable static-file caching so edited assets show up immediately:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config['SQLALCHEMY_DATABASE_URI'] = get_database_uri()
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# SQL echo is off unless explicitly enabled in the [sqlalchemy] config section:
app.config['SQLALCHEMY_ECHO'] = config.getboolean('sqlalchemy', 'echo', fallback=False)
# NOTE(review): '__file__' is a string literal here, so abspath resolves it
# relative to the CWD — APP_ROOT is the working directory, not this file's
# directory. Confirm whether __file__ (no quotes) was intended.
APP_ROOT = os.path.dirname(os.path.abspath('__file__'))

if __name__ == '__main__':
    # This seems like the correct place to do this:
    # load the logging config before anything logs.
    logging.config.fileConfig('./config/logging.ini')
    app.logger.info('Initializing database')
    db.app = app
def set_job_status(job_id, status, retval=""):
    """Set a job's status (and optional return value) in the job table.

    Parameters
    ----------
    job_id : int
        Row id of the job; a falsy id (None, 0) makes this a no-op.
    status : str
        New status string, e.g. "RUNNING".
    retval : str, optional
        Return-value text to store alongside the status.
    """
    if job_id:
        engine = sqlalchemy.create_engine(get_database_uri())
        try:
            with engine.connect() as conn:
                # Bind job_id like the other values instead of interpolating it
                # into the SQL string (the original f-string `id={job_id}` was
                # an injection risk and inconsistent with the other parameters).
                conn.execute(
                    "update job set status = :status, retval = :retval where id = :jobid",
                    status=status, retval=retval, jobid=job_id)
        finally:
            # Dispose even if the update raises (the original leaked the engine).
            engine.dispose()
        jobs_logger.info(f'Job {job_id} set to status "{status}".')