def deploy(self, user_job_request):
    """Deploy a new job.

    Validates the request, provisions the Greengrass resources, persists
    the raw and processed job to S3, records the job in DynamoDB, and
    finally prompts the user to complete deployment from the console.

    Any failure after the Greengrass definitions were created rolls them
    back via ``connector.delete_gg_definitions()``.

    Args:
        user_job_request: parsed user job request (dict-like) consumed
            through ``utils.get_metadata``.

    Returns:
        1 on success, 0 on any failure.
    """
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_version = utils.get_metadata("version", user_job_request, 0)
    control = utils.get_metadata("control", user_job_request, 0)
    # NOTE(review): the original also read "process", "site-name", "area"
    # and "machine-name" into locals that were never used; those reads
    # were removed (assumes utils.get_metadata has no side effects).

    if not ddb.valid_job_request(job_name, job_version, control):
        return 0

    logger.info("Setting greengrass group")
    connector = gg.generate_greengrass_resources(user_job_request)
    if not connector.create_gg_definitions():
        return 0

    # write raw job to s3
    logger.info("Writing raw job to s3")
    if not s3.write(user_job_request, "raw"):
        connector.delete_gg_definitions()
        return 0

    # build job as per protocol
    logger.info("Processing job request")
    processed_job_request = build.job(user_job_request)
    if not processed_job_request:
        connector.delete_gg_definitions()
        return 0

    # write processed job to s3
    logger.info("Writing processed job to s3")
    if not s3.write(processed_job_request, "json"):
        connector.delete_gg_definitions()
        return 0

    # update DynamoDB as all other jobs on this gg group id will stop
    # during manual deployment
    logger.info("Updating database to stop all jobs for the group id")
    if not ddb.update(job_name, job_version, control, user_job_request):
        connector.delete_gg_definitions()
        return 0

    # create an entry in DynamoDB
    logger.info("Creating database entry")
    if not ddb.write(user_job_request):
        connector.delete_gg_definitions()
        return 0

    # collect metrics when enabled; .get avoids a KeyError when the
    # MET_ENB environment variable is not set at all
    if os.environ.get("MET_ENB", "").lower() == "true":
        metrics.get_metrics(user_job_request)

    # prompt user to deploy via the console
    post.to_user(job_name, job_version, "info", var.m2c2_user_deploy_request)
    return 1
def start(self, user_job_request):
    """Start every job listed in the request's "properties" array.

    For each entry the stored job is read back from S3, its DynamoDB
    record is updated, and the job is posted to the device lambda with
    control forced to "start".

    Args:
        user_job_request: parsed user job request (dict-like) consumed
            through ``utils.get_metadata``.

    Returns:
        1 when every listed job was started, 0 on the first failure.
    """
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        # BUG FIX: control was read with a hard-coded index 0 while
        # name/version use the loop index; read entry i's control instead.
        job_control = utils.get_metadata("control", user_job_request, i)

        if not ddb.valid_job_request(job_name, job_version, job_control):
            return 0

        stored_job_request = s3.read(job_name, job_version)
        if not stored_job_request:
            return 0
        if not ddb.update(job_name, job_version, job_control, ""):
            return 0

        stored_job_request["job"]["control"] = "start"
        post.to_lambda(job_name, stored_job_request)
    # Explicit success value for consistency with deploy()/update(),
    # which return 1 on success (original fell through returning None).
    return 1
def start(self, user_job_request):
    """Start one version of each job named in the request.

    When no version is supplied, the highest stored version is started.
    Every other stored version of the same job is stopped first so that
    only one version runs at a time.

    Args:
        user_job_request: parsed user job request (dict-like) consumed
            through ``utils.get_metadata``.

    Returns:
        1 when every listed job was started, 0 on the first failure.
    """
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        job_control = utils.get_metadata("control", user_job_request, i)

        jobs = ddb.read_ddb_jobid(job_name)["Items"]
        # BUG FIX: job_entry was left unbound (NameError) when `jobs` was
        # empty or no stored version matched; initialize and guard instead.
        job_entry = None
        if not job_version:
            # Start latest version
            job_version = max([item.get("version", 0) for item in jobs], default=0)
            matches = [item for item in jobs if item["version"] == job_version]
            # BUG FIX: indexing [0] on an empty match list raised IndexError.
            job_entry = matches[0] if matches else None
        else:
            for entry in jobs:
                if str(entry["version"]) == str(job_version):
                    job_entry = entry

        if not ddb.valid_job_request(job_name):
            return 0
        if not job_entry:
            logger.info(
                "There was an issue retrieving job information for job {0}"
                .format(job_name))
            return 0

        # job_entry was selected to match job_version above, so it is the
        # job to start (original re-checked the version here; always true).
        job_to_start = json.loads(job_entry["job_details"])

        if not ddb.update(job_name, job_version, job_control, ""):
            return 0

        # Stop every other stored version of this job before starting.
        for entry in jobs:
            if entry["version"] != int(job_version):
                json_job_details = json.loads(entry["job_details"])
                json_job_details["job"]["properties"][0]["version"] = str(entry["version"])
                self.stop(json_job_details)

        job_to_start["job"]["control"] = "start"
        job_to_start["job"]["properties"][0]["version"] = str(job_version)
        post.to_lambda(job_name, job_to_start)
    # Explicit success value for consistency with deploy()/update().
    return 1
def update(self, user_job_request):
    """Update an existing job.

    Rebuilds the job as per protocol, persists the processed job to S3,
    refreshes the DynamoDB records, and pushes the processed job to the
    device lambda.

    Args:
        user_job_request: parsed user job request (dict-like) consumed
            through ``utils.get_metadata``.

    Returns:
        1 on success, 0 on any failure.
    """
    name = utils.get_metadata("name", user_job_request, 0)
    version = utils.get_metadata("version", user_job_request, 0)
    control = utils.get_metadata("control", user_job_request, 0)

    if not ddb.valid_job_request(name, version, control):
        return 0

    # build job as per protocol
    processed = build.job(user_job_request)
    if not processed:
        return 0
    # write processed job to s3
    if not s3.write(processed, "json"):
        return 0
    # update DynamoDB as all other jobs on this gg group id will stop
    # during manual deployment
    if not ddb.update(name, version, control, ""):
        return 0
    # create an entry in DynamoDB
    if not ddb.write(user_job_request):
        return 0

    post.to_lambda(name, processed)
    return 1
def stop(self, user_job_request):
    """Stop each job named in the request.

    When no version is supplied, the currently running version (the one
    whose control is "start" or "update") is stopped. A job already in
    the "stop" state is skipped.

    Args:
        user_job_request: parsed user job request (dict-like) consumed
            through ``utils.get_metadata``.

    Returns:
        1 when every listed job was handled, 0 on the first failure.
    """
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        desired_job_control = "stop"

        jobs = ddb.read_ddb_jobid(job_name)["Items"]
        # BUG FIX: job_entry was left unbound when no stored entry matched,
        # so job_entry['control'] below raised before the not-found check.
        job_entry = None
        if not job_version:
            # Stop running version
            running = [item for item in jobs
                       if item["control"] == "start" or item["control"] == "update"]
            # BUG FIX: indexing [0] on an empty list raised IndexError when
            # no version was running.
            if running:
                job_entry = running[0]
                job_version = job_entry["version"]
        else:
            for entry in jobs:
                if str(entry["version"]) == str(job_version):
                    job_entry = entry

        if not job_entry:
            logger.info("There was an issue retrieving job {0}".format(job_name))
            return 0

        # Nothing to do if the stored entry is already stopped.
        if job_entry["control"] == desired_job_control:
            continue

        if not ddb.valid_job_request(job_name):
            return 0
        if not ddb.update(job_name, job_version, desired_job_control,
                          job_entry["job_details"]):
            return 0

        job_to_stop = json.loads(job_entry["job_details"])
        job_to_stop["job"]["control"] = "stop"
        # BUG FIX: pop with a default instead of `del`, which raised
        # KeyError when "machine-details" was absent from the stored job.
        job_to_stop["job"].pop("machine-details", None)
        post.to_lambda(job_name, job_to_stop)
    # Explicit success value for consistency with deploy()/update().
    return 1