def get(user_request):
    if utils.get_metadata("protocol", user_request, 0) == "slmp":
        return slmp.create_job(user_request)
    elif utils.get_metadata("protocol", user_request, 0) == "opcda":
        return user_request
    # add link to job builder for new protocol
    return 0
def push(self, user_job_request):
    job = None
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        if ddb.valid_job_request(job_name):
            jobs = ddb.read_ddb_jobid(job_name)["Items"]
            if not jobs:
                return 0
            if not job_version:
                # no version supplied: push the latest stored version
                job_version = max([item.get("version", 1) for item in jobs], default=1)
                job = [item for item in jobs if item["version"] == job_version][0]
            else:
                for item in jobs:
                    if str(item["version"]) == str(job_version):
                        job = item
                        break
            if job:
                job_to_push = json.loads(job["job_details"])
                job_to_push["job"]["control"] = "push"
                post.to_lambda(job_name, job_to_push)
            else:
                logger.warning("No job found")
        else:
            return 0
def pull(self, user_job_request):
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_control = utils.get_metadata("control", user_job_request, 0)
        if ddb.valid_job_request(job_name, "", job_control):
            post.to_lambda(job_name, user_job_request)
        else:
            return 0
    return 1
def stop(job_data):
    global execution_control, ttl
    logger.info("User request: stop")
    execution_control = "stop"
    # give the polling loop time to wind down (capped at 3 seconds)
    time.sleep(min(10 * ttl, 3))
    temp = local.read(job_data)
    if temp:
        temp["job"]["control"] = utils.get_metadata("control", job_data, 0)
        local.write(temp)
    if utils.get_metadata("control", job_data, 0) == "stop":
        post.to_user("info", job_data, var.m2c2_job_stopped)
def generic_metrics(user_job_request):
    aws_metrics = {}
    aws_metrics["Solution"] = os.environ["SOL_ID"]
    aws_metrics["UUID"] = os.environ["UUID"]
    aws_metrics["TimeStamp"] = str(datetime.utcnow().isoformat())
    aws_metrics["Data"] = {
        "EventType": utils.get_metadata("control", user_job_request, 0),
        "Version": os.environ["SOL_VER"],
        "JobName": utils.get_metadata("name", user_job_request, 0)
    }
    return aws_metrics
def test_fields(test_dict, json, index):
    # test_dict maps each expected key to a list of allowed types;
    # "json" is the user request (the parameter name shadows the json module).
    test_keys = list(test_dict.keys())
    for i in range(0, len(test_dict)):
        if utils.get_metadata(test_keys[i], json, index) != "":
            if type(utils.get_metadata(test_keys[i], json, index)) not in test_dict[test_keys[i]]:
                post.to_user("", "", "info", var.m2c2_err_json_key_type % (test_keys[i], str(test_dict[test_keys[i]])))
                return 0
        else:
            post.to_user("", "", "info", var.m2c2_err_json_key_not_found % (test_keys[i]))
            return 0
    return 1
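# Usage sketch for test_fields (illustrative; the type-spec dicts come from the
# protocol-specific get() helpers elsewhere in this section). A request whose
# connectivity parameters carry "machine-ip": "192.168.0.10" and
# "port-number": 5007 passes a spec like:
#
#   spec = {"machine-ip": [str], "port-number": [int]}
#   test_fields(spec, user_request, 0)  # -> 1; returns 0 (and notifies the
#                                       #    user) on a missing key or wrong type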
def __init__(self, current_target_system):
    self.protocol = utils.get_metadata("protocol", current_target_system, 0)
    self.server_address = (
        utils.get_metadata("machine-ip", current_target_system, 0),
        utils.get_metadata("port-number", current_target_system, 0)
    )
    self.ethernet = utils.get_metadata("ethernet", current_target_system, 0)
    self.communication_code = utils.get_metadata("communication-code", current_target_system, 0)
    self.scheduled_read = utils.get_metadata("data-frames", current_target_system, 0)
    if self.ethernet == "udp":
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    elif self.ethernet == "tcp":
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.frame_counter = 0
def write(user_job_request, type):
    # "type" is the file extension to store under ("raw" or "json");
    # the parameter name shadows the builtin.
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_version = utils.get_metadata("version", user_job_request, 0)
    job_protocol = utils.get_metadata("protocol", user_job_request, 0)
    try:
        s3object = s3_client.Object(
            os.environ["JBM_BCK"],
            job_protocol + "-" + os.environ["JBM_KEY"] + job_name + "#v" + job_version + "." + type)
        s3object.put(Body=bytes(json.dumps(user_job_request).encode("UTF-8")))
    except Exception as err:
        logger.info("s3 write traceback: " + str(err))
        post.to_user(job_name, job_version, "error", var.m2c2_s3_fail_write % (os.environ["JBM_BCK"]))
        return 0
    return 1
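# Key layout sketch (values are illustrative; JBM_BCK and JBM_KEY are the
# bucket and prefix environment variables used above). A "myjob" version 2
# slmp request lands at:
#
#   s3://<JBM_BCK>/slmp-<JBM_KEY>myjob#v2.raw   -- raw user request
#   s3://<JBM_BCK>/slmp-<JBM_KEY>myjob#v2.json  -- processed job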
def to_user(type, job_data, msg):
    job_name = utils.get_metadata("name", job_data, 0)
    job_version = utils.get_metadata("version", job_data, 0)
    topic = "m2c2/job/" + job_name + "/" + type
    user_message = {
        "message": msg,
        "_id_": str(uuid.uuid4()),
        "_timestamp_": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
        "version": job_version
    }
    iot_client.publish(topic=topic, qos=1, payload=json.dumps(user_message))
def push(self, user_job_request):
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        job_control = utils.get_metadata("control", user_job_request, i)
        if ddb.valid_job_request(job_name, job_version, job_control):
            stored_job_request = s3.read(job_name, job_version)
            if not stored_job_request:
                return 0
            stored_job_request["job"]["control"] = "push"
            post.to_lambda(job_name, stored_job_request)
        else:
            return 0
def __init__(self, user_request):
    self.job_name = utils.get_metadata("name", user_request, 0)
    self.job_version = utils.get_metadata("version", user_request, 0)
    self.protocol = utils.get_metadata("protocol", user_request, 0)
    self.process = utils.get_metadata("process", user_request, 0)
    self.sitename = utils.get_metadata("site-name", user_request, 0)
    self.area = utils.get_metadata("area", user_request, 0)
    self.machinename = utils.get_metadata("machine-name", user_request, 0)
    self.lambda_path = os.environ["CON_KEY"]
    self.lambda_role = os.environ["CON_ROL"]
    self.kinesis_arn = os.environ["KINESIS_ARN"]
    self.m2c2_local_resource_path = var.m2c2_local_resource_path
    self.gg_group_id = utils.get_metadata("gg-group-id", user_request, 0)
    logger.info("Greengrass Group Id: " + str(self.gg_group_id))
    self.gg_connector_lambda = ""
    self.gg_connector_lambda_alias = ""
    # one slot per Greengrass definition type, in mnemo_list order
    self.gg_create_definition = ["", "", "", "", "", "", ""]
    self.gg_list_definitions = ["", "", "", "", "", "", ""]
    self.gg_get_definition_version = [[], [], [], [], [], [], []]
    self.gg_definition_version = ["", "", "", "", "", "", ""]
    self.gg_group = ""
    self.gg_group_version = ""
    self.mnemo_list = ["Resource", "Core", "Device", "Function", "Logger", "Subscription", "Connector"]
def get(user_request):
    if utils.get_metadata("protocol", user_request, 0) == "slmp":
        if utils.get_metadata("network", user_request, 0) > var.max_slmp_network:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("network"))
            return 0
        if (utils.get_metadata("station", user_request, 0) > var.max_slmp_station) and \
           (utils.get_metadata("station", user_request, 0) != 255) and \
           (utils.get_metadata("station", user_request, 0) != 0):
            post.to_user("", "", "error", var.m2c2_err_json_range % ("station"))
            return 0
        if utils.get_metadata("multidrop", user_request, 0) > var.max_slmp_multidrop:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("multidrop"))
            return 0
        if utils.get_metadata("subheader", user_request, 0) not in var.slmp_subheader:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("subheader"))
            return 0
        if utils.get_metadata("communication-code", user_request, 0) not in var.slmp_communication_code:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("communication-code"))
            return 0
        if utils.get_metadata("ethernet", user_request, 0) not in var.slmp_ethernet:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("ethernet"))
            return 0
        return 1
    elif utils.get_metadata("protocol", user_request, 0) == "opcda":
        return 1
    # add limits for new protocol
    return 0
def stop(self, user_job_request):
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        job_control = utils.get_metadata("control", user_job_request, 0)
        if ddb.valid_job_request(job_name, job_version, job_control):
            stored_job_request = s3.read(job_name, job_version)
            if not stored_job_request:
                return 0
            if not ddb.update(job_name, job_version, job_control, stored_job_request):
                return 0
            stored_job_request["job"]["control"] = "stop"
            del stored_job_request["job"]["machine-details"]
            post.to_lambda(job_name, stored_job_request)
        else:
            return 0
def stop_group_entries(job_name, job_version, user_job_request):
    gg_group_id = utils.get_metadata("gg-group-id", user_job_request, 0)
    all_entries = ddb_table.scan()
    last_evaluated_key = all_entries.get('LastEvaluatedKey')
    while last_evaluated_key:
        # pass the pagination token; without it the scan restarts from the
        # beginning and this loop never terminates
        more_entries = ddb_table.scan(ExclusiveStartKey=last_evaluated_key)
        all_entries['Items'].extend(more_entries['Items'])
        last_evaluated_key = more_entries.get('LastEvaluatedKey')
    jobs_to_stop = {"job": {"control": "stop", "properties": []}}
    other_job_stop_request = False
    # iterate over the merged item list; "Count" only covers the first page
    for item in all_entries["Items"]:
        if item["gggroupid"] == gg_group_id:
            if item["control"] != "stop":
                other_job_stop_request = True
                jobs_to_stop["job"]["properties"].append({
                    "name": item["jobid"],
                    "version": str(item["version"])
                })
    if other_job_stop_request:
        post.to_user(job_name, job_version, "info", var.m2c2_ddb_controller_restart % (jobs_to_stop))
        ddb_controlled_stop(jobs_to_stop)
    return 1
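# Standalone sketch of the scan pagination pattern used above (not part of the
# original module; the table name is a placeholder). DynamoDB returns at most
# 1 MB per scan call, so every follow-up call must pass ExclusiveStartKey.
import boto3

def scan_all_items(table_name):
    table = boto3.resource("dynamodb").Table(table_name)
    response = table.scan()
    items = response["Items"]
    while "LastEvaluatedKey" in response:
        # resume where the previous page stopped
        response = table.scan(ExclusiveStartKey=response["LastEvaluatedKey"])
        items.extend(response["Items"])
    return items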
def deploy(self, user_job_request):
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_version = utils.get_metadata("version", user_job_request, 0)
    control = utils.get_metadata("control", user_job_request, 0)
    if ddb.valid_job_request(job_name, job_version, control):
        logger.info("Setting greengrass group")
        connector = gg.generate_greengrass_resources(user_job_request)
        if not connector.create_gg_definitions():
            return 0
        else:
            # write raw job to s3
            logger.info("Writing raw job to s3")
            if not s3.write(user_job_request, "raw"):
                connector.delete_gg_definitions()
                return 0
            # build job as per protocol
            logger.info("Processing job request")
            processed_job_request = build.job(user_job_request)
            if not processed_job_request:
                connector.delete_gg_definitions()
                return 0
            # write processed job to s3
            logger.info("Writing processed job to s3")
            if not s3.write(processed_job_request, "json"):
                connector.delete_gg_definitions()
                return 0
            # update DynamoDB, as all other jobs on this gg group id will stop during manual deployment
            logger.info("Updating database to stop all jobs for the group id")
            if not ddb.update(job_name, job_version, control, user_job_request):
                connector.delete_gg_definitions()
                return 0
            # create an entry in DynamoDB
            logger.info("Creating database entry")
            if not ddb.write(user_job_request):
                connector.delete_gg_definitions()
                return 0
            # collect metrics
            if os.environ["MET_ENB"].lower() == "true":
                metrics.get_metrics(user_job_request)
            # prompt user to deploy via the console
            post.to_user(job_name, job_version, "info", var.m2c2_user_deploy_request)
            return 1
    else:
        return 0
def read(data):
    job_name = utils.get_metadata("name", data, 0)
    if os.path.exists(var.config_path + job_name + ".json"):
        with open(var.config_path + job_name + ".json") as file:
            data = json.load(file)
        return data
    else:
        post.to_user("error", data, var.m2c2_local_read % (job_name))
        return ""
def get(user_request):
    if utils.get_metadata("protocol", user_request, 0) == "slmp":
        return {
            "port-number": [int],
            "machine-ip": [str],
            "network": [int],
            "station": [int],
            "module": [str],
            "multidrop": [int],
            "timer": [int],
            "subheader": [str],
            "communication-code": [str],
            "ethernet": [str]
        }
    elif utils.get_metadata("protocol", user_request, 0) == "opcda":
        return {"opcda-server-name": [str], "machine-ip": [str]}
    # add "connectivity-parameters" for new protocol
    return 0
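# Illustrative request fragment (an assumption about the expected JSON shape,
# inferred from the keys checked above and the "machine-details" /
# "properties" fields used elsewhere in this section; all values made up):
#
#   {
#     "job": {
#       "control": "deploy",
#       "properties": [{"name": "myjob", "version": "1"}],
#       "machine-details": {
#         "connectivity-parameters": {
#           "protocol": "slmp",
#           "machine-ip": "192.168.0.10",
#           "port-number": 5007,
#           "ethernet": "tcp"
#         }
#       }
#     }
#   }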
def get_metrics(user_job_request):
    aws_metrics = generic_metrics(user_job_request)
    if utils.get_metadata("control", user_job_request, 0) in ["start", "push", "update"]:
        aws_metrics = build.protocol_metrics(user_job_request, aws_metrics)
    post.to_metrics(aws_metrics)
    return 1
def ddb_controlled_stop(user_job_request):
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        job_control = utils.get_metadata("control", user_job_request, 0)
        temp_json = {
            "job": {
                "control": job_control,
                "properties": [{
                    "name": job_name,
                    "version": job_version
                }]
            }
        }
        # validate the request before forwarding, mirroring pull() above
        if ddb.valid_job_request(job_name, job_version, job_control):
            post.to_lambda(job_name, temp_json)
        else:
            return 0
def function_handler(job_data, context):
    global execution_control
    job_name = utils.get_metadata("name", job_data, 0)
    job_control = utils.get_metadata("control", job_data, 0)
    if (execution_control == "run") and (job_control in ["start"]):
        post.to_user("info", job_data, var.m2c2_already_started)
    elif (execution_control == "stop") and (job_control in ["stop"]):
        post.to_user("info", job_data, var.m2c2_already_stopped)
    else:
        if job_control == "start":
            start(job_data)
        elif job_control == "stop":
            stop(job_data)
        elif job_control == "push":
            push(job_data)
        elif job_control == "update":
            update(job_data)
        elif job_control == "pull":
            pull(job_data)
def generic_metrics(user_job_request):
    return {
        "Solution": os.environ["SOLUTION_ID"],
        "UUID": os.environ["UUID"],
        "TimeStamp": datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
        "Version": os.environ["SOLUTION_VERSION"],
        "Data": {
            "EventType": utils.get_metadata("control", user_job_request, 0)
        }
    }
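# Example payload produced by generic_metrics above (values are placeholders
# for the SOLUTION_ID / UUID / SOLUTION_VERSION environment variables):
#
#   {"Solution": "<SOLUTION_ID>", "UUID": "<uuid>",
#    "TimeStamp": "2020-01-01 12:00:00.000",
#    "Version": "<SOLUTION_VERSION>",
#    "Data": {"EventType": "deploy"}}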
def start(self, user_job_request):
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        job_control = utils.get_metadata("control", user_job_request, i)
        jobs = ddb.read_ddb_jobid(job_name)["Items"]
        if not job_version:
            # Start latest version
            job_version = max([item.get("version", 0) for item in jobs], default=0)
            job_entry = [item for item in jobs if item["version"] == job_version][0]
        else:
            for entry in jobs:
                if str(entry["version"]) == str(job_version):
                    job_entry = entry
        if ddb.valid_job_request(job_name):
            if not job_entry:
                logger.info("There was an issue retrieving job information for job {0}".format(job_name))
                return 0
            json_job = json.loads(job_entry["job_details"])
            if str(job_entry["version"]) == str(job_version):
                job_to_start = json_job
            if not ddb.update(job_name, job_version, job_control, ""):
                return 0
            # stop every other version of this job before starting the requested one
            for entry in jobs:
                if entry["version"] != int(job_version):
                    json_job_details = json.loads(entry["job_details"])
                    json_job_details["job"]["properties"][0]["version"] = str(entry["version"])
                    self.stop(json_job_details)
            job_to_start["job"]["control"] = "start"
            job_to_start["job"]["properties"][0]["version"] = str(job_version)
            post.to_lambda(job_name, job_to_start)
        else:
            return 0
def write(data):
    data["job"]["_last-update-timestamp_"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    job_name = utils.get_metadata("name", data, 0)
    try:
        with open(var.config_path + job_name + ".json", "w+", encoding="utf-8") as file:
            json.dump(data, file)
        return 1
    except Exception as err:
        logger.info("Unable to write local file: " + str(err))
        post.to_user("error", data, var.m2c2_local_write % (var.config_path))
        return 0
def update(self, user_job_request):
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_version = utils.get_metadata("version", user_job_request, 0)
    job_control = utils.get_metadata("control", user_job_request, 0)
    if ddb.valid_job_request(job_name, job_version, job_control):
        # build job as per protocol
        processed_job_request = build.job(user_job_request)
        if not processed_job_request:
            return 0
        # write processed job to s3
        if not s3.write(processed_job_request, "json"):
            return 0
        # update DynamoDB, as all other jobs on this gg group id will stop during manual deployment
        if not ddb.update(job_name, job_version, job_control, ""):
            return 0
        # create an entry in DynamoDB
        if not ddb.write(user_job_request):
            return 0
        post.to_lambda(job_name, processed_job_request)
    else:
        return 0
    return 1
def transform(payload, job):
    # logger.info("payload: " + str(payload))
    # logger.info("Job: " + str(job))
    formatted_payload = {}
    decoded_responses = []
    # decode every frame in the payload, sorted by timestamp
    for i in range(0, len(payload)):
        for j in range(0, len(payload[i])):
            decoded_responses.append(
                decode_frame(
                    utils.get_metadata("communication-code", job, 0),
                    payload[i][j],
                    utils.get_metadata("data-decode", job, j)))
    # organise the decoded samples by tag name
    for response in decoded_responses:
        if response == 0:
            logger.info("Invalid dataframe format.")
            return 0
        for key in response.keys():
            if key == "timestamp":
                continue
            temp = {"timestamp": response["timestamp"], "value": response[key]}
            if key not in formatted_payload:
                formatted_payload[key] = [temp]
            else:
                formatted_payload[key].append(temp)
    post.to_stream(
        utils.get_metadata("name", job, 0),
        utils.get_metadata("version", job, 0),
        formatted_payload)
    return 1
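# Output shape sketch (inferred from the regrouping above; tag names and
# values are made up): transform() turns decoded frames into one list of
# timestamped samples per tag before posting to the stream:
#
#   {"tag-a": [{"timestamp": "...", "value": 1}, {"timestamp": "...", "value": 2}],
#    "tag-b": [{"timestamp": "...", "value": 7}]}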
def stop(self, user_job_request):
    for i in range(0, len(utils.get_metadata("properties", user_job_request, 0))):
        job_name = utils.get_metadata("name", user_job_request, i)
        job_version = utils.get_metadata("version", user_job_request, i)
        desired_job_control = "stop"
        jobs = ddb.read_ddb_jobid(job_name)["Items"]
        if not job_version:
            # Stop running version
            job_entry = [
                item for item in jobs
                if item["control"] == "start" or item["control"] == "update"
            ][0]
            job_version = job_entry["version"]
        else:
            for entry in jobs:
                if str(entry["version"]) == str(job_version):
                    job_entry = entry
        job_control = job_entry["control"]
        if job_control != desired_job_control:
            if ddb.valid_job_request(job_name):
                if not job_entry:
                    logger.info("There was an issue retrieving job {0}".format(job_name))
                    return 0
                if not ddb.update(job_name, job_version, desired_job_control, job_entry["job_details"]):
                    return 0
                # for item in stored_job_request:
                json_job = json.loads(job_entry["job_details"])
                if str(job_entry["version"]) == str(job_version):
                    job_to_stop = json_job
                job_to_stop["job"]["control"] = "stop"
                del job_to_stop["job"]["machine-details"]
                post.to_lambda(job_name, job_to_stop)
            else:
                return 0
def job_execution():
    # runs while execution_control is set to run. Transmissions are time-based only.
    global execution_control, payload_content, connection_handler, loop_count, event, ttl, current_job
    if execution_control == "run":
        current_job = event
        start_time = time.time()
        try:
            payload_content.append(connection_handler.read())
        except Exception as err:
            execution_control = "stop"
            logger.info("Unable to read from server: " + str(err))
            post.to_user("error", current_job, var.m2c2_lost_communication)
        loop_count += 1
        if loop_count >= utils.get_metadata("machine-query-iterations", current_job, 0):
            loop_count = 0
            try:
                tform.transform(payload_content, current_job)
            except Exception as err:
                logger.info("Unable to transform dataset: " + str(err))
            payload_content = []
        ttl = time.time() - start_time
        Timer(utils.get_metadata("machine-query-time-interval", current_job, 0), job_execution).start()
    elif execution_control == "stop":
        loop_count = 0
        if payload_content != []:
            try:
                tform.transform(payload_content, current_job)
            except Exception as err:
                logger.info("Unable to transform dataset: " + str(err))
            payload_content = []
        try:
            connection_handler.close()
        except Exception:
            pass
        return 0
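# Standalone sketch of the self-rearming Timer pattern used by job_execution
# (illustrative names; not part of the original module). Each cycle runs on a
# fresh thread after interval_seconds, until the caller flips the flag.
from threading import Timer

def poll(interval_seconds, read_once, state):
    if state.get("control") != "run":
        return
    read_once()  # one machine read cycle
    Timer(interval_seconds, poll, args=(interval_seconds, read_once, state)).start()

# usage: state = {"control": "run"}; poll(1.0, lambda: None, state)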
def retrieve_entry_value(user_job_request, attribute):
    job_name = utils.get_metadata("name", user_job_request, 0)
    try:
        job_value = ddb_table.query(
            Select='SPECIFIC_ATTRIBUTES',
            ProjectionExpression=attribute,
            KeyConditionExpression=Key('jobid').eq(job_name))
        return job_value
    except Exception as err:
        logger.error("ddb version - retrieve traceback:" + str(err))
        post.to_user(job_name, "", "error", var.m2c2_ddb_retrieving_values % (str(job_name), str(err)))
        return 0
def run(self, user_job_request):
    job_name = utils.get_metadata("name", user_job_request, 0)
    control = utils.get_metadata("control", user_job_request, 0)
    logger.info("Received {0} control for job id {1}".format(str(control), str(job_name)))
    if control == "start":
        return self.start(user_job_request)
    elif control == "stop":
        return self.stop(user_job_request)
    elif control == "push":
        return self.push(user_job_request)
    elif control == "pull":
        return self.pull(user_job_request)
    elif control == "update":
        return self.update(user_job_request)
    elif control == "deploy":
        return self.deploy(user_job_request)
def start_jobs_after_gg_deploy(user_job_request):
    gg_group_id = utils.get_metadata("gg-group-id", user_job_request, 0)
    job_name = utils.get_metadata("name", user_job_request, 0)
    # initialized up front so the except handler can always reference it
    job_version = ""
    jobs_to_restart = []
    try:
        all_entries = ddb_table.scan(
            Select='ALL_ATTRIBUTES',
            FilterExpression=Attr('control').eq('start'))
        last_evaluated_key = all_entries.get('LastEvaluatedKey')
        while last_evaluated_key:
            more_entries = ddb_table.scan(
                Select='ALL_ATTRIBUTES',
                FilterExpression=Attr('control').eq('start'),
                ExclusiveStartKey=last_evaluated_key)
            all_entries['Items'].extend(more_entries['Items'])
            last_evaluated_key = more_entries.get('LastEvaluatedKey')
        for entry in all_entries['Items']:
            if job_name != entry['jobid'] and entry['gggroupid'] == gg_group_id:
                job_version = str(entry['version'])
                temp_json = {
                    "job": {
                        "control": entry['control'],
                        "properties": [{
                            "name": entry['jobid'],
                            "version": job_version
                        }]
                    }
                }
                jobs_to_restart.append(temp_json)
        return jobs_to_restart
    except Exception as err:
        logger.error("ddb update - update traceback:" + str(err))
        post.to_user(job_name, job_version, "error", var.m2c2_jobs_to_restart % (str(err)))
        return 0