def stop_group_entries(job_name, job_version, user_job_request):
    gg_group_id = utils.get_metadata("gg-group-id", user_job_request, 0)
    all_entries = ddb_table.scan()
    last_evaluated_key = all_entries.get('LastEvaluatedKey')
    while last_evaluated_key:
        # follow pagination so entries beyond the first 1 MB page are included
        more_entries = ddb_table.scan(ExclusiveStartKey=last_evaluated_key)
        all_entries['Items'].extend(more_entries['Items'])
        last_evaluated_key = more_entries.get('LastEvaluatedKey')
    jobs_to_stop = {"job": {"control": "stop", "properties": []}}
    other_job_stop_request = False
    for i in range(0, len(all_entries["Items"])):
        if all_entries["Items"][i]["gggroupid"] == gg_group_id:
            if all_entries["Items"][i]['control'] != "stop":
                other_job_stop_request = True
                jobs_to_stop["job"]["properties"].append({
                    'name': all_entries["Items"][i]['jobid'],
                    'version': str(all_entries["Items"][i]['version'])
                })
    if other_job_stop_request:
        post.to_user(job_name, job_version, "info",
                     var.m2c2_ddb_controller_restart % (jobs_to_stop))
        ddb_controlled_stop(jobs_to_stop)
    return 1

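# Illustrative sketch (not part of the solution code): the pagination pattern used
# above, isolated into a helper. A DynamoDB scan returns at most 1 MB per call and
# sets 'LastEvaluatedKey' when more pages remain; passing it back as
# 'ExclusiveStartKey' resumes the scan. The table name "m2c2-job-metadata" is a
# hypothetical example.
import boto3


def scan_all_items(table, **scan_kwargs):
    """Return every item of a DynamoDB table, following pagination."""
    items = []
    response = table.scan(**scan_kwargs)
    items.extend(response["Items"])
    while "LastEvaluatedKey" in response:
        response = table.scan(ExclusiveStartKey=response["LastEvaluatedKey"],
                              **scan_kwargs)
        items.extend(response["Items"])
    return items


# Example usage under the assumptions above:
# table = boto3.resource("dynamodb").Table("m2c2-job-metadata")
# for item in scan_all_items(table):
#     print(item["jobid"], item["version"], item["control"])
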
def update(job_name, job_version, job_control, user_job_request):
    global job_in_ddb
    # Update key items control and timestamp every time an action on the job is performed.
    if job_control == "deploy":
        # all ddb entries in the group must be stopped
        return stop_group_entries(job_name, job_version, user_job_request)
    elif job_control == "update":
        # all ddb entries for the job name must be stopped
        return stop_name_entries(job_name, job_version)
    elif job_control in ["start", "stop"]:
        job_detail = get_job_details(job_version)
        if job_detail["control"] != job_control:
            try:
                ddb_table.update_item(
                    Key={
                        'jobid': job_name,
                        'version': int(float(str(job_version)))
                    },
                    UpdateExpression="set #t =:t, control =:c",
                    ExpressionAttributeValues={
                        ':t': datetime.now().strftime("%d/%m/%Y,%H:%M:%S.%f"),
                        ':c': job_control
                    },
                    ExpressionAttributeNames={"#t": "timestamp"})
            except Exception as err:
                logger.error("ddb update - start stop traceback:" + str(err))
                post.to_user(job_name, job_version, "error",
                             var.m2c2_ddb_access % ddb_table)
                return 0
        return 1
    return 0

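# Illustrative sketch: how the update_item call above maps to boto3. "timestamp" is
# a DynamoDB reserved word, so it has to be aliased through ExpressionAttributeNames
# ("#t"). The table and key values below are hypothetical examples.
from datetime import datetime

import boto3


def set_job_control(table, job_name, job_version, job_control):
    """Update the 'control' and 'timestamp' attributes of one job entry."""
    table.update_item(
        Key={"jobid": job_name, "version": int(job_version)},
        UpdateExpression="set #t = :t, control = :c",
        ExpressionAttributeNames={"#t": "timestamp"},
        ExpressionAttributeValues={
            ":t": datetime.now().strftime("%d/%m/%Y,%H:%M:%S.%f"),
            ":c": job_control,
        },
    )


# Example usage (hypothetical table name):
# table = boto3.resource("dynamodb").Table("m2c2-job-metadata")
# set_job_control(table, "my-job", 1, "stop")
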
def write(user_job_request):
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_version = utils.get_metadata("version", user_job_request, 0)
    job_protocol = utils.get_metadata("protocol", user_job_request, 0)
    try:
        # first ensure that optional keys have a value that can be written to ddb
        na = "not specified"
        site = utils.get_metadata("site-name", user_job_request, 0)
        if site == "":
            site = na
        area = utils.get_metadata("area", user_job_request, 0)
        if area == "":
            area = na
        process = utils.get_metadata("process", user_job_request, 0)
        if process == "":
            process = na
        machine = utils.get_metadata("machine-name", user_job_request, 0)
        if machine == "":
            machine = na
        ddb_table.put_item(
            Item={
                "jobid": job_name,
                "timestamp": datetime.now().strftime("%d/%m/%Y,%H:%M:%S.%f"),
                "site": site,
                "area": area,
                "process": process,
                "version": int(job_version),
                "s3 bucket": "https://" + os.environ["JBM_BCK"] + ".s3-" +
                             os.environ["AWS_REGION"] + ".amazonaws.com" + "/" +
                             job_protocol + "-" + os.environ["JBM_KEY"] +
                             job_name + "#v" + job_version + ".json",
                "control": utils.get_metadata("control", user_job_request, 0),
                "gggroupid": utils.get_metadata("gg-group-id", user_job_request, 0),
                "machine": machine,
                "protocol": job_protocol,
                "address": utils.get_metadata("machine-ip", user_job_request, 0)
            })
        return 1
    except Exception as err:
        logger.info("ddb write traceback:" + str(err))
        post.to_user(job_name, job_version, "error",
                     var.m2c2_ddb_access % (os.environ["JBM_TBL"]))
        return 0

def create_group_version(self):
    logger.info("Previous definitions: " + str(self.gg_group_version))
    logger.info("Updated definitions: " + str(self.gg_definition_version))
    try:
        self.resource_gg_group_version = gg_client.create_group_version(
            GroupId=self.gg_group_id,
            ResourceDefinitionVersionArn=self.gg_definition_version[0]["Arn"],
            CoreDefinitionVersionArn=self.gg_definition_version[1]["Arn"],
            DeviceDefinitionVersionArn=self.gg_definition_version[2]["Arn"],
            FunctionDefinitionVersionArn=self.gg_definition_version[3]["Arn"],
            LoggerDefinitionVersionArn=self.gg_definition_version[4]["Arn"],
            SubscriptionDefinitionVersionArn=self.gg_definition_version[5]["Arn"],
            ConnectorDefinitionVersionArn=self.gg_definition_version[6]["Arn"])
    except Exception as err:
        logger.info("create group version traceback: " + str(err))
        post.to_user(self.job_name, self.job_version, "error",
                     var.m2c2_gg_create_group_version)
        return 0
    return 1

def __init__(self, job_request):
    validated_job_schema = json_check.check_schema(job_request)
    if validated_job_schema:
        self.run(validated_job_schema)
    else:
        post.to_user("", "", "error", var.m2c2_err_json_schema)
    return

def read_ddb_jobid(job_name):
    try:
        return ddb_table.query(
            KeyConditionExpression=Key("jobid").eq(job_name))
    except:
        post.to_user(job_name, "", "error", var.m2c2_ddb_access % ddb_table)
        return 0

def read_ddb_jobid(job_name, job_version):
    try:
        return ddb_table.query(
            KeyConditionExpression=Key("jobid").eq(job_name))
    except Exception as err:
        post.to_user(job_name, job_version, "error",
                     var.m2c2_ddb_access % (os.environ["JBM_TBL"]))
        return 0

def read(data):
    job_name = utils.get_metadata("name", data, 0)
    if os.path.exists(var.config_path + job_name + ".json"):
        with open(var.config_path + job_name + ".json") as file:
            data = json.load(file)
        return data
    else:
        post.to_user("error", data, var.m2c2_local_read % (job_name))
        return ""

def read(job_name, job_version):
    job = ddb.get_job_details(job_version)
    job_protocol = job["protocol"]
    try:
        s3object = s3_client.Object(
            os.environ["JBM_BCK"],
            job_protocol + "-" + os.environ["JBM_KEY"] + job_name + "#v" +
            job_version + ".json")
        return json.loads(s3object.get()['Body'].read().decode('utf-8'))
    except Exception as err:
        logger.info("s3 read traceback:" + str(err))
        post.to_user(job_name, job_version, "error",
                     var.m2c2_s3_fail_read % (os.environ["JBM_BCK"]))
        return 0

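# Illustrative sketch: reading a job document back from S3 with the boto3 resource
# API, as read() does above. The bucket and key below are hypothetical; the real
# values come from the JBM_BCK / JBM_KEY environment variables.
import json

import boto3


def read_job_document(bucket, key):
    """Fetch an S3 object and parse it as JSON."""
    obj = boto3.resource("s3").Object(bucket, key)
    return json.loads(obj.get()["Body"].read().decode("utf-8"))


# Example usage with a key shaped like the one built above
# ("<protocol>-<prefix><job name>#v<version>.json"):
# doc = read_job_document("my-job-bucket", "opcda-jobs/my-job#v1.json")
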
def push(job_data):
    logger.info("User request: push")
    comms_handler = comms.DeviceCommunication(job_data)
    if comms_handler.open():
        logger.info("successfully connected.")
        post.to_user("info", job_data, var.m2c2_job_push_success)
    else:
        logger.info("Failed to connect.")
        post.to_user("info", job_data, var.m2c2_job_push_fail)
    comms_handler.close()
    return 1

def stop(job_data):
    global execution_control, ttl
    logger.info("User request: stop")
    execution_control = "stop"
    time.sleep(min(10 * ttl, 3))
    temp = local.read(job_data)
    if temp:
        temp["job"]["control"] = utils.get_metadata("control", job_data, 0)
        local.write(temp)
    if utils.get_metadata("control", job_data, 0) == "stop":
        post.to_user("info", job_data, var.m2c2_job_stopped)

def test_fields(test_dict, json, index):
    test_keys = list(test_dict.keys())
    for i in range(0, len(test_dict)):
        if utils.get_metadata(test_keys[i], json, index) != "":
            if type(utils.get_metadata(test_keys[i], json, index)) not in test_dict[test_keys[i]]:
                post.to_user("", "", "info",
                             var.m2c2_err_json_key_type %
                             (test_keys[i], str(test_dict[test_keys[i]])))
                return 0
        else:
            post.to_user("", "", "info",
                         var.m2c2_err_json_key_not_found % (test_keys[i]))
            return 0
    return 1

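# Illustrative sketch: the shape test_fields() expects for test_dict -- each key to
# validate mapped to the collection of accepted Python types. The keys and type
# choices below are hypothetical examples, not the solution's actual schema table.
example_test_dict = {
    "name": [str],
    "version": [str, int],
    "machine-ip": [str],
    "machine-query-iterations": [int],
    "machine-query-time-interval": [int, float],
}

# test_fields(example_test_dict, user_job_request, 0) returns 0 as soon as a key is
# missing (empty value) or its value's type is not in the allowed list, and 1 once
# every listed key passes.
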
def write(user_job_request, type):
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_version = utils.get_metadata("version", user_job_request, 0)
    job_protocol = utils.get_metadata("protocol", user_job_request, 0)
    try:
        s3object = s3_client.Object(
            os.environ["JBM_BCK"],
            job_protocol + "-" + os.environ["JBM_KEY"] + job_name + "#v" +
            job_version + "." + type)
        s3object.put(Body=(bytes(json.dumps(user_job_request).encode('UTF-8'))))
    except Exception as err:
        logger.info("s3 write traceback:" + str(err))
        post.to_user(job_name, job_version, "error",
                     var.m2c2_s3_fail_write % (os.environ["JBM_BCK"]))
        return 0
    return 1

def add_m2c2_connector(self):
    uniqueID = utils.unique_id()
    # import connector lambda to user account
    try:
        self.gg_connector_lambda = lambda_client.create_function(
            FunctionName="m2c2-" + self.protocol + "-connector-" +
            self.job_name + "-" + uniqueID,
            Runtime=builder.runtime(self.protocol),
            Role=self.lambda_role,
            Handler="m2c2-" + self.protocol + "-connector.function_handler",
            Code={
                "S3Bucket": os.environ["SOURCE_S3_BUCKET"],
                "S3Key": "{prefix}/m2c2-{protocol}-connector.zip".format(
                    prefix=os.environ["SOURCE_S3_PREFIX"],
                    protocol=self.protocol)
            },
            Description="m2c2-" + self.protocol + "-connector-lambda-" + self.job_name,
            Environment={
                'Variables': {
                    'process': self.process,
                    'sitename': self.sitename,
                    'area': self.area,
                    'machinename': self.machinename,
                    'kinesisstream': os.environ["KINESIS_STREAM"]
                }
            },
            Timeout=5,
            MemorySize=128,
            Publish=True)
    except Exception as err:
        logger.debug("connector lambda traceback:" + str(err))
        post.to_user(
            self.job_name, self.job_version, "error",
            var.m2c2_gg_add_lambda % (self.protocol, os.environ["SOURCE_S3_BUCKET"]))
        return 0
    else:
        try:
            self.gg_connector_lambda_alias = lambda_client.create_alias(
                FunctionName=self.gg_connector_lambda["FunctionArn"],
                Name="m2c2-" + self.protocol + "-connector-" + self.job_name +
                "-" + uniqueID,
                FunctionVersion='1')
        except Exception as err:
            logger.debug("connector lambda alias traceback:" + str(err))
            post.to_user(self.job_name, self.job_version, "error",
                         var.m2c2_gg_add_lambda_alias)
            return 0
    return 1

def write(data):
    data["job"]["_last-update-timestamp_"] = datetime.now().strftime(
        "%Y-%m-%d %H:%M:%S.%f")
    job_name = utils.get_metadata("name", data, 0)
    try:
        with open(var.config_path + job_name + ".json", "w+",
                  encoding="utf-8") as file:
            json.dump(data, file)
        return 1
    except Exception as err:
        logger.info("Unable to write local file: " + str(err))
        post.to_user("error", data, var.m2c2_local_write % (var.config_path))
        return 0

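# Illustrative sketch: the local persistence round trip used by write()/read()
# above, reduced to plain file I/O. The config path and the payload layout (a
# "job" object with a "properties" list, as in the control messages built later in
# this section) are assumptions; the solution derives them from var.config_path
# and the incoming job request.
import json
import os

CONFIG_PATH = "/tmp/m2c2/"  # hypothetical example path


def save_job(job):
    """Persist a job document as <name>.json under the config path."""
    os.makedirs(CONFIG_PATH, exist_ok=True)
    name = job["job"]["properties"][0]["name"]
    with open(CONFIG_PATH + name + ".json", "w", encoding="utf-8") as f:
        json.dump(job, f)


def load_job(name):
    """Read a previously saved job document, or return "" if it does not exist."""
    path = CONFIG_PATH + name + ".json"
    if not os.path.exists(path):
        return ""
    with open(path, encoding="utf-8") as f:
        return json.load(f)
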
def deploy(self, user_job_request):
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_version = utils.get_metadata("version", user_job_request, 0)
    control = utils.get_metadata("control", user_job_request, 0)
    process = utils.get_metadata("process", user_job_request, 0)
    sitename = utils.get_metadata("site-name", user_job_request, 0)
    area = utils.get_metadata("area", user_job_request, 0)
    machinename = utils.get_metadata("machine-name", user_job_request, 0)
    if ddb.valid_job_request(job_name, job_version, control):
        logger.info("Setting greengrass group")
        connector = gg.generate_greengrass_resources(user_job_request)
        if not connector.create_gg_definitions():
            return 0
        else:
            # write raw job to s3
            logger.info("Writing raw job to s3")
            if not s3.write(user_job_request, "raw"):
                connector.delete_gg_definitions()
                return 0
            # build job as per protocol
            logger.info("Processing job request")
            processed_job_request = build.job(user_job_request)
            if not processed_job_request:
                connector.delete_gg_definitions()
                return 0
            # write processed job to s3
            logger.info("Writing processed job to s3")
            if not s3.write(processed_job_request, "json"):
                connector.delete_gg_definitions()
                return 0
            # update DynamoDB as all other jobs on this gg group id will stop during manual deployment
            logger.info("Updating database to stop all jobs for the group id")
            if not ddb.update(job_name, job_version, control, user_job_request):
                connector.delete_gg_definitions()
                return 0
            # create an entry in DynamoDB
            logger.info("Creating database entry")
            if not ddb.write(user_job_request):
                connector.delete_gg_definitions()
                return 0
            # collect metrics
            if os.environ["MET_ENB"].lower() == "true":
                metrics.get_metrics(user_job_request)
            # prompt user to deploy via the console
            post.to_user(job_name, job_version, "info", var.m2c2_user_deploy_request)
            return 1
    else:
        return 0

def retrieve_entry_value(user_job_request, attribute):
    job_name = utils.get_metadata("name", user_job_request, 0)
    try:
        job_value = ddb_table.query(
            Select='SPECIFIC_ATTRIBUTES',
            ProjectionExpression=attribute,
            KeyConditionExpression=Key('jobid').eq(job_name))
        return job_value
    except Exception as err:
        logger.error("ddb version - retrieve traceback:" + str(err))
        post.to_user(
            job_name, "", "error",
            var.m2c2_ddb_retrieving_values % (str(job_name), str(err)))
        return 0

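# Illustrative sketch: how the query response above is typically consumed (write()
# further below takes the max of the returned "version" values). This defensive
# variant aliases the projected attribute through ExpressionAttributeNames in case
# the attribute name collides with a DynamoDB reserved word. The table name is a
# hypothetical example.
import boto3
from boto3.dynamodb.conditions import Key


def latest_version(table, job_name):
    """Return the highest stored version number for a job, or 0 if none exist."""
    response = table.query(
        Select="SPECIFIC_ATTRIBUTES",
        ProjectionExpression="#v",
        ExpressionAttributeNames={"#v": "version"},
        KeyConditionExpression=Key("jobid").eq(job_name),
    )
    return max((item["version"] for item in response["Items"]), default=0)


# Example usage (hypothetical table name):
# table = boto3.resource("dynamodb").Table("m2c2-job-metadata")
# print(latest_version(table, "my-job"))
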
def start(job_data):
    global event, connection_handler, execution_control
    logger.info("User request: start")
    local.write(job_data)
    event = job_data
    connection_handler = comms.DeviceCommunication(job_data)
    if connection_handler.open():
        logger.info("successfully connected.")
        execution_control = "run"
        post.to_user("info", job_data, var.m2c2_job_started)
        job_execution()
        return 1
    else:
        connection_handler.close()
        logger.info("Failed to connect.")
        post.to_user("info", job_data, var.m2c2_job_push_fail)
        return 0

def function_handler(job_data, context):
    global execution_control
    job_name = utils.get_metadata("name", job_data, 0)
    job_control = utils.get_metadata("control", job_data, 0)
    if (execution_control == "run") and (job_control in ["start"]):
        post.to_user("info", job_data, var.m2c2_already_started)
    elif (execution_control == "stop") and (job_control in ["stop"]):
        post.to_user("info", job_data, var.m2c2_already_stopped)
    else:
        if job_control == "start":
            start(job_data)
        elif job_control == "stop":
            stop(job_data)
        elif job_control == "push":
            push(job_data)
        elif job_control == "update":
            update(job_data)
        elif job_control == "pull":
            pull(job_data)

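# Illustrative sketch: the kind of control message function_handler() routes. The
# structure mirrors the jobs_to_stop / temp_json payloads built elsewhere in this
# section; the name, version, and control values are hypothetical examples.
example_start_message = {
    "job": {
        "control": "start",
        "properties": [{
            "name": "my-opcda-job",
            "version": "1"
        }]
    }
}

# function_handler(example_start_message, None) reads "control" through
# utils.get_metadata() and dispatches to start(); a second "start" while
# execution_control is already "run" only posts m2c2_already_started.
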
def stop_name_entries(job_name, job_version):
    global job_in_ddb
    for i in range(0, job_in_ddb["Count"]):
        try:
            ddb_table.update_item(
                Key={
                    'jobid': job_name,
                    'version': job_in_ddb["Items"][i]['version']
                },
                UpdateExpression="set #t =:t, control =:c",
                ExpressionAttributeValues={
                    ':t': datetime.now().strftime("%d/%m/%Y,%H:%M:%S.%f"),
                    ':c': "stop"
                },
                ExpressionAttributeNames={"#t": "timestamp"})
        except Exception as err:
            logger.info("ddb update - update traceback:" + str(err))
            post.to_user(job_name, job_version, "error",
                         var.m2c2_ddb_access % (os.environ["JBM_TBL"]))
            return 0
    return 1

def deploy_gg_group(self):
    logger.info(
        "Deploying Greengrass updates to edge device for Greengrass group {}"
        .format(self.gg_group_id))
    try:
        group_version = gg_client.get_group(
            GroupId=self.gg_group_id)['LatestVersion']
        deployment = gg_client.create_deployment(
            GroupId=self.gg_group_id,
            GroupVersionId=group_version,
            DeploymentType='NewDeployment')
        deployment_id = deployment['DeploymentId']
        logger.info("Deployment Id: {}".format(str(deployment_id)))
        deployment_info = gg_client.get_deployment_status(
            GroupId=self.gg_group_id, DeploymentId=deployment_id)
        deployment_status = deployment_info['DeploymentStatus']
        logger.info(
            "Greengrass deployment status: {}".format(deployment_status))
        while deployment_status != 'Success':
            if deployment_status == 'Failure':
                logger.info(
                    "The Greengrass group deployment has failed: {}".format(
                        deployment_info['ErrorMessage']))
                logger.info(deployment_info['ErrorDetails'])
                return 0
            time.sleep(10)
            deployment_info = gg_client.get_deployment_status(
                GroupId=self.gg_group_id, DeploymentId=deployment_id)
            deployment_status = deployment_info['DeploymentStatus']
            logger.info("Greengrass deployment status: {}".format(
                deployment_status))
        time.sleep(25)
    except Exception as err:
        logger.info(
            "Deploying to Greengrass edge device traceback: {}".format(
                str(err)))
        post.to_user(self.job_name, self.job_version, "error",
                     var.m2c2_gg_create_deployment % (self.gg_group_id))
        return 0
    return 1

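# Illustrative sketch: a bounded variant of the polling loop above. deploy_gg_group()
# waits indefinitely for 'Success'; adding a deadline keeps a stuck deployment from
# blocking the caller until the Lambda itself times out. The client construction,
# group id, and the 600 s budget are hypothetical examples.
import time

import boto3

gg_client = boto3.client("greengrass")


def wait_for_deployment(group_id, deployment_id, timeout_seconds=600, poll_seconds=10):
    """Poll a Greengrass (V1) deployment until Success, Failure, or timeout."""
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        info = gg_client.get_deployment_status(
            GroupId=group_id, DeploymentId=deployment_id)
        status = info["DeploymentStatus"]
        if status == "Success":
            return True
        if status == "Failure":
            return False
        time.sleep(poll_seconds)
    return False
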
def get_group_definition(self):
    try:
        self.gg_group = gg_client.get_group(GroupId=self.gg_group_id)
    except Exception as err:
        logger.info("gg group definition traceback:" + str(err))
        post.to_user(self.job_name, self.job_version, "error",
                     var.m2c2_gg_get_group_definition % (self.gg_group_id))
        return 0
    else:
        try:
            self.gg_group_version = gg_client.get_group_version(
                GroupId=self.gg_group["Id"],
                GroupVersionId=self.gg_group["LatestVersion"])
        except Exception as err:
            logger.info("gg group definition version traceback:" + str(err))
            post.to_user(
                self.job_name, self.job_version, "error",
                var.m2c2_gg_get_group_definition_version %
                (self.gg_group["Id"], self.gg_group["LatestVersion"]))
            return 0
    return 1

def stop_group_entries(job_name, job_version, user_job_request):
    gg_group_id = utils.get_metadata("gg-group-id", user_job_request, 0)
    all_entries = ddb_table.scan()
    jobs_to_stop = {"job": {"control": "stop", "properties": []}}
    other_job_stop_request = False
    for i in range(0, all_entries["Count"]):
        if all_entries["Items"][i]["gggroupid"] == gg_group_id:
            if all_entries["Items"][i]['control'] != "stop":
                other_job_stop_request = True
                jobs_to_stop["job"]["properties"].append({
                    'name': all_entries["Items"][i]['jobid'],
                    'version': str(all_entries["Items"][i]['version'])
                })
            try:
                ddb_table.update_item(
                    Key={
                        'jobid': all_entries["Items"][i]['jobid'],
                        'version': all_entries["Items"][i]['version']
                    },
                    UpdateExpression="set #t =:t, control =:c",
                    ExpressionAttributeValues={
                        ':t': datetime.now().strftime("%d/%m/%Y,%H:%M:%S.%f"),
                        ':c': "stop"
                    },
                    ExpressionAttributeNames={"#t": "timestamp"})
            except Exception as err:
                logger.info("ddb update - deploy traceback:" + str(err))
                post.to_user(job_name, job_version, "error",
                             var.m2c2_ddb_access % (os.environ["JBM_TBL"]))
                return 0
    if other_job_stop_request:
        logger.info("jobs to stop: " + str(jobs_to_stop))
        post.to_user(job_name, job_version, "info",
                     var.m2c2_ddb_controller_stop % (jobs_to_stop))
        ddb_controlled_stop(jobs_to_stop)
    return 1

def job_execution():
    # runs while execution_control is set to run. Transmissions are time-based only.
    global execution_control, payload_content, connection_handler, loop_count, event, ttl, current_job
    if execution_control == "run":
        current_job = event
        start_time = time.time()
        try:
            payload_content.append(connection_handler.read())
        except Exception as err:
            execution_control = "stop"
            logger.info("Unable to read from server: " + str(err))
            post.to_user("error", current_job, var.m2c2_lost_communication)
        loop_count += 1
        if loop_count >= utils.get_metadata("machine-query-iterations", current_job, 0):
            loop_count = 0
            try:
                tform.transform(payload_content, current_job)
            except Exception as err:
                logger.info("Unable to transform dataset: " + str(err))
            payload_content = []
        ttl = time.time() - start_time
        Timer(
            utils.get_metadata("machine-query-time-interval", current_job, 0),
            job_execution).start()
    elif execution_control == "stop":
        loop_count = 0
        if payload_content != []:
            try:
                tform.transform(payload_content, current_job)
            except Exception as err:
                logger.info("Unable to transform dataset: " + str(err))
            payload_content = []
        try:
            connection_handler.close()
        except:
            pass
        return 0

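# Illustrative sketch: the self-rescheduling pattern job_execution() uses, stripped
# of the protocol and transform details. Each pass re-arms a threading.Timer for the
# configured interval until the control flag is flipped to "stop". The interval and
# flag handling here are hypothetical simplifications.
from threading import Timer

execution_control = "run"
interval_seconds = 2  # hypothetical machine-query-time-interval


def periodic_read():
    """Read once per interval and reschedule itself while the flag is "run"."""
    if execution_control != "run":
        return
    # ... read from the device and buffer the payload here ...
    Timer(interval_seconds, periodic_read).start()


# periodic_read() kicks off the chain; setting execution_control = "stop" lets the
# pending timer fire once more and then stop rescheduling.
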
def start_jobs_after_gg_deploy(user_job_request):
    gg_group_id = utils.get_metadata("gg-group-id", user_job_request, 0)
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_version = ""
    jobs_to_restart = []
    try:
        all_entries = ddb_table.scan(
            Select='ALL_ATTRIBUTES',
            FilterExpression=Attr('control').eq('start'))
        last_evaluated_key = all_entries.get('LastEvaluatedKey')
        while last_evaluated_key:
            more_entries = ddb_table.scan(
                Select='ALL_ATTRIBUTES',
                FilterExpression=Attr('control').eq('start'),
                ExclusiveStartKey=last_evaluated_key)
            all_entries['Items'].extend(more_entries['Items'])
            last_evaluated_key = more_entries.get('LastEvaluatedKey')
        for entry in all_entries['Items']:
            if job_name != entry['jobid'] and entry['gggroupid'] == gg_group_id:
                job_version = str(entry['version'])
                temp_json = {
                    "job": {
                        "control": entry['control'],
                        "properties": [{
                            "name": entry['jobid'],
                            "version": job_version
                        }]
                    }
                }
                jobs_to_restart.append(temp_json)
        return jobs_to_restart
    except Exception as err:
        logger.error("ddb update - update traceback:" + str(err))
        post.to_user(job_name, job_version, "error",
                     var.m2c2_jobs_to_restart % (str(err)))
        return 0

def get(user_request):
    if utils.get_metadata("protocol", user_request, 0) == "slmp":
        if utils.get_metadata("network", user_request, 0) > var.max_slmp_network:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("network"))
            return 0
        if (utils.get_metadata("station", user_request, 0) > var.max_slmp_station) and (
                utils.get_metadata("station", user_request, 0) != 255) and (
                utils.get_metadata("station", user_request, 0) != 0):
            post.to_user("", "", "error", var.m2c2_err_json_range % ("station"))
            return 0
        if utils.get_metadata("multidrop", user_request, 0) > var.max_slmp_multidrop:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("multidrop"))
            return 0
        if utils.get_metadata("subheader", user_request, 0) not in var.slmp_subheader:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("subheader"))
            return 0
        if utils.get_metadata("communication-code", user_request, 0) not in var.slmp_communication_code:
            post.to_user("", "", "error",
                         var.m2c2_err_json_range % ("communication-code"))
            return 0
        if utils.get_metadata("ethernet", user_request, 0) not in var.slmp_ethernet:
            post.to_user("", "", "error", var.m2c2_err_json_range % ("ethernet"))
            return 0
        return 1
    elif utils.get_metadata("protocol", user_request, 0) == "opcda":
        return 1
    # add limits for new protocol
    return 0

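# Illustrative sketch: the SLMP connectivity fields that get() range-checks above,
# gathered into one hypothetical request fragment. The values shown are placeholder
# examples only; the accepted limits live in var.max_slmp_* and the var.slmp_*
# lists, which are not reproduced here.
example_slmp_fields = {
    "protocol": "slmp",
    "network": 0,
    "station": 255,
    "multidrop": 0,
    "subheader": "request",          # must be one of var.slmp_subheader
    "communication-code": "binary",  # must be one of var.slmp_communication_code
    "ethernet": "tcp",               # must be one of var.slmp_ethernet
}

# get() returns 1 only when every field above is inside its configured limit, and
# posts m2c2_err_json_range naming the first field that falls outside it otherwise.
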
def valid_job_request(job_name, job_version, job_control):
    global job_in_ddb
    job_in_ddb = read_ddb_jobid(job_name, job_version)
    if job_in_ddb:
        checked_job = check_job(job_name, job_version)
        other_running = other_job_running(job_name, job_version)
        if job_control == "deploy":
            if checked_job == 0:
                return 1
            else:
                post.to_user(job_name, job_version, "error",
                             var.m2c2_ddb_job_name_exists % (job_name))
                return 0
        elif job_control == "update":
            if checked_job == 1:
                return 1
            else:
                post.to_user(
                    job_name, job_version, "error",
                    var.m2c2_ddb_job_name_version_must_be_unique %
                    (job_name, job_version))
                return 0
        elif job_control == "stop":
            if checked_job == 2:
                return 1
            else:
                post.to_user(
                    job_name, job_version, "error",
                    var.m2c2_ddb_job_name_version_must_be_unique %
                    (job_name, job_version))
                return 0
        elif job_control == "pull":
            if checked_job > 0:
                return 1
            else:
                post.to_user(job_name, job_version, "error",
                             var.m2c2_ddb_job_name_does_not_exist % (job_name))
                return 0
        elif job_control == "push":
            if checked_job == 2:
                return 1
            else:
                post.to_user(
                    job_name, job_version, "error",
                    var.m2c2_ddb_job_name_version_must_be_unique %
                    (job_name, job_version))
                return 0
        elif job_control == "start":
            if checked_job == 2 and not other_running:
                return 1
            else:
                if other_running:
                    post.to_user(job_name, job_version, "error",
                                 var.m2c2_ddb_job_name_running % (job_name))
                    return 0
                else:
                    post.to_user(
                        job_name, job_version, "error",
                        var.m2c2_ddb_job_name_version_must_be_unique %
                        (job_name, job_version))
                    return 0
        else:
            return 0

def update(job_data):
    logger.info("User request: update")
    stop(job_data)
    start(job_data)
    post.to_user("info", job_data, var.m2c2_job_updated)

def pull(job_data):
    logger.info("User request: pull")
    job_data = local.read(job_data)
    if job_data != "":
        post.to_user("info", job_data, job_data)

def write(user_job_request):
    job_name = utils.get_metadata("name", user_job_request, 0)
    job_protocol = utils.get_metadata("protocol", user_job_request, 0)
    control = utils.get_metadata("control", user_job_request, 0)
    job_version = utils.get_metadata("version", user_job_request, 0)
    try:
        # first ensure that optional keys have a value that can be written to ddb
        na = "not specified"
        site = utils.get_metadata("site-name", user_job_request, 0)
        if site == "":
            site = na
        area = utils.get_metadata("area", user_job_request, 0)
        if area == "":
            area = na
        process = utils.get_metadata("process", user_job_request, 0)
        if process == "":
            process = na
        machine = utils.get_metadata("machine-name", user_job_request, 0)
        if machine == "":
            machine = na
        if not job_version:
            job_versions = retrieve_entry_value(user_job_request, "version")["Items"]
            if control == "deploy":
                job_version = max(
                    [item.get("version", 0) for item in job_versions],
                    default=0) + 1
            else:
                if not job_versions:
                    logger.error(
                        "There is no job entry by the name {0} to {1}".format(
                            job_name, control))
                    return 0
                else:
                    job_version = max(
                        [item["version"] for item in job_versions], default=1)
        ddb_table.put_item(
            Item={
                "jobid": job_name,
                "control": control,
                "properties": utils.get_metadata("properties", user_job_request, 0),
                "version": int(job_version),
                "gggroupid": utils.get_metadata("gg-group-id", user_job_request, 0),
                "machine-details": utils.get_metadata("machine-details", user_job_request, 0),
                "process": process,
                "site": site,
                "area": area,
                "machine": machine,
                "data_parameters": utils.get_metadata("data-parameters", user_job_request, 0),
                "machine_query_iterations": utils.get_metadata("machine-query-iterations", user_job_request, 0),
                "machine_query_time_interval": utils.get_metadata("machine-query-time-interval", user_job_request, 0),
                "attributes": utils.get_metadata("protocol", user_job_request, 0),
                "protocol": job_protocol,
                "address": utils.get_metadata("machine-ip", user_job_request, 0),
                "job_details": json.dumps(user_job_request),
                "timestamp": datetime.now().strftime("%d/%m/%Y,%H:%M:%S.%f")
            })
        return job_version
    except Exception as err:
        logger.error("ddb write traceback:" + str(err))
        post.to_user(job_name, job_version, "error",
                     var.m2c2_ddb_access % ddb_table)
        return 0

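# Illustrative sketch: the version-resolution rule applied in write() above when the
# request omits "version" -- a "deploy" takes the highest stored version plus one,
# while any other control reuses the highest stored version. The items list below is
# a hypothetical stand-in for retrieve_entry_value(...)["Items"].
def resolve_version(control, stored_items):
    versions = [item.get("version", 0) for item in stored_items]
    if control == "deploy":
        return max(versions, default=0) + 1
    if not versions:
        return None  # nothing to update/start/stop under this job name
    return max(versions)


# Examples:
# resolve_version("deploy", [])                                -> 1
# resolve_version("deploy", [{"version": 1}, {"version": 2}])  -> 3
# resolve_version("start",  [{"version": 1}, {"version": 2}])  -> 2
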