def foo():
    """Build and publish one periodic DDATA message, then reschedule itself."""
    payload = sparkplug.getDdataPayload()

    # Current frame/object counters from the detector's module-level state.
    addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx,
              MetricDataType.Int16, frame_numberx)
    addMetric(payload, "input/number of objects", AliasMap.Device_num_rectsx,
              MetricDataType.Int16, num_rectsx)
    addMetric(payload, "input/Vehicle count", AliasMap.Device_counter1,
              MetricDataType.Int16, counter1)
    addMetric(payload, "input/Person count", AliasMap.Device_counter2,
              MetricDataType.Int16, counter2)

    # Example of flagging a metric STALE via a "Quality" property (value 500).
    stale_metric = addMetric(payload, None, AliasMap.Device_Metric1,
                             MetricDataType.Boolean, random.choice([True, False]))
    stale_metric.properties.keys.extend(["Quality"])
    quality = stale_metric.properties.values.add()
    quality.type = ParameterDataType.Int32
    quality.int_value = 500

    # Serialize and ship the DDATA message.
    topic = "spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName
    client.publish(topic, bytearray(payload.SerializeToString()), 0, False)

    # Re-arm the periodic timer.
    threading.Timer(WAIT_SECONDS, foo).start()
def input_changed(name, pin):
    """Publish a DDATA message reflecting the new Boolean state of *pin*.

    The whole callback runs under `lock` to prevent improper interleaving
    of payload construction/publish across debounced edge events.
    """
    with lock:
        outbound = sparkplug.getDdataPayload()
        addMetric(outbound, name, None, MetricDataType.Boolean, pin.read())
        data = bytearray(outbound.SerializeToString())
        client.publish(
            "spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + mySubNodeName,
            data, 0, False)
def button_changed(pin):
    """Report a button press/release as a Boolean DDATA metric named "button"."""
    outbound = sparkplug.getDdataPayload()
    state = pin.read()
    # pin.read() yields 1 on press, anything else on release.
    if state == 1:
        print("You pressed the button!")
    else:
        print("You released the button!")
    addMetric(outbound, "button", None, MetricDataType.Boolean, state)
    data = bytearray(outbound.SerializeToString())
    client.publish(
        "spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + mySubNodeName,
        data, 0, False)
def updateDeviceData(self, deviceId, alias_value, data_type, data_value):
    """Publish a single-metric DDATA message for the device *deviceId*.

    #TODO: Direct dependency on Metric, to be refactored later
    """
    device = self.node.getDeviceMap()[deviceId]
    payload = sparkplug.getDdataPayload()
    addMetric(payload, None, alias_value, data_type, data_value)
    # Explicit unbound call kept from the original — presumably SPGEngine is
    # this class (or a base); verify before refactoring to self.client_publish.
    SPGEngine.client_publish(self, device.getDataTopic(), payload)
def _send_node_rebirth(client, groupid, eon):
    """Send a 'Node Control/Rebirth' NCMD to the edge node *eon* in *groupid*.

    Defect fixed: the original handler was a bare ``except:`` that then
    referenced an unbound name ``err`` — logging any failure raised a
    NameError instead. The exception is now caught narrowly as
    ``Exception`` and bound so it can be logged.
    """
    try:
        logging.debug('sending NCMD rebirth...')
        command = sparkplug.getDdataPayload()
        # Alias 0 with the full metric name, per the Sparkplug NCMD convention.
        addMetric(command, 'Node Control/Rebirth', 0, MetricDataType.Boolean, True)
        byteArray = bytearray(command.SerializeToString())
        client.publish('spBv1.0/' + groupid + '/NCMD/' + eon, byteArray, 0, False)
    except Exception as err:
        logging.error('some error')
        logging.error(str(err))
        return
def report_changed_metric(self, changed_list):
    """Publish one DDATA message containing every changed metric value.

    Args:
        changed_list: iterable of ``(metric, new_value)`` pairs, where each
            metric carries ``alias`` and ``datatype`` attributes. ``None``
            or an empty list is a no-op.

    Idiom fixes: ``changed_list == None or len(changed_list) == 0`` replaced
    with the equivalent truthiness test, and positional ``change[0]``/
    ``change[1]`` indexing replaced with tuple unpacking.
    """
    if not changed_list:
        return
    payload = sparkplug.getDdataPayload()
    for metric, new_val in changed_list:
        sparkplug.addMetric(payload, None, metric.alias, metric.datatype, new_val)
    byteArray = bytearray(payload.SerializeToString())
    self.client.publish(
        "spBv1.0/" + self.node.group_id + "/DDATA/" + self.node.node_id
        + "/" + self.device_name,
        byteArray, 0, False)
def foo():
    """Publish the periodic DDATA payload, pump the MQTT client once,
    then reschedule itself via a timer.

    Defect fixed: the original wrapped ``time.sleep(1); client.loop()`` in
    ``for _ in range(1):`` — a one-iteration loop that is dead scaffolding
    (possibly it was meant to be ``range(WAIT_SECONDS)``; confirm before
    changing the timing). Behavior is unchanged: one sleep, one loop pump.
    """
    payload = sparkplug.getDdataPayload()

    # Frame/object counters plus the ten emulated device outputs.
    addMetric(payload, "input/number of objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx)
    addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx)
    addMetric(payload, "input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, Object1)
    addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, Object2)
    addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, Object3)
    addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, Object4)
    addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, Object5)
    addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, Object6)
    addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, Object7)
    addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, Object8)
    addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, Object9)
    addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, Object10)

    # Publish the DDATA message.
    byteArray = bytearray(payload.SerializeToString())
    client.publish(
        "spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName,
        byteArray, 0, False)

    # Sit briefly and service inbound/outbound MQTT events.
    time.sleep(1)
    client.loop()

    # Re-arm the periodic timer.
    threading.Timer(WAIT_SECONDS, foo).start()
def on_message(client, userdata, msg):
    """MQTT callback: apply DCMD writes to pibrella outputs/LEDs/buzzer and
    answer node-control NCMDs with a rebirth.

    Defect fixed: the NCMD branch used three independent ``if`` statements
    with the ``else`` bound only to the last one, so a handled command such
    as 'Node Control/Next Server' also printed "Unknown command...". The
    chain is now ``if``/``elif``. Print statements are parenthesized so the
    code runs under both Python 2 and 3 (single-argument prints only).
    """
    print("Message arrived: " + msg.topic)
    tokens = msg.topic.split("/")
    if tokens[0] == "spBv1.0" and tokens[1] == myGroupId and tokens[2] == "DCMD" and tokens[3] == myNodeName:
        inboundPayload = sparkplug_b_pb2.Payload()
        inboundPayload.ParseFromString(msg.payload)
        outboundPayload = sparkplug.getDdataPayload()
        for metric in inboundPayload.metrics:
            print("Tag Name: " + metric.name)
            if metric.name == "Outputs/e":
                # Write the commanded value, then read it back for the DDATA.
                pibrella.output.e.write(metric.boolean_value)
                addMetric(outboundPayload, "Outputs/e", None, MetricDataType.Boolean, pibrella.output.e.read())
            elif metric.name == "Outputs/f":
                pibrella.output.f.write(metric.boolean_value)
                addMetric(outboundPayload, "Outputs/f", None, MetricDataType.Boolean, pibrella.output.f.read())
            elif metric.name == "Outputs/g":
                pibrella.output.g.write(metric.boolean_value)
                addMetric(outboundPayload, "Outputs/g", None, MetricDataType.Boolean, pibrella.output.g.read())
            elif metric.name == "Outputs/h":
                pibrella.output.h.write(metric.boolean_value)
                addMetric(outboundPayload, "Outputs/h", None, MetricDataType.Boolean, pibrella.output.h.read())
            elif metric.name == "Outputs/LEDs/green":
                if metric.boolean_value:
                    pibrella.light.green.on()
                else:
                    pibrella.light.green.off()
                addMetric(outboundPayload, "Outputs/LEDs/green", None, MetricDataType.Boolean, pibrella.light.green.read())
            elif metric.name == "Outputs/LEDs/red":
                if metric.boolean_value:
                    pibrella.light.red.on()
                else:
                    pibrella.light.red.off()
                addMetric(outboundPayload, "Outputs/LEDs/red", None, MetricDataType.Boolean, pibrella.light.red.read())
            elif metric.name == "Outputs/LEDs/yellow":
                if metric.boolean_value:
                    pibrella.light.yellow.on()
                else:
                    pibrella.light.yellow.off()
                addMetric(outboundPayload, "Outputs/LEDs/yellow", None, MetricDataType.Boolean, pibrella.light.yellow.read())
            elif metric.name == "buzzer_fail":
                pibrella.buzzer.fail()
            elif metric.name == "buzzer_success":
                pibrella.buzzer.success()
        byteArray = bytearray(outboundPayload.SerializeToString())
        client.publish(
            "spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + mySubNodeName,
            byteArray, 0, False)
    elif tokens[0] == "spBv1.0" and tokens[1] == myGroupId and tokens[2] == "NCMD" and tokens[3] == myNodeName:
        inboundPayload = sparkplug_b_pb2.Payload()
        inboundPayload.ParseFromString(msg.payload)
        for metric in inboundPayload.metrics:
            # All three node-control commands are answered with a rebirth.
            if metric.name == "Node Control/Next Server":
                publishBirths()
            elif metric.name == "Node Control/Rebirth":
                publishBirths()
            elif metric.name == "Node Control/Reboot":
                publishBirths()
            else:
                print("Unknown command...")
    print("done publishing")
def on_message(client, userdata, msg):
    """MQTT callback: dispatch inbound NCMD/DCMD metrics by name or alias.

    Handles the node-control commands (Next Server / Rebirth / Reboot) and
    the emulated outputs Device Metric2 (Int16) / Device Metric3 (Boolean),
    answering each output 'write' with a DDATA carrying the new value.

    Defect fixed: the comment above the Device Metric2 handler lost its
    leading '#' ("If this were a real output ..."), leaving that text to be
    parsed as code — a syntax error. Restored as a comment; logic unchanged.
    """
    print("Message arrived: " + msg.topic)
    tokens = msg.topic.split("/")

    if tokens[0] == "spBv1.0" and tokens[1] == myGroupId and (tokens[2] == "NCMD" or tokens[2] == "DCMD") and tokens[3] == myNodeName:
        inboundPayload = sparkplug_b_pb2.Payload()
        inboundPayload.ParseFromString(msg.payload)
        for metric in inboundPayload.metrics:
            if metric.name == "Node Control/Next Server" or metric.alias == AliasMap.Next_Server:
                # 'Node Control/Next Server' tells the client to disconnect
                # and connect to the next MQTT server in its pool.
                print("'Node Control/Next Server' is not implemented in this example")
            elif metric.name == "Node Control/Rebirth" or metric.alias == AliasMap.Rebirth:
                # Resend the full NBIRTH/DBIRTH. MQTT Engine issues this when
                # it receives a DATA metric never declared in a BIRTH — which
                # is why BIRTHs must carry all known metrics.
                publishBirth()
            elif metric.name == "Node Control/Reboot" or metric.alias == AliasMap.Reboot:
                # Fake a full reboot with a republish of the BIRTH messages.
                publishBirth()
            elif metric.name == "output/Device Metric2" or metric.alias == AliasMap.Device_Metric2:
                # Emulated output declared in the DBIRTH. On an incoming
                # 'write' we must publish a DDATA with the new output value.
                # If this were a real output we'd write to the output and then
                # read it back before publishing. Int16 per the DBIRTH.
                newValue = metric.int_value
                print("CMD message for output/Device Metric2 - New Value: {}".format(newValue))
                # Create the DDATA payload - use the alias because this isn't the DBIRTH
                payload = sparkplug.getDdataPayload()
                addMetric(payload, None, AliasMap.Device_Metric2, MetricDataType.Int16, newValue)
                byteArray = bytearray(payload.SerializeToString())
                client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
            elif metric.name == "output/Device Metric3" or metric.alias == AliasMap.Device_Metric3:
                # Same emulated-output pattern; Boolean per the DBIRTH.
                newValue = metric.boolean_value
                print("CMD message for output/Device Metric3 - New Value: %r" % newValue)
                payload = sparkplug.getDdataPayload()
                addMetric(payload, None, AliasMap.Device_Metric3, MetricDataType.Boolean, newValue)
                byteArray = bytearray(payload.SerializeToString())
                client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
            else:
                print("Unknown command: " + metric.name)
    else:
        print("Unknown command...")
    print("Done publishing")
# --- Client wiring and main publish loop (module-level script) ---
client.on_message = on_message
client.username_pw_set(myUsername, myPassword)
# Register the NDEATH payload as the MQTT Last Will so the broker announces
# this node's death if the connection drops ungracefully.
deathByteArray = bytearray(deathPayload.SerializeToString())
client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName, deathByteArray, 0, False)
client.connect(serverUrl, 1883, 60)

# Short delay to allow connect callback to occur
time.sleep(.1)
client.loop()

# Publish the birth certificates
publishBirth()

# NOTE(review): this loop publishes as fast as it can — there is no sleep or
# client.loop() between iterations. Similar examples pace this with a
# sleep/loop cycle; confirm whether the chunk is truncated before changing.
while True:
    # Periodically publish some new data
    payload = sparkplug.getDdataPayload()

    # Add some random data to the inputs
    addMetric(payload, None, AliasMap.Device_Metric0, MetricDataType.String,
              ''.join(random.choice(string.ascii_lowercase) for i in range(12)))

    # Note this data we're setting to STALE via the propertyset as an example
    metric = addMetric(payload, None, AliasMap.Device_Metric1,
                       MetricDataType.Boolean, random.choice([True, False]))
    metric.properties.keys.extend(["Quality"])
    propertyValue = metric.properties.values.add()
    propertyValue.type = ParameterDataType.Int32
    propertyValue.int_value = 500

    # Publish a message data
    byteArray = bytearray(payload.SerializeToString())
    client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
def on_message(client, userdata, msg):
    """MQTT callback: apply DCMD writes to pibrella outputs/LEDs/buzzer and
    answer node-control NCMDs with a rebirth.

    Defect fixed: the NCMD branch used three independent ``if`` statements
    with the ``else`` bound only to the last one, so a handled command such
    as 'Node Control/Next Server' also printed "Unknown command...". The
    chain is now ``if``/``elif``. Print statements are parenthesized so the
    code runs under both Python 2 and 3 (single-argument prints only).
    """
    print("Message arrived: " + msg.topic)
    tokens = msg.topic.split("/")
    if tokens[0] == "spBv1.0" and tokens[1] == myGroupId and tokens[2] == "DCMD" and tokens[3] == myNodeName:
        inboundPayload = sparkplug_b_pb2.Payload()
        inboundPayload.ParseFromString(msg.payload)
        outboundPayload = sparkplug.getDdataPayload()
        for metric in inboundPayload.metrics:
            print("Tag Name: " + metric.name)
            if metric.name == "Outputs/e":
                # Write the commanded value, then read it back for the DDATA.
                pibrella.output.e.write(metric.boolean_value)
                addMetric(outboundPayload, "Outputs/e", None, MetricDataType.Boolean, pibrella.output.e.read())
            elif metric.name == "Outputs/f":
                pibrella.output.f.write(metric.boolean_value)
                addMetric(outboundPayload, "Outputs/f", None, MetricDataType.Boolean, pibrella.output.f.read())
            elif metric.name == "Outputs/g":
                pibrella.output.g.write(metric.boolean_value)
                addMetric(outboundPayload, "Outputs/g", None, MetricDataType.Boolean, pibrella.output.g.read())
            elif metric.name == "Outputs/h":
                pibrella.output.h.write(metric.boolean_value)
                addMetric(outboundPayload, "Outputs/h", None, MetricDataType.Boolean, pibrella.output.h.read())
            elif metric.name == "Outputs/LEDs/green":
                if metric.boolean_value:
                    pibrella.light.green.on()
                else:
                    pibrella.light.green.off()
                addMetric(outboundPayload, "Outputs/LEDs/green", None, MetricDataType.Boolean, pibrella.light.green.read())
            elif metric.name == "Outputs/LEDs/red":
                if metric.boolean_value:
                    pibrella.light.red.on()
                else:
                    pibrella.light.red.off()
                addMetric(outboundPayload, "Outputs/LEDs/red", None, MetricDataType.Boolean, pibrella.light.red.read())
            elif metric.name == "Outputs/LEDs/yellow":
                if metric.boolean_value:
                    pibrella.light.yellow.on()
                else:
                    pibrella.light.yellow.off()
                addMetric(outboundPayload, "Outputs/LEDs/yellow", None, MetricDataType.Boolean, pibrella.light.yellow.read())
            elif metric.name == "buzzer_fail":
                pibrella.buzzer.fail()
            elif metric.name == "buzzer_success":
                pibrella.buzzer.success()
        byteArray = bytearray(outboundPayload.SerializeToString())
        client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + mySubNodeName, byteArray, 0, False)
    elif tokens[0] == "spBv1.0" and tokens[1] == myGroupId and tokens[2] == "NCMD" and tokens[3] == myNodeName:
        inboundPayload = sparkplug_b_pb2.Payload()
        inboundPayload.ParseFromString(msg.payload)
        for metric in inboundPayload.metrics:
            # All three node-control commands are answered with a rebirth.
            if metric.name == "Node Control/Next Server":
                publishBirths()
            elif metric.name == "Node Control/Rebirth":
                publishBirths()
            elif metric.name == "Node Control/Reboot":
                publishBirths()
            else:
                print("Unknown command...")
    print("done publishing")
def on_message(client, userdata, msg):
    """MQTT callback: dispatch inbound NCMD/DCMD metrics by name or alias.

    Handles the node-control commands (Next Server / Rebirth / Reboot) and
    the emulated outputs Device Metric2 (Int16) / Device Metric3 (Boolean),
    answering each output 'write' with a DDATA carrying the new value.

    Defects fixed: (1) the comment above the Device Metric2 handler lost its
    leading '#' ("If this were a real output ...") and was parsed as code —
    a syntax error; restored as a comment. (2) Python-2-only ``print``
    statements are parenthesized (single-argument form, so output is
    identical on Python 2 and 3).
    """
    print("Message arrived: " + msg.topic)
    tokens = msg.topic.split("/")

    if tokens[0] == "spBv1.0" and tokens[1] == myGroupId and (tokens[2] == "NCMD" or tokens[2] == "DCMD") and tokens[3] == myNodeName:
        inboundPayload = sparkplug_b_pb2.Payload()
        inboundPayload.ParseFromString(msg.payload)
        for metric in inboundPayload.metrics:
            if metric.name == "Node Control/Next Server" or metric.alias == AliasMap.Next_Server:
                # 'Node Control/Next Server' tells the client to disconnect
                # and connect to the next MQTT server in its pool.
                print("'Node Control/Next Server' is not implemented in this example")
            elif metric.name == "Node Control/Rebirth" or metric.alias == AliasMap.Rebirth:
                # Resend the full NBIRTH/DBIRTH. MQTT Engine issues this when
                # it receives a DATA metric never declared in a BIRTH — which
                # is why BIRTHs must carry all known metrics.
                publishBirth()
            elif metric.name == "Node Control/Reboot" or metric.alias == AliasMap.Reboot:
                # Fake a full reboot with a republish of the BIRTH messages.
                publishBirth()
            elif metric.name == "output/Device Metric2" or metric.alias == AliasMap.Device_Metric2:
                # Emulated output declared in the DBIRTH. On an incoming
                # 'write' we must publish a DDATA with the new output value.
                # If this were a real output we'd write to the output and then
                # read it back before publishing. Int16 per the DBIRTH.
                newValue = metric.int_value
                print("CMD message for output/Device Metric2 - New Value: {}".format(newValue))
                # Create the DDATA payload - use the alias because this isn't the DBIRTH
                payload = sparkplug.getDdataPayload()
                addMetric(payload, None, AliasMap.Device_Metric2, MetricDataType.Int16, newValue)
                byteArray = bytearray(payload.SerializeToString())
                client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
            elif metric.name == "output/Device Metric3" or metric.alias == AliasMap.Device_Metric3:
                # Same emulated-output pattern; Boolean per the DBIRTH.
                newValue = metric.boolean_value
                print("CMD message for output/Device Metric3 - New Value: %r" % newValue)
                payload = sparkplug.getDdataPayload()
                addMetric(payload, None, AliasMap.Device_Metric3, MetricDataType.Boolean, newValue)
                byteArray = bytearray(payload.SerializeToString())
                client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
            else:
                print("Unknown command: " + metric.name)
    else:
        print("Unknown command...")
    print("Done publishing")
# --- Client wiring and main publish loop (module-level script) ---
#
# Defect fixed: ``string.lowercase`` is Python-2-only (removed in Python 3);
# ``string.ascii_lowercase`` exists in both and is the same value in the
# default locale-independent sense, so behavior is preserved.
client.on_message = on_message
client.username_pw_set(myUsername, myPassword)
# Register the NDEATH payload as the MQTT Last Will so the broker announces
# this node's death if the connection drops ungracefully.
deathByteArray = bytearray(deathPayload.SerializeToString())
client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName, deathByteArray, 0, False)
client.connect(serverUrl, 1883, 60)

# Short delay to allow connect callback to occur
time.sleep(.1)
client.loop()

# Publish the birth certificates
publishBirth()

# NOTE(review): this loop publishes without any sleep/loop pacing between
# iterations; confirm whether the chunk is truncated before changing.
while True:
    # Periodically publish some new data
    payload = sparkplug.getDdataPayload()

    # Add some random data to the inputs
    addMetric(payload, None, AliasMap.Device_Metric0, MetricDataType.String,
              ''.join(random.choice(string.ascii_lowercase) for i in range(12)))

    # Note this data we're setting to STALE via the propertyset as an example
    metric = addMetric(payload, None, AliasMap.Device_Metric1,
                       MetricDataType.Boolean, random.choice([True, False]))
    metric.properties.keys.extend(["Quality"])
    propertyValue = metric.properties.values.add()
    propertyValue.type = ParameterDataType.Int32
    propertyValue.int_value = 500

    # Publish a message data
    byteArray = bytearray(payload.SerializeToString())
    client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)