Code Example #1
    def Operational(self):
        """ This is called from ODC once ODC is ready for us to be fully operational - normally after Build is complete"""
        self.LogDebug("Port Operational - {}".format(
            datetime.now().isoformat(" ")))
        # This is only done once - will self restart from the timer callback.
        odc.SetTimer(self.guid, 1, 500)  # Start the timer cycle

        odc.SetTimer(self.guid, 2, 100)  # Start the second timer cycle (used to call poll() more often - see TimerHandler)
        return
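The comment above is the key detail: odc.SetTimer appears to arm a one-shot timer, so the handler has to re-arm it on every callback to keep the cycle alive. A minimal sketch of that pattern, assuming the standard PyPort callbacks (Operational, TimerHandler) and the odc module and self.guid seen in these examples:

    def Operational(self):
        # Arm timer 1 once; from here on TimerHandler keeps it running.
        odc.SetTimer(self.guid, 1, 500)  # timer id 1, fires once after 500 msec
        return

    def TimerHandler(self, TimerId):
        if (TimerId == 1):
            # ... periodic work goes here ...
            odc.SetTimer(self.guid, 1, 500)  # re-arm, otherwise the cycle stops
        return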
Code Example #2
    def TimerHandler(self, TimerId):
        # self.LogDebug("TimerHandler: ID {}, {}".format(TimerId, self.guid))

        self.producer.poll(0)  # Do any waiting processing, but don't wait!
        thresholdcount = 10000

        if (TimerId == 1):
            EventCount = 1
            starttime = datetime.now()

            # Get events from the queue and process them, until the queue is empty,
            # we have processed thresholdcount entries, or the local producer queue
            # holds thresholdcount entries. Then trigger the Kafka library to send them.
            while ((EventCount < thresholdcount)
                   and (len(self.producer) < thresholdcount)):
                EventCount += 1
                EventType, Index, Time, Quality, Payload, Sender = odc.GetNextEvent(
                    self.guid)

                # The EventType will be an empty string if the queue is empty.
                if (len(EventType) == 0):
                    break

                # We already have the event method written to handle this, so just use it.
                # This means the config file flag can swap between the two processes with no code changes.
                self.EventHandler(EventType, Index, Time, Quality, Payload,
                                  Sender)
                self.producer.poll(0)  # Do any waiting processing, but don't wait!

            self.LogDebug(
                "Kafka Produced {} messages. Kafka queue size {}. Execution time {} msec"
                .format(EventCount, len(self.producer),
                        self.millisdiff(starttime)))

            # If we have pushed the maximum number of events in, we need to go faster...
            # If the producer queue hits the limit, this means the kafka cluster is not keeping up.
            if EventCount < thresholdcount:
                odc.SetTimer(self.guid, 1, 500)  # Make it so we fire again in 0.5 seconds.
            else:
                odc.SetTimer(self.guid, 1, 250)

        # Can use timer 2 to run poll() more frequently. Must be started in Operational()
        # Running poll more often does not seem to make a difference.
        if (TimerId == 2):
            odc.SetTimer(self.guid, 2, 100)

        return
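The self.millisdiff(starttime) helper used in the LogDebug call above is not shown in these snippets. A plausible sketch, assuming it simply returns elapsed wall-clock time in milliseconds and uses the same from datetime import datetime as the examples:

    def millisdiff(self, starttime):
        # Hypothetical helper: elapsed milliseconds since 'starttime' (a datetime.datetime).
        return (datetime.now() - starttime).total_seconds() * 1000.0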
Code Example #3
File: PyPortRtuSim.py Project: huhu84821/opendatacon
    def TimerHandler(self, TimerId):
        self.LogDebug("TimerHander: ID {}, {}".format(TimerId, self.guid))
        # This will be used to send Sim value changes that are the result of mean and std deviation values at the UpdateRate
        # Run once every 10 seconds?
        if (TimerId == 1):
            self.UpdateAnalogSimValues(10)  # Pass in seconds since last run
            odc.SetTimer(self.guid, 1, 10 * 1000)  # Set up to run again

        return
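UpdateAnalogSimValues itself is not included in this snippet. Going only by the comment (simulated values driven by a mean and standard deviation at the update rate), a purely hypothetical sketch could look like the following; the SimPoints structure and field names are invented for illustration (and a module-level import random is assumed), while publishing the new values back through odc is omitted:

    def UpdateAnalogSimValues(self, secondssincelast):
        # Hypothetical: nudge each simulated analog around its mean using its std deviation.
        for point in self.SimPoints:  # assumed list of dicts with "Mean", "StdDev", "Value"
            point["Value"] = random.gauss(point["Mean"], point["StdDev"])
            # Pushing the updated value back into ODC is port-specific and not shown here.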
Code Example #4
File: PyPortSim.py Project: huhu84821/opendatacon
    def RestRequestHandler(self, url, content):
        self.LogTrace("RestRequestHander: {}".format(url))

        Response = {}   # Empty Dict
        if ("GET" in url):
            Response["test"] = "GET"
            Response["processedevents"] = self.processedevents
        else:
            Response["test"] = "POST"
        # Just to make sure it gets called and the call succeeds.
        currentqueuesize = odc.GetEventQueueSize(self.guid)

        odc.SetTimer(self.guid, self.i, 1001-self.i)    # Set a timer to go off in a period less than a second
        self.i = self.i + 1
        self.LogTrace("RestRequestHander: Sent Set Timer Command {}".format(self.i))

        return json.dumps(Response)
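For reference, the handler returns a plain JSON string built with json.dumps; for a GET request the reply body looks roughly like this (the processedevents count is illustrative):

    {"test": "GET", "processedevents": 1234}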
Code Example #5
File: PyPortSim.py Project: huhu84821/opendatacon
    def TimerHandler(self, TimerId):
        self.LogTrace("TimerHandler: ID {}, {}".format(TimerId, self.guid))

        if (TimerId == 1):
            #currentqueuesize = odc.GetEventQueueSize(self.guid)
            #self.LogDebug("TimerHander: Event Queue Size {}".format(currentqueuesize))
            # Get Events from the queue and process them
            while (True):
                JsonEvent, empty = odc.GetNextEvent(self.guid)

                if (empty == True):
                    break
                self.processedevents += 1     # Python is single threaded, so no concurrency issues (unless specifically enabled for multi)

            odc.SetTimer(self.guid, 1, 250)     #250 msec - timer 1 restarts itself!

        return
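Note that here odc.GetNextEvent returns a (JsonEvent, empty) pair, while Example #2 unpacks six fields (EventType, Index, Time, Quality, Payload, Sender). Which form the queue hands back presumably depends on the port's configuration or the opendatacon version; both drain loops follow the same shape. A side-by-side sketch (unpacking only, no processing):

    # String/JSON form, as in Examples #5 and #7:
    while (True):
        JsonEvent, empty = odc.GetNextEvent(self.guid)
        if (empty == True):
            break
        # ... handle the JSON string ...

    # Decoded-field form, as in Example #2 - EventType is "" when the queue is empty:
    while (True):
        EventType, Index, Time, Quality, Payload, Sender = odc.GetNextEvent(self.guid)
        if (len(EventType) == 0):
            break
        # ... handle the decoded fields ...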
Code Example #6
    def Operational(self):
        """ This is called from ODC once ODC is ready for us to be fully operational - normally after Build is complete"""
        self.LogDebug("Port Operational - {}".format(
            datetime.now().isoformat(" ")))
        odc.SetTimer(self.guid, 1, 250)  # 250 msec
        return
Code Example #7
File: PyPortKafka.py Project: mkurt/opendatacon
    def TimerHandler(self, TimerId):
        # self.LogDebug("TimerHandler: ID {}, {}".format(TimerId, self.guid))

        if (self.producer is not None):
            self.producer.poll(0)  # Do any waiting processing, but don't wait!

        if (TimerId == 1):

            MaxMessageCount = 5000
            longwaitmsec = 100
            shortwaitmsec = 5
            EventCount = 1
            starttime = datetime.now()
            self.measuretimeus = 0
            self.measuretimeus2 = 0

            if (self.producer is not None):
                # Get Events from the queue and process them, up until we have an empty queue or MaxMessageCount entries
                # Then trigger the Kafka library to send them.

                while (EventCount < MaxMessageCount):
                    EventCount += 1

                    self.timeusstart()
                    ### Takes about 3.2usec (old 8.4usec) per call (approx) on DEV server
                    JsonEventstr, empty = odc.GetNextEvent(self.guid)
                    self.measuretimeus += self.timeusstop()

                    # The 'empty' flag is True if the ODC event queue had nothing to return.
                    if (empty == True):
                        break

                    try:
                        self.timeusstart()
                        # Now 32msec/5000, about 5usec per record (old 45msec/5000, so 9usec/record).
                        # Can we only get a single delivery report per block of up to 5000 messages?
                        # If we set the retry count to max int, handling the delivery report does not make
                        # much sense - the buffer will just fill up and we will then get an exception here
                        # due to a full buffer, and we fill up the next buffer (in PyPort). (Note we need to
                        # store the event we were about to send so we don't lose it!)
                        # Eventually we will lose events, but there is nothing we can do about that.
                        self.producer.produce(self.topic, value=JsonEventstr)
                        self.measuretimeus2 += self.timeusstop()
                        if self.QueueErrorState == 1:
                            self.LogError(
                                "Kafka Producer Queue Recovered - NOT full ({} messages awaiting delivery)"
                                .format(len(self.producer)))
                            self.QueueErrorState = 0

                    except BufferError:
                        if self.QueueErrorState == 0:
                            self.LogError(
                                "Kafka Producer Queue is full ({} messages awaiting delivery)"
                                .format(len(self.producer)))
                            self.QueueErrorState = 1
                        break

                    if (EventCount % 100 == 0):
                        self.producer.poll(0)  # Do any waiting processing, but don't wait!

                self.EventQueueSize = odc.GetEventQueueSize(self.guid)
                #self.LogDebug("Kafka Produced {} messages. Kafka queue size {}. ODC Event queue size {} Execution time {} msec Timed code {}, {} us".format(EventCount,len(self.producer),self.EventQueueSize,self.millisdiff(starttime),self.measuretimeus,self.measuretimeus2))

                self.MessageIndex += EventCount

            # If we have pushed the maximum number of events in, we need to go faster...
            # If the producer queue hits the limit, this means the kafka cluster is not keeping up.
            if EventCount < MaxMessageCount:
                odc.SetTimer(self.guid, 1, longwaitmsec)  # We do not have messages waiting...
            else:
                odc.SetTimer(self.guid, 1, shortwaitmsec)  # We do have messages waiting

        if (TimerId == 2):
            self.minutetimermessage()
            odc.SetTimer(self.guid, 2, 10000)  # Set to run again in 10 seconds

        if (self.producer is not None):
            self.producer.poll(0)  # Do any waiting processing, but don't wait!

        return
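This port appears to use the confluent_kafka Producer API: produce(), poll(), len(producer) and BufferError on a full local queue all match that client. It also relies on helpers not shown here (self.timeusstart()/self.timeusstop() for microsecond timing, self.minutetimermessage() for the periodic status log). A minimal, assumption-laden sketch of how the producer and timing helpers might be set up (assuming module-level import time and from confluent_kafka import Producer); the broker address, topic name and attribute names are placeholders, and in the real port they would presumably come from the JSON configuration during the Build callback mentioned in Example #1:

    def Build(self):
        # Placeholder setup - broker and topic would normally come from the port's JSON config.
        self.producer = Producer({"bootstrap.servers": "broker1:9092"})
        self.topic = "opendatacon-events"
        self.QueueErrorState = 0
        self.MessageIndex = 0
        self.EventQueueSize = 0

    def timeusstart(self):
        # Hypothetical timing helper: remember a high-resolution start time.
        self._tstart = time.perf_counter()

    def timeusstop(self):
        # Hypothetical timing helper: elapsed microseconds since timeusstart().
        return (time.perf_counter() - self._tstart) * 1000000.0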