Example
# NOTE: stdlib imports used below; project-local names (Subject, Spark,
# LogfileThread, SchedulerThread, the Discreet* observers, Client,
# datetimeToSeconds, LOG) are assumed to come from elsewhere in the package.
import os
import commands  # Python 2 only; this snippet predates subprocess

class App(Subject):
    count = 0
    def __init__(self, program):
        Subject.__init__(self)

        self.program = program
        LOG.debug("The App program is: %s" % program)

        # --------------------
        # APP STATE
        #
        self.resetAppState()
        self.resetBatchState()

        # --------------------
        # SPARKS
        #
        self.id = str(App.count)
        # Bump the class-level counter; ``self.count += 1`` would only
        # create an instance attribute and leave App.count at 0 forever.
        App.count += 1
        self.spark = Spark(self.id)

        # --------------------
        # THREADS
        #
        self.logfile = LogfileThread('logfile', self._getAppLog())
        self.scheduler = SchedulerThread('scheduler', interval=0.1)

        # app state events
        self.logfile.registerObserver(DiscreetSpecifyHostname(self.cbSpecifyHostname))
        self.logfile.registerObserver(DiscreetSpecifyProject(self.cbSpecifyProject))
        self.logfile.registerObserver(DiscreetSpecifyVolume(self.cbSpecifyVolume))
        self.logfile.registerObserver(DiscreetSpecifyUser(self.cbSpecifyUser))

        self.logfile.registerObserver(DiscreetTimedMessage(self.cbTimedMessage))

        # setup events
        self.logfile.registerObserver(DiscreetLoadSetup(self.cbLoadSetup))
        self.logfile.registerObserver(DiscreetSaveSetup(self.cbSaveSetup))
        self.logfile.registerObserver(DiscreetSaveCurrent(self.cbSaveCurrent))
        self.logfile.registerObserver(DiscreetLoadInformerSetup(self.cbLoadInformerSetup))

        # batch processing events
        self.logfile.registerObserver(DiscreetBatchProcess(self.cbBatchProcess))
        self.logfile.registerObserver(DiscreetBurnProcess(self.cbBurnProcess))

    def _getAppLog(self):
        # Mirrored from $FLAME_HOME/bin/startApplication: pull the quoted
        # version string out of $<PROGRAM>_HOME/<PROGRAM>_VERSION and strip
        # the dots from it.
        cmd = r"""cat $%s_HOME/%s_VERSION | cut -d '"' -f2 | sed 's/\.//g'"""
        cmd = cmd % (self.program.upper(), self.program.upper())
        version = commands.getoutput(cmd)
        root = "/usr/discreet/log/" + self.program
        hostname = commands.getoutput("hostname -s")

        # e.g. /usr/discreet/log/flame<version>_<hostname>_app.log
        appLog = "%s%s_%s_app.log" % (root, version, hostname)
        return appLog
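
    # The shell pipeline above is Unix/Python-2 specific. A hypothetical
    # pure-Python equivalent (NOT in the original source; the version-file
    # format is an assumption inferred from the cut/sed pipeline above):
    def _getAppVersionPurePython(self):
        home = os.environ["%s_HOME" % self.program.upper()]
        path = os.path.join(home, "%s_VERSION" % self.program.upper())
        with open(path) as f:
            contents = f.read()
        version = contents.split('"')[1]   # field 2 of: cut -d '"' -f2
        return version.replace(".", "")    # drop the dots, as the sed does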

    def isBurn(self):
        return self.program == 'burn'

    def resetAppState(self):
        self.user = None
        self.volume = None
        self.quality = None
        self.project = None
        self.hostname = None
        self.frameRate = None
        self.pixelAspectRatio = None

    def resetBatchState(self):
        self.shot = None
        self.setup = None
        self.events = []
        self.outputs = {}
        self.lastJob = None
        self.lastProcess = None
        self.ignoreFrames = False

    # ----------------------------------------------------------------------
    # App Control
    # ----------------------------------------------------------------------
    def start(self):
        self.resetAppState()
        self.resetBatchState()

        self.scheduler.register(self.logfile, 0.1)
        self.scheduler.process()
        self.scheduler.start()

    def stop(self):
        self.scheduler.stop()
        if self.scheduler.isAlive():
            LOG.debug("STOPPING INTERPRETER...")
            self.scheduler.join()
        else:
            LOG.debug("scheduler was dead.")

        for t in self.scheduler.threads.values():
            if t.isAlive():
                t.stop()
            LOG.debug("Thread [%s] is alive %s" % (t.name, t.isAlive()))

        LOG.debug("the scheduler is alive %s" % (self.scheduler.isAlive()))

    def _suspend(self):
        # suspend threads that alter the app state
        self.logfile.suspend()

    def _resume(self):
        # resume threads that alter the app state
        self.logfile.resume()


    # ----------------------------------------------------------------------
    # Sparks
    # ----------------------------------------------------------------------
    def _sparkCleanName(self, name):
        # Spark names can arrive with a trailing newline; strip it.
        if name and name[-1] == '\n':
            name = name[0:-1]
        return name

    def sparkRegister(self, name):
        self.spark.name = self._sparkCleanName(name)
        return self.spark.name

    def sparkGetByName(self, name):
        # Only a single spark is tracked per App, so the cleaned
        # name is not used as a lookup key.
        name = self._sparkCleanName(name)
        return self.spark

    def sparkRename(self, oldName, newName):
        # oldName is ignored for the same reason: there is only one spark.
        self.spark.name = newName

    def sparkProcessStart(self, name):
        """
        SparkProcessStart is called from the spark when the user enters
        the spark. The sequence of events is usually:
            * SparkProcessStart
            * FrameProcessStart
            * FrameProcessEnd
            * SparkProcessEnd

        However, the first time the spark is entered the sequence can be:
            * enter spark
            * FrameProcessStart
            * FrameProcessEnd
            * SparkProcessStart
            * SparkProcessEnd

        This is unfortunate since we process a frame when it should have
        been ignored.

        Since the app is not interested in calls to FrameProcess
        during an edit, SparkProcessStart sets a flag so that the
        next frame processed is ignored.
        """
        self.ignoreFrames = True

    def sparkProcessEnd(self, name):
        """
        Always called after a SparkProcessStart, this marks the point
        where the app can once again save frames.
        """
        self.ignoreFrames = False
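
    # Hypothetical sketch (not part of the original listing) of how a frame
    # callback could consume the ignoreFrames flag; the frameProcessStart
    # name and signature are assumptions:
    def frameProcessStart(self, frame):
        if self.ignoreFrames:
            return  # frame generated while entering/editing the spark; skip it
        # ... normal frame handling would go here ...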

    # ----------------------------------------------------------------------
    # Events
    # ----------------------------------------------------------------------
    def flushEventQueue(self):
        LOG.info("(((( flushing event queue ))))")
        key = '_LAST_EVENT_'

        while self.events:
            appEvent = self.events.pop(0)
            if isinstance(appEvent, DiscreetAppBatchProcessEvent):
                # wait until we flush the queue to determine the batch outputs
                appEvent.outputs = self.outputs.keys()

            lastEvent = None
            if key in os.environ:
                lastEvent = float(os.environ[key])

            eventSeconds = float(datetimeToSeconds(appEvent.date))

            if lastEvent is None or eventSeconds > lastEvent:
                LOG.debug("SENDING EVENT... LOOKS GOOD")
                try:
                    client = Client()
                    if appEvent.job:
                        client.createRender(appEvent)
                    else:
                        client.createEvent(appEvent)
                except Exception as e:
                    LOG.warn("Error sending event: %s [%s]" % (e, type(e)))

                # Set the last event time even if the send failed: an event
                # is either a success or a failure. Updating this only on
                # success would allow a retry whenever a person exited batch
                # and re-entered -- don't think that is what we want.
                #
                # explicitly state the precision of the float
                os.environ[key] = "%.6f" % eventSeconds
            else:
                LOG.debug("SKIPPING EVENT! LAST EVENT WAS MORE RECENT...")